coco_karpathy_dataset.py

import os
import json

from torch.utils.data import Dataset
from torchvision.datasets.utils import download_url
from PIL import Image

from data.utils import pre_caption

class coco_karpathy_train(Dataset):
    def __init__(self, transform, image_root, ann_root, max_words=30, prompt=''):
        '''
        image_root (string): Root directory of images (e.g. coco/images/)
        ann_root (string): directory to store the annotation file
        '''
        url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json'
        filename = 'coco_karpathy_train.json'

        download_url(url, ann_root)

        with open(os.path.join(ann_root, filename), 'r') as f:
            self.annotation = json.load(f)
        self.transform = transform
        self.image_root = image_root
        self.max_words = max_words
        self.prompt = prompt

        # Map each COCO image_id to a contiguous index; the index is returned
        # with every sample so captions of the same image share one target id.
        self.img_ids = {}
        n = 0
        for ann in self.annotation:
            img_id = ann['image_id']
            if img_id not in self.img_ids:
                self.img_ids[img_id] = n
                n += 1

    def __len__(self):
        return len(self.annotation)

    def __getitem__(self, index):
        ann = self.annotation[index]

        image_path = os.path.join(self.image_root, ann['image'])
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image)

        caption = self.prompt + pre_caption(ann['caption'], self.max_words)

        return image, caption, self.img_ids[ann['image_id']]
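
# Usage note (a minimal sketch, not part of the original file): each training
# item is (transformed image, prompted caption, contiguous image index), so the
# dataset drops straight into a standard DataLoader. The transform, paths, and
# prompt below are illustrative assumptions only.
#
#   from torchvision import transforms
#   from torch.utils.data import DataLoader
#
#   train_transform = transforms.Compose([transforms.Resize((384, 384)),
#                                         transforms.ToTensor()])
#   train_set = coco_karpathy_train(train_transform, 'coco/images/', 'annotation/',
#                                   prompt='a picture of ')
#   loader = DataLoader(train_set, batch_size=32, shuffle=True)
#   image, caption, img_id = next(iter(loader))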

class coco_karpathy_caption_eval(Dataset):
    def __init__(self, transform, image_root, ann_root, split):
        '''
        image_root (string): Root directory of images (e.g. coco/images/)
        ann_root (string): directory to store the annotation file
        split (string): val or test
        '''
        urls = {'val': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json',
                'test': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json'}
        filenames = {'val': 'coco_karpathy_val.json', 'test': 'coco_karpathy_test.json'}

        download_url(urls[split], ann_root)

        with open(os.path.join(ann_root, filenames[split]), 'r') as f:
            self.annotation = json.load(f)
        self.transform = transform
        self.image_root = image_root

    def __len__(self):
        return len(self.annotation)

    def __getitem__(self, index):
        ann = self.annotation[index]

        image_path = os.path.join(self.image_root, ann['image'])
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image)

        # Recover the numeric COCO image id from a path such as
        # 'val2014/COCO_val2014_000000184613.jpg' -> 184613.
        img_id = os.path.splitext(os.path.basename(ann['image']))[0].split('_')[-1]

        return image, int(img_id)
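
# Usage note (sketch, assumed paths): the caption-eval dataset yields
# (image, coco_image_id); the integer id is typically what COCO-style caption
# evaluation expects when pairing generated captions with ground truth.
#
#   val_set = coco_karpathy_caption_eval(eval_transform, 'coco/images/', 'annotation/', 'val')
#   image, img_id = val_set[0]
#   result = {'image_id': img_id, 'caption': 'a generated caption'}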

class coco_karpathy_retrieval_eval(Dataset):
    def __init__(self, transform, image_root, ann_root, split, max_words=30):
        '''
        image_root (string): Root directory of images (e.g. coco/images/)
        ann_root (string): directory to store the annotation file
        split (string): val or test
        '''
        urls = {'val': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json',
                'test': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json'}
        filenames = {'val': 'coco_karpathy_val.json', 'test': 'coco_karpathy_test.json'}

        download_url(urls[split], ann_root)

        with open(os.path.join(ann_root, filenames[split]), 'r') as f:
            self.annotation = json.load(f)
        self.transform = transform
        self.image_root = image_root

        # Flatten the annotations into parallel image / caption lists and keep
        # the index mappings needed to score image-text retrieval:
        #   img2txt[i] -> list of caption indices that describe image i
        #   txt2img[t] -> index of the image that caption t belongs to
        self.text = []
        self.image = []
        self.txt2img = {}
        self.img2txt = {}

        txt_id = 0
        for img_id, ann in enumerate(self.annotation):
            self.image.append(ann['image'])
            self.img2txt[img_id] = []
            for i, caption in enumerate(ann['caption']):
                self.text.append(pre_caption(caption, max_words))
                self.img2txt[img_id].append(txt_id)
                self.txt2img[txt_id] = img_id
                txt_id += 1

    def __len__(self):
        return len(self.annotation)

    def __getitem__(self, index):
        image_path = os.path.join(self.image_root, self.annotation[index]['image'])
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image)

        return image, index
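

if __name__ == '__main__':
    # Minimal sketch (not in the original file) showing how the retrieval-eval
    # index mappings line up. image_root is an assumption; no images are
    # loaded here, only the caption/image index structure is inspected.
    from torchvision import transforms

    eval_transform = transforms.Compose([transforms.Resize((384, 384)),
                                         transforms.ToTensor()])
    val_set = coco_karpathy_retrieval_eval(eval_transform, 'coco/images/', 'annotation/', 'val')

    print(len(val_set.image), 'images,', len(val_set.text), 'captions')

    # Captions indexed by img2txt[0] all describe image 0, and txt2img maps
    # each of those caption indices back to image 0.
    first_img_texts = val_set.img2txt[0]
    print([val_set.text[t] for t in first_img_texts])
    assert all(val_set.txt2img[t] == 0 for t in first_img_texts)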