update model url

Author: root
Date: 2022-03-02 23:59:09 +00:00
parent 96a956cfe2
commit 21aaf5d67b
4 changed files with 5 additions and 5 deletions

@@ -3,7 +3,7 @@ ann_root: 'annotation'
coco_gt_root: 'annotation/coco_gt'
# set pretrained as a file path or an url
-pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'
+pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
# size of vit model; base or large
vit: 'base'
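
Both this hunk and the nocaps hunk below point `pretrained` at the same new caption checkpoint. A minimal sketch of how such a config value gets consumed, modeled on the blip_decoder call in the notebook hunk further down; the yaml loading, config filename, and device handling are illustrative assumptions, not part of this commit:

import torch
import yaml
from models.blip import blip_decoder  # model factory from the BLIP repo

# Hypothetical filename; either caption config touched by this commit fits.
with open('caption_config.yaml') as f:
    config = yaml.safe_load(f)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Per the comment in the config hunks, `pretrained` may be a local file
# path or a URL; a URL is expected to be downloaded on first use.
model = blip_decoder(pretrained=config['pretrained'],
                     image_size=384, vit=config['vit'])
model = model.to(device).eval()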

@@ -2,7 +2,7 @@ image_root: '/export/share/datasets/vision/nocaps/'
ann_root: 'annotation'
# set pretrained as a file path or an url
-pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'
+pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
vit: 'base'
batch_size: 32

@@ -4,7 +4,7 @@ train_files: ['vqa_train','vqa_val','vg_qa']
ann_root: 'annotation'
# set pretrained as a file path or an url
-pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'
+pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
# size of vit model; base or large
vit: 'base'
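
The VQA config gets the matching VQA checkpoint. A hedged sketch mirroring the blip_vqa call in the notebook hunk below (image_size 480 comes from that hunk; the rest is illustrative):

from models.blip_vqa import blip_vqa  # from the BLIP repo

model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'

# As with the caption decoder, `pretrained` accepts a path or a URL.
model = blip_vqa(pretrained=model_url, image_size=480, vit='base')
model.eval()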

@@ -99,7 +99,7 @@
"image_size = 384\n",
"image = load_demo_image(image_size=image_size, device=device)\n",
"\n",
"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'\n",
"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'\n",
" \n",
"model = blip_decoder(pretrained=model_url, image_size=image_size, vit='base')\n",
"model.eval()\n",
@@ -153,7 +153,7 @@
"image_size = 480\n",
"image = load_demo_image(image_size=image_size, device=device) \n",
"\n",
"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'\n",
"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'\n",
" \n",
"model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')\n",
"model.eval()\n",