Mirror of https://github.com/salesforce/BLIP.git (synced 2026-02-23 12:33:55 +00:00)
init
configs/bert_config.json  (new file, 21 lines)
@@ -0,0 +1,21 @@
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "type_vocab_size": 2,
  "vocab_size": 30522,
  "encoder_width": 768,
  "add_cross_attention": true
}
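A minimal sketch (not part of the commit) of how this file is consumed: models/blip_pretrain.py below loads it through the repo's custom BertConfig and overrides encoder_width at runtime to match the vision encoder's width.

    # assumes the repo root is on PYTHONPATH; BertConfig is the subclass in models/med.py
    from models.med import BertConfig

    config = BertConfig.from_json_file('./configs/bert_config.json')
    config.encoder_width = 768   # set from the ViT width, as BLIP_Pretrain does
    print(config.vocab_size)     # 30522: the stock bert-base-uncased vocabulary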
configs/caption_coco.yaml  (new file, 33 lines)
@@ -0,0 +1,33 @@
image_root: '/export/share/datasets/vision/coco/images/'
ann_root: 'annotation'
coco_gt_root: 'annotation/coco_gt'

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'

# size of vit model; base or large
vit: 'base'
vit_grad_ckpt: False
vit_ckpt_layer: 0
batch_size: 32
init_lr: 1e-5

# vit: 'large'
# vit_grad_ckpt: True
# vit_ckpt_layer: 5
# batch_size: 16
# init_lr: 2e-6

image_size: 384

# generation configs
max_length: 20
min_length: 5
num_beams: 3
prompt: 'a picture of '

# optimizer
weight_decay: 0.05
min_lr: 0
max_epoch: 5
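The scripts in this commit read these configs with ruamel_yaml (see eval_nocaps.py below). A sketch, assuming the config path is valid:

    import ruamel_yaml as yaml

    config = yaml.load(open('./configs/caption_coco.yaml', 'r'), Loader=yaml.Loader)
    print(config['num_beams'], config['prompt'])   # 3  'a picture of '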
configs/med_config.json  (new file, 21 lines)
@@ -0,0 +1,21 @@
{
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "type_vocab_size": 2,
  "vocab_size": 30524,
  "encoder_width": 768,
  "add_cross_attention": true
}
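Note the vocab_size of 30524 rather than BERT's 30522: init_tokenizer() in models/blip.py (later in this diff) adds two special tokens, [DEC] and [ENC], to bert-base-uncased. A quick check:

    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    print(len(tokenizer))   # 30524, matching this config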
configs/nlvr.yaml  (new file, 21 lines)
@@ -0,0 +1,21 @@
image_root: '/export/share/datasets/vision/NLVR2/'
ann_root: 'annotation'

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth'

# size of vit model; base or large
vit: 'base'
batch_size_train: 16
batch_size_test: 64
vit_grad_ckpt: False
vit_ckpt_layer: 0
max_epoch: 15

image_size: 384

# optimizer
weight_decay: 0.05
init_lr: 3e-5
min_lr: 0
configs/nocaps.yaml  (new file, 15 lines)
@@ -0,0 +1,15 @@
image_root: '/export/share/datasets/vision/nocaps/'
ann_root: 'annotation'

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'

vit: 'base'
batch_size: 32

image_size: 384

max_length: 20
min_length: 5
num_beams: 3
prompt: 'a picture of '
configs/pretrain.yaml  (new file, 27 lines)
@@ -0,0 +1,27 @@
train_file: ['/export/share/junnan-li/VL_pretrain/annotation/coco_karpathy_train.json',
             '/export/share/junnan-li/VL_pretrain/annotation/vg_caption.json',
            ]
laion_path: ''

# size of vit model; base or large
vit: 'base'
vit_grad_ckpt: False
vit_ckpt_layer: 0

image_size: 224
batch_size: 75

queue_size: 57600
alpha: 0.4

# optimizer
weight_decay: 0.05
init_lr: 3e-4
min_lr: 1e-6
warmup_lr: 1e-6
lr_decay_rate: 0.9
max_epoch: 20
warmup_steps: 3000
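A hedged sketch of the learning-rate schedule these fields imply (the actual schedule helpers live in the repo's utils.py, which is not part of this diff; lr_at is a hypothetical stand-in): linear warmup from warmup_lr to init_lr over warmup_steps, then exponential per-epoch decay by lr_decay_rate, floored at min_lr.

    def lr_at(step, epoch, init_lr=3e-4, warmup_lr=1e-6, warmup_steps=3000,
              lr_decay_rate=0.9, min_lr=1e-6):
        if step < warmup_steps:                    # linear warmup phase
            return warmup_lr + (init_lr - warmup_lr) * step / warmup_steps
        return max(min_lr, init_lr * lr_decay_rate ** epoch)   # per-epoch decay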
configs/retrieval_coco.yaml  (new file, 34 lines)
@@ -0,0 +1,34 @@
image_root: '/export/share/datasets/vision/coco/images/'
ann_root: 'annotation'
dataset: 'coco'

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'

# size of vit model; base or large

vit: 'base'
batch_size_train: 32
batch_size_test: 64
vit_grad_ckpt: True
vit_ckpt_layer: 4
init_lr: 1e-5

# vit: 'large'
# batch_size_train: 16
# batch_size_test: 32
# vit_grad_ckpt: True
# vit_ckpt_layer: 12
# init_lr: 5e-6

image_size: 384
queue_size: 57600
alpha: 0.4
k_test: 256
negative_all_rank: True

# optimizer
weight_decay: 0.05
min_lr: 0
max_epoch: 6
configs/retrieval_flickr.yaml  (new file, 34 lines)
@@ -0,0 +1,34 @@
image_root: '/export/share/datasets/vision/flickr30k/'
ann_root: 'annotation'
dataset: 'flickr'

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_flickr.pth'

# size of vit model; base or large

vit: 'base'
batch_size_train: 32
batch_size_test: 64
vit_grad_ckpt: True
vit_ckpt_layer: 4
init_lr: 1e-5

# vit: 'large'
# batch_size_train: 16
# batch_size_test: 32
# vit_grad_ckpt: True
# vit_ckpt_layer: 10
# init_lr: 5e-6

image_size: 384
queue_size: 57600
alpha: 0.4
k_test: 128
negative_all_rank: False

# optimizer
weight_decay: 0.05
min_lr: 0
max_epoch: 6
configs/vqa.yaml  (new file, 25 lines)
@@ -0,0 +1,25 @@
vqa_root: '/export/share/datasets/vision/VQA/Images/mscoco/'   # followed by train2014/
vg_root: '/export/share/datasets/vision/visual-genome/'        # followed by image/
train_files: ['vqa_train','vqa_val','vg_qa']
ann_root: 'annotation'

# set pretrained as a file path or an url
pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'

# size of vit model; base or large
vit: 'base'
batch_size_train: 16
batch_size_test: 32
vit_grad_ckpt: False
vit_ckpt_layer: 0
init_lr: 2e-5

image_size: 480

k_test: 128
inference: 'rank'

# optimizer
weight_decay: 0.05
min_lr: 0
max_epoch: 10
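inference: 'rank' selects each answer by rescoring a fixed candidate list rather than free-form decoding, and k_test is the number of candidates rescored per question (see rank_answer in models/blip_vqa.py at the end of this diff). A hedged call sketch, with image, question, and candidates as stand-ins:

    # 'candidates' is assumed to be the answer list pre-tokenized by the caller,
    # since the rank branch of BLIP_VQA.forward reads candidates.input_ids directly
    answer_ids = model(image, question, candidates, train=False,
                       inference='rank', k_test=128)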
demo.ipynb  (new file, 173 lines)
File diff suppressed because one or more lines are too long
eval_nocaps.py  (new file, 118 lines)
@@ -0,0 +1,118 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader

from models.blip import blip_decoder
import utils
from data import create_dataset, create_sampler, create_loader
from data.utils import save_result

@torch.no_grad()
def evaluate(model, data_loader, device, config):
    # evaluate
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'
    print_freq = 10

    result = []
    for image, image_id in metric_logger.log_every(data_loader, print_freq, header):

        image = image.to(device)

        captions = model.generate(image, sample=False, num_beams=config['num_beams'], max_length=config['max_length'],
                                  min_length=config['min_length'], repetition_penalty=1.1)

        for caption, img_id in zip(captions, image_id):
            result.append({"image_id": img_id.item(), "caption": caption})

    return result


def main(args, config):
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating captioning dataset")
    val_dataset, test_dataset = create_dataset('nocaps', config)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler([val_dataset, test_dataset], [False, False], num_tasks, global_rank)
    else:
        samplers = [None, None]

    val_loader, test_loader = create_loader([val_dataset, test_dataset], samplers,
                                            batch_size=[config['batch_size']]*2, num_workers=[4, 4],
                                            is_trains=[False, False], collate_fns=[None, None])

    #### Model ####
    print("Creating model")
    model = blip_decoder(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'],
                         prompt=config['prompt'])

    model = model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    val_result = evaluate(model_without_ddp, val_loader, device, config)
    val_result_file = save_result(val_result, args.result_dir, 'val', remove_duplicate='image_id')
    test_result = evaluate(model_without_ddp, test_loader, device, config)
    test_result_file = save_result(test_result, args.result_dir, 'test', remove_duplicate='image_id')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/nocaps.yaml')
    parser.add_argument('--output_dir', default='output/NoCaps')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    args.result_dir = os.path.join(args.output_dir, 'result')

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    Path(args.result_dir).mkdir(parents=True, exist_ok=True)

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
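A note on the flags above: argparse's type=bool parses any non-empty string, including 'False', as True, so in practice single-process runs rely on utils.init_distributed_mode (not shown in this diff) presumably disabling distributed mode when no launcher environment variables are present. A typical invocation using the defaults defined above:

    python eval_nocaps.py --config ./configs/nocaps.yaml --output_dir output/NoCaps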
models/__init__.py  (new empty file, 0 lines)
BIN  models/__pycache__/__init__.cpython-36.pyc  (new file; binary file not shown)
BIN  models/__pycache__/__init__.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/blip.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/blip_nlvr.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/blip_retrieval.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/blip_vqa.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/booster.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/booster_nlvr.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/booster_retrieval.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/booster_retrieval_new.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/booster_vqa.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/med.cpython-36.pyc  (new file; binary file not shown)
BIN  models/__pycache__/med.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/nlvr_encoder.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/univlm.cpython-36.pyc  (new file; binary file not shown)
BIN  models/__pycache__/univlm.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/univlm_pretrain.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/univlm_retrieval.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/univlm_vqa.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/vit.cpython-36.pyc  (new file; binary file not shown)
BIN  models/__pycache__/vit.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/vl_model.cpython-38.pyc  (new file; binary file not shown)
BIN  models/__pycache__/xbert.cpython-38.pyc  (new file; binary file not shown)
models/blip.py  (new file, 236 lines)
@@ -0,0 +1,236 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''

from models.vit import VisionTransformer, interpolate_pos_embed
from models.med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer

import torch
from torch import nn
import torch.nn.functional as F

import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file

class BLIP_Base(nn.Module):
    def __init__(self,
                 med_config='./configs/med_config.json',
                 image_size=384,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

    def forward(self, image, caption, mode):

        assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
        text = self.tokenizer(caption, return_tensors="pt").to(image.device)

        if mode == 'image':
            # return image features
            image_embeds = self.visual_encoder(image)
            return image_embeds

        elif mode == 'text':
            # return text features
            text_output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask,
                                            return_dict=True, mode='text')
            return text_output.last_hidden_state

        elif mode == 'multimodal':
            # return multimodal features
            image_embeds = self.visual_encoder(image)
            image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

            text.input_ids[:, 0] = self.tokenizer.enc_token_id
            output = self.text_encoder(text.input_ids,
                                       attention_mask=text.attention_mask,
                                       encoder_hidden_states=image_embeds,
                                       encoder_attention_mask=image_atts,
                                       return_dict=True,
                                       )
            return output.last_hidden_state


class BLIP_Decoder(nn.Module):
    def __init__(self,
                 med_config='./configs/med_config.json',
                 image_size=384,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 prompt='a picture of ',
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel(config=med_config)

        self.prompt = prompt
        self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1

    def forward(self, image, caption):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)

        text.input_ids[:, 0] = self.tokenizer.bos_token_id

        decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
        decoder_targets[:, :self.prompt_length] = -100

        decoder_output = self.text_decoder(text.input_ids,
                                           attention_mask=text.attention_mask,
                                           encoder_hidden_states=image_embeds,
                                           encoder_attention_mask=image_atts,
                                           labels=decoder_targets,
                                           return_dict=True,
                                           )
        loss_lm = decoder_output.loss

        return loss_lm

    def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
        image_embeds = self.visual_encoder(image)

        if not sample:
            image_embeds = image_embeds.repeat_interleave(num_beams, dim=0)

        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask": image_atts}

        prompt = [self.prompt] * image.size(0)
        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
        input_ids[:, 0] = self.tokenizer.bos_token_id
        input_ids = input_ids[:, :-1]

        if sample:
            # nucleus sampling
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 do_sample=True,
                                                 top_p=top_p,
                                                 num_return_sequences=1,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=1.1,
                                                 **model_kwargs)
        else:
            # beam search
            outputs = self.text_decoder.generate(input_ids=input_ids,
                                                 max_length=max_length,
                                                 min_length=min_length,
                                                 num_beams=num_beams,
                                                 eos_token_id=self.tokenizer.sep_token_id,
                                                 pad_token_id=self.tokenizer.pad_token_id,
                                                 repetition_penalty=repetition_penalty,
                                                 **model_kwargs)

        captions = []
        for output in outputs:
            caption = self.tokenizer.decode(output, skip_special_tokens=True)
            captions.append(caption[len(self.prompt):])
        return captions


def blip_decoder(pretrained='', **kwargs):
    model = BLIP_Decoder(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert(len(msg.missing_keys) == 0)
    return model


def blip_feature_extractor(pretrained='', **kwargs):
    model = BLIP_Base(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        assert(len(msg.missing_keys) == 0)
    return model


def init_tokenizer():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
    return tokenizer


def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):

    assert vit in ['base', 'large'], "vit parameter must be base or large"
    if vit == 'base':
        vision_width = 768
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
                                           num_heads=12, use_grad_checkpointing=use_grad_checkpointing,
                                           ckpt_layer=ckpt_layer,
                                           drop_path_rate=0 or drop_path_rate
                                           )
    elif vit == 'large':
        vision_width = 1024
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
                                           num_heads=16, use_grad_checkpointing=use_grad_checkpointing,
                                           ckpt_layer=ckpt_layer,
                                           drop_path_rate=0.1 or drop_path_rate
                                           )
    return visual_encoder, vision_width


def is_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')

    state_dict = checkpoint['model']

    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
    if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
        state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
                                                                         model.visual_encoder_m)
    for key in model.state_dict().keys():
        if key in state_dict.keys():
            if state_dict[key].shape != model.state_dict()[key].shape:
                del state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    print('load checkpoint from %s' % url_or_filename)
    return model, msg
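A minimal usage sketch (not in the commit): img is assumed to be a normalized, batched 3x384x384 image tensor, and the checkpoint placeholder stands in for the captioning weights referenced by configs/caption_coco.yaml.

    import torch
    from models.blip import blip_decoder

    model = blip_decoder(pretrained='<caption checkpoint path or url>',
                         image_size=384, vit='base')
    model.eval()
    with torch.no_grad():
        captions = model.generate(img, sample=False, num_beams=3,
                                  max_length=20, min_length=5)
    print(captions[0])   # the generated caption, with the prompt stripped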
models/blip_nlvr.py  (new file, 103 lines)
@@ -0,0 +1,103 @@
from models.med import BertConfig
from models.nlvr_encoder import BertModel
from models.vit import interpolate_pos_embed
from models.blip import create_vit, init_tokenizer, is_url

from timm.models.hub import download_cached_file

import os  # used by load_checkpoint below
import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertTokenizer
import numpy as np

class BLIP_NLVR(nn.Module):
    def __init__(self,
                 med_config='./configs/med_config.json',
                 image_size=480,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

        self.cls_head = nn.Sequential(
            nn.Linear(self.text_encoder.config.hidden_size, self.text_encoder.config.hidden_size),
            nn.ReLU(),
            nn.Linear(self.text_encoder.config.hidden_size, 2)
        )

    def forward(self, image, text, targets, train=True):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        image0_embeds, image1_embeds = torch.split(image_embeds, targets.size(0))

        text = self.tokenizer(text, padding='longest', return_tensors="pt").to(image.device)
        text.input_ids[:, 0] = self.tokenizer.enc_token_id

        output = self.text_encoder(text.input_ids,
                                   attention_mask=text.attention_mask,
                                   encoder_hidden_states=[image0_embeds, image1_embeds],
                                   encoder_attention_mask=[image_atts[:image0_embeds.size(0)],
                                                           image_atts[image0_embeds.size(0):]],
                                   return_dict=True,
                                   )
        hidden_state = output.last_hidden_state[:, 0, :]
        prediction = self.cls_head(hidden_state)

        if train:
            loss = F.cross_entropy(prediction, targets)
            return loss
        else:
            return prediction


def blip_nlvr(pretrained='', **kwargs):
    model = BLIP_NLVR(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        print("missing keys:")
        print(msg.missing_keys)
    return model


def load_checkpoint(model, url_or_filename):
    if is_url(url_or_filename):
        cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
        checkpoint = torch.load(cached_file, map_location='cpu')
    elif os.path.isfile(url_or_filename):
        checkpoint = torch.load(url_or_filename, map_location='cpu')
    else:
        raise RuntimeError('checkpoint url or path is invalid')
    state_dict = checkpoint['model']

    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)

    for key in list(state_dict.keys()):
        if 'crossattention.self.' in key:
            new_key0 = key.replace('self', 'self0')
            new_key1 = key.replace('self', 'self1')
            state_dict[new_key0] = state_dict[key]
            state_dict[new_key1] = state_dict[key]
        elif 'crossattention.output.dense.' in key:
            new_key0 = key.replace('dense', 'dense0')
            new_key1 = key.replace('dense', 'dense1')
            state_dict[new_key0] = state_dict[key]
            state_dict[new_key1] = state_dict[key]

    msg = model.load_state_dict(state_dict, strict=False)
    print('load checkpoint from %s' % url_or_filename)
    return model, msg
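A sketch of the input contract implied by forward() above: the two images of each NLVR2 example are concatenated along the batch dimension, so a batch of B examples is passed as 2B images and then split by targets.size(0). Tensors here are random stand-ins.

    import torch

    B = 4
    image0 = torch.randn(B, 3, 480, 480)          # first image of each pair
    image1 = torch.randn(B, 3, 480, 480)          # second image of each pair
    images = torch.cat([image0, image1], dim=0)   # shape (2B, 3, 480, 480)
    targets = torch.randint(0, 2, (B,))           # true/false labels
    # loss = model(images, statements, targets, train=True)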
models/blip_pretrain.py  (new file, 339 lines)
@@ -0,0 +1,339 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
from models.med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer
import transformers
transformers.logging.set_verbosity_error()

import torch
from torch import nn
import torch.nn.functional as F

from models.blip import create_vit, init_tokenizer, load_checkpoint

class BLIP_Pretrain(nn.Module):
    def __init__(self,
                 med_config='./configs/bert_config.json',
                 image_size=224,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 embed_dim=256,
                 queue_size=57600,
                 momentum=0.995,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, 0)

        if vit == 'base':
            checkpoint = torch.hub.load_state_dict_from_url(
                url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
                map_location="cpu", check_hash=True)
            state_dict = checkpoint["model"]
            msg = self.visual_encoder.load_state_dict(state_dict, strict=False)
        elif vit == 'large':
            from timm.models.helpers import load_custom_pretrained
            from timm.models.vision_transformer import default_cfgs
            load_custom_pretrained(self.visual_encoder, default_cfgs['vit_large_patch16_224_in21k'])

        self.tokenizer = init_tokenizer()
        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel.from_pretrained('bert-base-uncased', config=encoder_config, add_pooling_layer=False)
        self.text_encoder.resize_token_embeddings(len(self.tokenizer))

        text_width = self.text_encoder.config.hidden_size

        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)

        self.itm_head = nn.Linear(text_width, 2)

        # create momentum encoders
        self.visual_encoder_m, vision_width = create_vit(vit, image_size)
        self.vision_proj_m = nn.Linear(vision_width, embed_dim)
        self.text_encoder_m = BertModel(config=encoder_config, add_pooling_layer=False)
        self.text_proj_m = nn.Linear(text_width, embed_dim)

        self.model_pairs = [[self.visual_encoder, self.visual_encoder_m],
                            [self.vision_proj, self.vision_proj_m],
                            [self.text_encoder, self.text_encoder_m],
                            [self.text_proj, self.text_proj_m],
                            ]
        self.copy_params()

        # create the queue
        self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

        self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
        self.text_queue = nn.functional.normalize(self.text_queue, dim=0)

        self.queue_size = queue_size
        self.momentum = momentum
        self.temp = nn.Parameter(0.07 * torch.ones([]))

        # create the decoder
        decoder_config = BertConfig.from_json_file(med_config)
        decoder_config.encoder_width = vision_width
        self.text_decoder = BertLMHeadModel.from_pretrained('bert-base-uncased', config=decoder_config)
        self.text_decoder.resize_token_embeddings(len(self.tokenizer))
        tie_encoder_decoder_weights(self.text_decoder.bert, self.text_encoder, '', '/attention')

    def forward(self, image, caption, alpha):
        with torch.no_grad():
            self.temp.clamp_(0.001, 0.5)

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)

        text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=30,
                              return_tensors="pt").to(image.device)
        text_output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask,
                                        return_dict=True, mode='text')
        text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)

        # get momentum features
        with torch.no_grad():
            self._momentum_update()
            image_embeds_m = self.visual_encoder_m(image)
            image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1)
            image_feat_all = torch.cat([image_feat_m.t(), self.image_queue.clone().detach()], dim=1)

            text_output_m = self.text_encoder_m(text.input_ids, attention_mask=text.attention_mask,
                                                return_dict=True, mode='text')
            text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:, 0, :]), dim=-1)
            text_feat_all = torch.cat([text_feat_m.t(), self.text_queue.clone().detach()], dim=1)

            sim_i2t_m = image_feat_m @ text_feat_all / self.temp
            sim_t2i_m = text_feat_m @ image_feat_all / self.temp

            sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
            sim_targets.fill_diagonal_(1)

            sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
            sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets

        sim_i2t = image_feat @ text_feat_all / self.temp
        sim_t2i = text_feat @ image_feat_all / self.temp

        loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1).mean()
        loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1).mean()

        loss_ita = (loss_i2t + loss_t2i) / 2

        self._dequeue_and_enqueue(image_feat_m, text_feat_m)

        ###============== Image-text Matching ===================###
        encoder_input_ids = text.input_ids.clone()
        encoder_input_ids[:, 0] = self.tokenizer.enc_token_id

        # forward the positive image-text pair
        bs = image.size(0)
        output_pos = self.text_encoder(encoder_input_ids,
                                       attention_mask=text.attention_mask,
                                       encoder_hidden_states=image_embeds,
                                       encoder_attention_mask=image_atts,
                                       return_dict=True,
                                       )
        with torch.no_grad():
            weights_t2i = F.softmax(sim_t2i[:, :bs], dim=1) + 1e-4
            weights_t2i.fill_diagonal_(0)
            weights_i2t = F.softmax(sim_i2t[:, :bs], dim=1) + 1e-4
            weights_i2t.fill_diagonal_(0)

        # select a negative image for each text
        image_embeds_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_t2i[b], 1).item()
            image_embeds_neg.append(image_embeds[neg_idx])
        image_embeds_neg = torch.stack(image_embeds_neg, dim=0)

        # select a negative text for each image
        text_ids_neg = []
        text_atts_neg = []
        for b in range(bs):
            neg_idx = torch.multinomial(weights_i2t[b], 1).item()
            text_ids_neg.append(encoder_input_ids[neg_idx])
            text_atts_neg.append(text.attention_mask[neg_idx])

        text_ids_neg = torch.stack(text_ids_neg, dim=0)
        text_atts_neg = torch.stack(text_atts_neg, dim=0)

        text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
        text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)

        image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
        image_atts_all = torch.cat([image_atts, image_atts], dim=0)

        output_neg = self.text_encoder(text_ids_all,
                                       attention_mask=text_atts_all,
                                       encoder_hidden_states=image_embeds_all,
                                       encoder_attention_mask=image_atts_all,
                                       return_dict=True,
                                       )

        vl_embeddings = torch.cat([output_pos.last_hidden_state[:, 0, :], output_neg.last_hidden_state[:, 0, :]], dim=0)
        vl_output = self.itm_head(vl_embeddings)

        itm_labels = torch.cat([torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
                               dim=0).to(image.device)
        loss_itm = F.cross_entropy(vl_output, itm_labels)

        ##================= LM ========================##
        decoder_input_ids = text.input_ids.clone()
        decoder_input_ids[:, 0] = self.tokenizer.bos_token_id
        decoder_targets = decoder_input_ids.masked_fill(decoder_input_ids == self.tokenizer.pad_token_id, -100)

        decoder_output = self.text_decoder(decoder_input_ids,
                                           attention_mask=text.attention_mask,
                                           encoder_hidden_states=image_embeds,
                                           encoder_attention_mask=image_atts,
                                           labels=decoder_targets,
                                           return_dict=True,
                                           )

        loss_lm = decoder_output.loss
        return loss_ita, loss_itm, loss_lm

    @torch.no_grad()
    def copy_params(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data.copy_(param.data)  # initialize
                param_m.requires_grad = False   # not updated by gradient

    @torch.no_grad()
    def _momentum_update(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, image_feat, text_feat):
        # gather keys before updating queue
        image_feats = concat_all_gather(image_feat)
        text_feats = concat_all_gather(text_feat)

        batch_size = image_feats.shape[0]

        ptr = int(self.queue_ptr)
        assert self.queue_size % batch_size == 0  # for simplicity

        # replace the keys at ptr (dequeue and enqueue)
        self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
        self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
        ptr = (ptr + batch_size) % self.queue_size  # move pointer

        self.queue_ptr[0] = ptr


def blip_pretrain(**kwargs):
    model = BLIP_Pretrain(**kwargs)
    return model


@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    tensors_gather = [torch.ones_like(tensor)
                      for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)

    output = torch.cat(tensors_gather, dim=0)
    return output


from typing import List
def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, skip_key: str):
    uninitialized_encoder_weights: List[str] = []
    if decoder.__class__ != encoder.__class__:
        print(
            f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
        )

    def tie_encoder_to_decoder_recursively(
        decoder_pointer: nn.Module,
        encoder_pointer: nn.Module,
        module_name: str,
        uninitialized_encoder_weights: List[str],
        skip_key: str,
        depth=0,
    ):
        assert isinstance(decoder_pointer, nn.Module) and isinstance(
            encoder_pointer, nn.Module
        ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
        if hasattr(decoder_pointer, "weight") and skip_key not in module_name:
            assert hasattr(encoder_pointer, "weight")
            encoder_pointer.weight = decoder_pointer.weight
            if hasattr(decoder_pointer, "bias"):
                assert hasattr(encoder_pointer, "bias")
                encoder_pointer.bias = decoder_pointer.bias
            print(module_name + ' is tied')
            return

        encoder_modules = encoder_pointer._modules
        decoder_modules = decoder_pointer._modules
        if len(decoder_modules) > 0:
            assert (
                len(encoder_modules) > 0
            ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

            all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
            encoder_layer_pos = 0
            for name, module in decoder_modules.items():
                if name.isdigit():
                    encoder_name = str(int(name) + encoder_layer_pos)
                    decoder_name = name
                    if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                        encoder_modules
                    ) != len(decoder_modules):
                        # this can happen if the name corresponds to the position in a module list of layers
                        # in this case the decoder has added a cross-attention that the encoder does not have
                        # thus skip this step and subtract one layer pos from encoder
                        encoder_layer_pos -= 1
                        continue
                elif name not in encoder_modules:
                    continue
                elif depth > 500:
                    raise ValueError(
                        "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                    )
                else:
                    decoder_name = encoder_name = name
                tie_encoder_to_decoder_recursively(
                    decoder_modules[decoder_name],
                    encoder_modules[encoder_name],
                    module_name + "/" + name,
                    uninitialized_encoder_weights,
                    skip_key,
                    depth=depth + 1,
                )
                all_encoder_weights.remove(module_name + "/" + encoder_name)

            uninitialized_encoder_weights += list(all_encoder_weights)

    # tie weights recursively
    tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights, skip_key)
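A hedged sketch of one pretraining step (image and caption are stand-ins: a batched image tensor and a list of strings; alpha and queue_size come from configs/pretrain.yaml). Note that concat_all_gather above requires an initialized torch.distributed process group, so this only runs under a distributed launcher.

    model = blip_pretrain(image_size=224, vit='base', queue_size=57600).cuda()
    loss_ita, loss_itm, loss_lm = model(image, caption, alpha=0.4)
    loss = loss_ita + loss_itm + loss_lm   # contrastive + matching + LM objectives
    loss.backward()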
models/blip_retrieval.py  (new file, 322 lines)
@@ -0,0 +1,322 @@
from models.med import BertConfig, BertModel
from transformers import BertTokenizer

import torch
from torch import nn
import torch.nn.functional as F

from models.blip import create_vit, init_tokenizer, load_checkpoint

class BLIP_Retrieval(nn.Module):
    def __init__(self,
                 med_config='./configs/med_config.json',
                 image_size=384,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 embed_dim=256,
                 queue_size=57600,
                 momentum=0.995,
                 negative_all_rank=False,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer)
        self.tokenizer = init_tokenizer()
        med_config = BertConfig.from_json_file(med_config)
        med_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)

        text_width = self.text_encoder.config.hidden_size

        self.vision_proj = nn.Linear(vision_width, embed_dim)
        self.text_proj = nn.Linear(text_width, embed_dim)

        self.itm_head = nn.Linear(text_width, 2)

        # create momentum encoders
        self.visual_encoder_m, vision_width = create_vit(vit, image_size)
        self.vision_proj_m = nn.Linear(vision_width, embed_dim)
        self.text_encoder_m = BertModel(config=med_config, add_pooling_layer=False)
        self.text_proj_m = nn.Linear(text_width, embed_dim)

        self.model_pairs = [[self.visual_encoder, self.visual_encoder_m],
                            [self.vision_proj, self.vision_proj_m],
                            [self.text_encoder, self.text_encoder_m],
                            [self.text_proj, self.text_proj_m],
                            ]
        self.copy_params()

        # create the queue
        self.register_buffer("image_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("text_queue", torch.randn(embed_dim, queue_size))
        self.register_buffer("idx_queue", torch.full((1, queue_size), -100))
        self.register_buffer("ptr_queue", torch.zeros(1, dtype=torch.long))

        self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
        self.text_queue = nn.functional.normalize(self.text_queue, dim=0)

        self.queue_size = queue_size
        self.momentum = momentum
        self.temp = nn.Parameter(0.07 * torch.ones([]))

        self.negative_all_rank = negative_all_rank

    def forward(self, image, caption, alpha, idx):
        with torch.no_grad():
            self.temp.clamp_(0.001, 0.5)

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
        image_feat = F.normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)

        text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=35,
                              return_tensors="pt").to(image.device)

        text_output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask,
                                        return_dict=True, mode='text')
        text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:, 0, :]), dim=-1)

        ###============== Image-text Contrastive Learning ===================###
        idx = idx.view(-1, 1)
        idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()], dim=1)
        pos_idx = torch.eq(idx, idx_all).float()
        sim_targets = pos_idx / pos_idx.sum(1, keepdim=True)

        # get momentum features
        with torch.no_grad():
            self._momentum_update()
            image_embeds_m = self.visual_encoder_m(image)
            image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:, 0, :]), dim=-1)
            image_feat_m_all = torch.cat([image_feat_m.t(), self.image_queue.clone().detach()], dim=1)

            text_output_m = self.text_encoder_m(text.input_ids, attention_mask=text.attention_mask,
                                                return_dict=True, mode='text')
            text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:, 0, :]), dim=-1)
            text_feat_m_all = torch.cat([text_feat_m.t(), self.text_queue.clone().detach()], dim=1)

            sim_i2t_m = image_feat_m @ text_feat_m_all / self.temp
            sim_t2i_m = text_feat_m @ image_feat_m_all / self.temp

            sim_targets = torch.zeros(sim_i2t_m.size()).to(image.device)
            sim_targets.fill_diagonal_(1)

            sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets
            sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets

        sim_i2t = image_feat @ text_feat_m_all / self.temp
        sim_t2i = text_feat @ image_feat_m_all / self.temp

        loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1).mean()
        loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1).mean()

        loss_ita = (loss_i2t + loss_t2i) / 2

        idxs = concat_all_gather(idx)
        self._dequeue_and_enqueue(image_feat_m, text_feat_m, idxs)

        ###============== Image-text Matching ===================###
        encoder_input_ids = text.input_ids.clone()
        encoder_input_ids[:, 0] = self.tokenizer.enc_token_id

        # forward the positive image-text pair
        bs = image.size(0)
        output_pos = self.text_encoder(encoder_input_ids,
                                       attention_mask=text.attention_mask,
                                       encoder_hidden_states=image_embeds,
                                       encoder_attention_mask=image_atts,
                                       return_dict=True,
                                       )

        if self.negative_all_rank:
            # compute sample similarity
            with torch.no_grad():
                mask = torch.eq(idx, idxs.t())

                image_feat_world = concat_all_gather(image_feat)
                text_feat_world = concat_all_gather(text_feat)

                sim_i2t = image_feat @ text_feat_world.t() / self.temp
                sim_t2i = text_feat @ image_feat_world.t() / self.temp

                weights_i2t = F.softmax(sim_i2t, dim=1)
                weights_i2t.masked_fill_(mask, 0)

                weights_t2i = F.softmax(sim_t2i, dim=1)
                weights_t2i.masked_fill_(mask, 0)

            image_embeds_world = all_gather_with_grad(image_embeds)

            # select a negative image (from all ranks) for each text
            image_embeds_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_t2i[b], 1).item()
                image_embeds_neg.append(image_embeds_world[neg_idx])
            image_embeds_neg = torch.stack(image_embeds_neg, dim=0)

            # select a negative text (from all ranks) for each image
            input_ids_world = concat_all_gather(encoder_input_ids)
            att_mask_world = concat_all_gather(text.attention_mask)

            text_ids_neg = []
            text_atts_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_i2t[b], 1).item()
                text_ids_neg.append(input_ids_world[neg_idx])
                text_atts_neg.append(att_mask_world[neg_idx])

        else:
            with torch.no_grad():
                mask = torch.eq(idx, idx.t())

                sim_i2t = image_feat @ text_feat.t() / self.temp
                sim_t2i = text_feat @ image_feat.t() / self.temp

                weights_i2t = F.softmax(sim_i2t, dim=1)
                weights_i2t.masked_fill_(mask, 0)

                weights_t2i = F.softmax(sim_t2i, dim=1)
                weights_t2i.masked_fill_(mask, 0)

            # select a negative image (from same rank) for each text
            image_embeds_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_t2i[b], 1).item()
                image_embeds_neg.append(image_embeds[neg_idx])
            image_embeds_neg = torch.stack(image_embeds_neg, dim=0)

            # select a negative text (from same rank) for each image
            text_ids_neg = []
            text_atts_neg = []
            for b in range(bs):
                neg_idx = torch.multinomial(weights_i2t[b], 1).item()
                text_ids_neg.append(encoder_input_ids[neg_idx])
                text_atts_neg.append(text.attention_mask[neg_idx])

        text_ids_neg = torch.stack(text_ids_neg, dim=0)
        text_atts_neg = torch.stack(text_atts_neg, dim=0)

        text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0)
        text_atts_all = torch.cat([text.attention_mask, text_atts_neg], dim=0)

        image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0)
        image_atts_all = torch.cat([image_atts, image_atts], dim=0)

        output_neg = self.text_encoder(text_ids_all,
                                       attention_mask=text_atts_all,
                                       encoder_hidden_states=image_embeds_all,
                                       encoder_attention_mask=image_atts_all,
                                       return_dict=True,
                                       )

        vl_embeddings = torch.cat([output_pos.last_hidden_state[:, 0, :], output_neg.last_hidden_state[:, 0, :]], dim=0)
        vl_output = self.itm_head(vl_embeddings)

        itm_labels = torch.cat([torch.ones(bs, dtype=torch.long), torch.zeros(2 * bs, dtype=torch.long)],
                               dim=0).to(image.device)
        loss_itm = F.cross_entropy(vl_output, itm_labels)

        return loss_ita, loss_itm

    @torch.no_grad()
    def copy_params(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data.copy_(param.data)  # initialize
                param_m.requires_grad = False   # not updated by gradient

    @torch.no_grad()
    def _momentum_update(self):
        for model_pair in self.model_pairs:
            for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
                param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, image_feat, text_feat, idxs):
        # gather keys before updating queue
        image_feats = concat_all_gather(image_feat)
        text_feats = concat_all_gather(text_feat)

        batch_size = image_feats.shape[0]

        ptr = int(self.ptr_queue)
        assert self.queue_size % batch_size == 0  # for simplicity

        # replace the keys at ptr (dequeue and enqueue)
        self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
        self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
        self.idx_queue[:, ptr:ptr + batch_size] = idxs.T
        ptr = (ptr + batch_size) % self.queue_size  # move pointer

        self.ptr_queue[0] = ptr


def blip_retrieval(pretrained='', **kwargs):
    model = BLIP_Retrieval(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        print("missing keys:")
        print(msg.missing_keys)
    return model


@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    tensors_gather = [torch.ones_like(tensor)
                      for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)

    output = torch.cat(tensors_gather, dim=0)
    return output


class GatherLayer(torch.autograd.Function):
    """
    Gather tensors from all workers with support for backward propagation:
    This implementation does not cut the gradients as torch.distributed.all_gather does.
    """

    @staticmethod
    def forward(ctx, x):
        output = [torch.zeros_like(x) for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(output, x)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        all_gradients = torch.stack(grads)
        torch.distributed.all_reduce(all_gradients)
        return all_gradients[torch.distributed.get_rank()]


def all_gather_with_grad(tensors):
    """
    Performs all_gather operation on the provided tensors.
    Graph remains connected for backward grad computation.
    """
    # Queue the gathered tensors
    world_size = torch.distributed.get_world_size()
    # There is no need for reduction in the single-proc case
    if world_size == 1:
        return tensors

    tensor_all = GatherLayer.apply(tensors)

    return torch.cat(tensor_all, dim=0)
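A standalone sketch of the ring-buffer queue update performed by _dequeue_and_enqueue above (dimensions and sizes are the ones used in this file; queue_size must be divisible by the gathered batch size):

    import torch

    embed_dim, queue_size, batch = 256, 57600, 32
    queue = torch.randn(embed_dim, queue_size)   # columns are stored features
    ptr = 0
    feats = torch.randn(batch, embed_dim)        # gathered features for one step
    queue[:, ptr:ptr + batch] = feats.T          # overwrite the oldest entries
    ptr = (ptr + batch) % queue_size             # advance the write pointer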
186
models/blip_vqa.py
Normal file
186
models/blip_vqa.py
Normal file
@@ -0,0 +1,186 @@
from models.med import BertConfig, BertModel, BertLMHeadModel
from models.blip import create_vit, init_tokenizer, load_checkpoint

import torch
from torch import nn
import torch.nn.functional as F
from transformers import BertTokenizer
import numpy as np


class BLIP_VQA(nn.Module):
    def __init__(self,
                 med_config='./configs/med_config.json',
                 image_size=480,
                 vit='base',
                 vit_grad_ckpt=False,
                 vit_ckpt_layer=0,
                 ):
        """
        Args:
            med_config (str): path for the mixture of encoder-decoder model's configuration file
            image_size (int): input image size
            vit (str): model size of vision transformer
        """
        super().__init__()

        self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer, drop_path_rate=0.1)
        self.tokenizer = init_tokenizer()

        encoder_config = BertConfig.from_json_file(med_config)
        encoder_config.encoder_width = vision_width
        self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False)

        decoder_config = BertConfig.from_json_file(med_config)
        self.text_decoder = BertLMHeadModel(config=decoder_config)

    def forward(self, image, question, answer=None, n=None, weights=None, train=True, inference='rank', k_test=128):

        image_embeds = self.visual_encoder(image)
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)

        question = self.tokenizer(question, padding='longest', truncation=True, max_length=35,
                                  return_tensors="pt").to(image.device)
        question.input_ids[:, 0] = self.tokenizer.enc_token_id

        if train:
            '''
            n: number of answers for each question
            weights: weight for each answer
            '''
            answer = self.tokenizer(answer, padding='longest', return_tensors="pt").to(image.device)
            answer.input_ids[:, 0] = self.tokenizer.bos_token_id
            answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100)

            question_output = self.text_encoder(question.input_ids,
                                                attention_mask=question.attention_mask,
                                                encoder_hidden_states=image_embeds,
                                                encoder_attention_mask=image_atts,
                                                return_dict=True)

            # replicate each question's states once per ground-truth answer
            # (loop variable renamed from `n` to avoid shadowing the argument)
            question_states = []
            question_atts = []
            for b, num_ans in enumerate(n):
                question_states += [question_output.last_hidden_state[b]] * num_ans
                question_atts += [question.attention_mask[b]] * num_ans
            question_states = torch.stack(question_states, 0)
            question_atts = torch.stack(question_atts, 0)

            answer_output = self.text_decoder(answer.input_ids,
                                              attention_mask=answer.attention_mask,
                                              encoder_hidden_states=question_states,
                                              encoder_attention_mask=question_atts,
                                              labels=answer_targets,
                                              return_dict=True,
                                              reduction='none',
                                              )

            loss = weights * answer_output.loss
            loss = loss.sum() / image.size(0)

            return loss

        else:
            question_output = self.text_encoder(question.input_ids,
                                                attention_mask=question.attention_mask,
                                                encoder_hidden_states=image_embeds,
                                                encoder_attention_mask=image_atts,
                                                return_dict=True)

            if inference == 'generate':
                num_beams = 3
                question_states = question_output.last_hidden_state.repeat_interleave(num_beams, dim=0)
                question_atts = torch.ones(question_states.size()[:-1], dtype=torch.long).to(question_states.device)
                model_kwargs = {"encoder_hidden_states": question_states, "encoder_attention_mask": question_atts}

                bos_ids = torch.full((image.size(0), 1), fill_value=self.tokenizer.bos_token_id, device=image.device)

                outputs = self.text_decoder.generate(input_ids=bos_ids,
                                                     max_length=10,
                                                     min_length=1,
                                                     num_beams=num_beams,
                                                     eos_token_id=self.tokenizer.sep_token_id,
                                                     pad_token_id=self.tokenizer.pad_token_id,
                                                     **model_kwargs)

                answers = []
                for output in outputs:
                    answer = self.tokenizer.decode(output, skip_special_tokens=True)
                    answers.append(answer)
                return answers

            elif inference == 'rank':
                max_ids = self.rank_answer(question_output.last_hidden_state, question.attention_mask,
                                           answer.input_ids, answer.attention_mask, k_test)
                return max_ids

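    # --- note (illustration, not part of the model) ---
    # rank_answer scores a closed answer list in two passes: (1) decode one
    # step from the bos token and keep only the k answers whose *first* token
    # is most probable, then (2) fully decode just those k candidates and pick
    # the one with the highest summed log-likelihood. A hedged sketch of the
    # savings, with hypothetical shapes (num_ques=2, k=3):
    #
    #   prob_first_token: [2, num_answers] -> topk -> topk_ids: [2, 3]
    #   full decoding then runs on 2*3 sequences instead of 2*num_answers
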
    def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k):

        num_ques = question_states.size(0)
        start_ids = answer_ids[0, 0].repeat(num_ques, 1)  # bos token

        start_output = self.text_decoder(start_ids,
                                         encoder_hidden_states=question_states,
                                         encoder_attention_mask=question_atts,
                                         return_dict=True,
                                         reduction='none')
        logits = start_output.logits[:, 0, :]  # first token's logit

        # topk_probs: top-k probability
        # topk_ids: [num_question, k]
        answer_first_token = answer_ids[:, 1]
        prob_first_token = F.softmax(logits, dim=1).index_select(dim=1, index=answer_first_token)
        topk_probs, topk_ids = prob_first_token.topk(k, dim=1)

        # answer input: [num_question*k, answer_len]
        input_ids = []
        input_atts = []
        for b, topk_id in enumerate(topk_ids):
            input_ids.append(answer_ids.index_select(dim=0, index=topk_id))
            input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
        input_ids = torch.cat(input_ids, dim=0)
        input_atts = torch.cat(input_atts, dim=0)

        targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100)

        # repeat encoder's output for top-k answers
        question_states = tile(question_states, 0, k)
        question_atts = tile(question_atts, 0, k)

        output = self.text_decoder(input_ids,
                                   attention_mask=input_atts,
                                   encoder_hidden_states=question_states,
                                   encoder_attention_mask=question_atts,
                                   labels=targets_ids,
                                   return_dict=True,
                                   reduction='none')

        log_probs_sum = -output.loss
        log_probs_sum = log_probs_sum.view(num_ques, k)

        max_topk_ids = log_probs_sum.argmax(dim=1)
        # advanced indexing: per question, pick the candidate with the best score
        # (the always-true boolean mask acts like torch.arange(num_ques) here)
        max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids]

        return max_ids


def blip_vqa(pretrained='', **kwargs):
    model = BLIP_VQA(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        # assert(len(msg.missing_keys)==0)
    return model


def tile(x, dim, n_tile):
    init_dim = x.size(dim)
    repeat_idx = [1] * x.dim()
    repeat_idx[dim] = n_tile
    x = x.repeat(*(repeat_idx))
    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))
    return torch.index_select(x, dim, order_index.to(x.device))

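# --- illustration (not part of the model): what tile() does ---
# A minimal sketch: tile(x, 0, k) repeats each row k times in place, i.e. it
# behaves like torch.repeat_interleave(x, k, dim=0). With an assumed 2-row
# input and n_tile=2:
#
#   x = torch.tensor([[1, 2], [3, 4]])
#   tile(x, 0, 2)                          # -> [[1, 2], [1, 2], [3, 4], [3, 4]]
#   torch.repeat_interleave(x, 2, dim=0)   # same result
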
955
models/med.py
Normal file
@@ -0,0 +1,955 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
 * Based on huggingface code base
 * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
'''

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.file_utils import (
    ModelOutput,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig


logger = logging.get_logger(__name__)

class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs

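# --- illustration (not part of the model): head reshaping in self-attention ---
# A hedged sketch of transpose_for_scores with assumed sizes (batch=2, seq=5,
# hidden=768, 12 heads of size 64; all hypothetical):
#
#   x: [2, 5, 768] -> view -> [2, 5, 12, 64] -> permute(0, 2, 1, 3) -> [2, 12, 5, 64]
#
# so attention_scores = q @ k^T has shape [2, 12, 5, 5]: one score matrix per head.
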
class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.layer_num = layer_num
        if self.config.add_cross_attention:
            self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        mode=None,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]

        if mode == 'multimodal':
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"

            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        mode='multimodal',
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    mode=mode,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    mode=mode,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )

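# --- illustration (not part of the model): use_cache vs. gradient checkpointing ---
# Gradient checkpointing recomputes each layer's forward during backward, so a
# key/value cache built in the first forward would be recomputed/duplicated;
# the encoder therefore forces use_cache=False. A hedged sketch with
# hypothetical inputs:
#
#   encoder = BertEncoder(config)
#   encoder.gradient_checkpointing = True
#   encoder.train()
#   out = encoder(hidden_states, use_cache=True)  # logs the warning, then runs with use_cache=False
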
class BertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To be used as a decoder, the model needs to be initialized with the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)

        self.encoder = BertEncoder(config)

        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape

                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

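    # --- illustration (not part of the model): what the extended mask looks like ---
    # A minimal sketch with an assumed padding mask [1, 1, 0] (hypothetical):
    #
    #   encoder case: [1, 1, 0] -> [0, 0, -10000] with shape [1, 1, 1, 3]
    #   decoder case: a lower-triangular causal mask is multiplied in first, so
    #   position i may only attend to non-padding positions j <= i.
    #
    # Adding -10000 to the raw scores before softmax drives the masked
    # probabilities to ~0 without an explicit masking op.
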
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids, inputs_embeds or encoder_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


class BertLMHeadModel(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        return_logits=False,
        is_decoder=True,
        reduction='mean',
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
            ignored (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past

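# --- illustration (not part of the model): cache reordering during beam search ---
# A hedged sketch: after each beam-search step, generate() permutes the batch
# dimension of every cached key/value with beam_idx so cache rows keep following
# their beams. With a hypothetical beam_idx = torch.tensor([1, 0]):
#
#   past_state: [2, heads, seq, dim] -> past_state.index_select(0, beam_idx)
#   # row 0 now holds what beam 1 had cached, and vice versa
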
843
models/nlvr_encoder.py
Normal file
@@ -0,0 +1,843 @@
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.file_utils import (
    ModelOutput,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig


logger = logging.get_logger(__name__)


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertSelfAttention(nn.Module):
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs

class BertSelfOutput(nn.Module):
|
||||
def __init__(self, config, twin=False, merge=False):
|
||||
super().__init__()
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
||||
if twin:
|
||||
self.dense0 = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
self.dense1 = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
else:
|
||||
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
if merge:
|
||||
self.act = ACT2FN[config.hidden_act]
|
||||
self.merge_layer = nn.Linear(config.hidden_size * 2, config.hidden_size)
|
||||
self.merge = True
|
||||
else:
|
||||
self.merge = False
|
||||
|
||||
def forward(self, hidden_states, input_tensor):
|
||||
if type(hidden_states) == list:
|
||||
hidden_states0 = self.dense0(hidden_states[0])
|
||||
hidden_states1 = self.dense1(hidden_states[1])
|
||||
if self.merge:
|
||||
#hidden_states = self.merge_layer(self.act(torch.cat([hidden_states0,hidden_states1],dim=-1)))
|
||||
hidden_states = self.merge_layer(torch.cat([hidden_states0,hidden_states1],dim=-1))
|
||||
else:
|
||||
hidden_states = (hidden_states0+hidden_states1)/2
|
||||
else:
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
||||
return hidden_states
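# A minimal sketch (not from the repo) of the twin path above: with twin=True and merge=False the
# two cross-attention streams are projected by dense0/dense1 and averaged before the residual
# add + LayerNorm. The SimpleNamespace config is an assumption for illustration only; the repo
# passes a full BertConfig.
from types import SimpleNamespace
import torch

_cfg = SimpleNamespace(hidden_size=768, layer_norm_eps=1e-12, hidden_dropout_prob=0.1)
_twin_out = BertSelfOutput(_cfg, twin=True, merge=False)
_stream0 = torch.randn(2, 12, 768)       # cross-attention output over image 0
_stream1 = torch.randn(2, 12, 768)       # cross-attention output over image 1
_text = torch.randn(2, 12, 768)          # residual input (text hidden states)
_merged = _twin_out([_stream0, _stream1], _text)   # -> (2, 12, 768)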
|
||||
|
||||
|
||||
class BertAttention(nn.Module):
|
||||
def __init__(self, config, is_cross_attention=False, layer_num=-1):
|
||||
super().__init__()
|
||||
if is_cross_attention:
|
||||
self.self0 = BertSelfAttention(config, is_cross_attention)
|
||||
self.self1 = BertSelfAttention(config, is_cross_attention)
|
||||
else:
|
||||
self.self = BertSelfAttention(config, is_cross_attention)
|
||||
self.output = BertSelfOutput(config, twin=is_cross_attention, merge=(is_cross_attention and layer_num>=6))
|
||||
self.pruned_heads = set()
|
||||
|
||||
def prune_heads(self, heads):
|
||||
if len(heads) == 0:
|
||||
return
|
||||
heads, index = find_pruneable_heads_and_indices(
|
||||
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
|
||||
)
|
||||
|
||||
# Prune linear layers
|
||||
self.self.query = prune_linear_layer(self.self.query, index)
|
||||
self.self.key = prune_linear_layer(self.self.key, index)
|
||||
self.self.value = prune_linear_layer(self.self.value, index)
|
||||
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
|
||||
|
||||
# Update hyper params and store pruned heads
|
||||
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
|
||||
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
|
||||
self.pruned_heads = self.pruned_heads.union(heads)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_value=None,
|
||||
output_attentions=False,
|
||||
):
|
||||
if type(encoder_hidden_states)==list:
|
||||
self_outputs0 = self.self0(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
encoder_hidden_states[0],
|
||||
encoder_attention_mask[0],
|
||||
past_key_value,
|
||||
output_attentions,
|
||||
)
|
||||
self_outputs1 = self.self1(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
encoder_hidden_states[1],
|
||||
encoder_attention_mask[1],
|
||||
past_key_value,
|
||||
output_attentions,
|
||||
)
|
||||
attention_output = self.output([self_outputs0[0],self_outputs1[0]], hidden_states)
|
||||
|
||||
outputs = (attention_output,) + self_outputs0[1:] # add attentions if we output them
|
||||
else:
|
||||
self_outputs = self.self(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
past_key_value,
|
||||
output_attentions,
|
||||
)
|
||||
attention_output = self.output(self_outputs[0], hidden_states)
|
||||
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
|
||||
return outputs
|
||||
|
||||
|
||||
class BertIntermediate(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
|
||||
if isinstance(config.hidden_act, str):
|
||||
self.intermediate_act_fn = ACT2FN[config.hidden_act]
|
||||
else:
|
||||
self.intermediate_act_fn = config.hidden_act
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.intermediate_act_fn(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertOutput(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
self.dropout = nn.Dropout(config.hidden_dropout_prob)
|
||||
|
||||
def forward(self, hidden_states, input_tensor):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.dropout(hidden_states)
|
||||
hidden_states = self.LayerNorm(hidden_states + input_tensor)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertLayer(nn.Module):
|
||||
def __init__(self, config, layer_num):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
self.chunk_size_feed_forward = config.chunk_size_feed_forward
|
||||
self.seq_len_dim = 1
|
||||
self.attention = BertAttention(config)
|
||||
self.layer_num = layer_num
|
||||
if self.config.add_cross_attention:
|
||||
self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention, layer_num=layer_num)
|
||||
self.intermediate = BertIntermediate(config)
|
||||
self.output = BertOutput(config)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_value=None,
|
||||
output_attentions=False,
|
||||
mode=None,
|
||||
):
|
||||
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
|
||||
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
|
||||
self_attention_outputs = self.attention(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
output_attentions=output_attentions,
|
||||
past_key_value=self_attn_past_key_value,
|
||||
)
|
||||
attention_output = self_attention_outputs[0]
|
||||
|
||||
outputs = self_attention_outputs[1:-1]
|
||||
present_key_value = self_attention_outputs[-1]
|
||||
|
||||
if mode=='multimodal':
|
||||
assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
|
||||
cross_attention_outputs = self.crossattention(
|
||||
attention_output,
|
||||
attention_mask,
|
||||
head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
output_attentions=output_attentions,
|
||||
)
|
||||
attention_output = cross_attention_outputs[0]
|
||||
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
|
||||
layer_output = apply_chunking_to_forward(
|
||||
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
|
||||
)
|
||||
outputs = (layer_output,) + outputs
|
||||
|
||||
outputs = outputs + (present_key_value,)
|
||||
|
||||
return outputs
|
||||
|
||||
def feed_forward_chunk(self, attention_output):
|
||||
intermediate_output = self.intermediate(attention_output)
|
||||
layer_output = self.output(intermediate_output, attention_output)
|
||||
return layer_output
|
||||
|
||||
|
||||
class BertEncoder(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.config = config
|
||||
self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])
|
||||
self.gradient_checkpointing = False
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states,
|
||||
attention_mask=None,
|
||||
head_mask=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_values=None,
|
||||
use_cache=None,
|
||||
output_attentions=False,
|
||||
output_hidden_states=False,
|
||||
return_dict=True,
|
||||
mode='multimodal',
|
||||
):
|
||||
all_hidden_states = () if output_hidden_states else None
|
||||
all_self_attentions = () if output_attentions else None
|
||||
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
|
||||
|
||||
next_decoder_cache = () if use_cache else None
|
||||
|
||||
for i in range(self.config.num_hidden_layers):
|
||||
layer_module = self.layer[i]
|
||||
if output_hidden_states:
|
||||
all_hidden_states = all_hidden_states + (hidden_states,)
|
||||
|
||||
layer_head_mask = head_mask[i] if head_mask is not None else None
|
||||
past_key_value = past_key_values[i] if past_key_values is not None else None
|
||||
|
||||
if self.gradient_checkpointing and self.training:
|
||||
|
||||
if use_cache:
|
||||
logger.warn(
|
||||
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
||||
)
|
||||
use_cache = False
|
||||
|
||||
def create_custom_forward(module):
|
||||
def custom_forward(*inputs):
|
||||
return module(*inputs, past_key_value, output_attentions)
|
||||
|
||||
return custom_forward
|
||||
|
||||
layer_outputs = torch.utils.checkpoint.checkpoint(
|
||||
create_custom_forward(layer_module),
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
layer_head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
mode=mode,
|
||||
)
|
||||
else:
|
||||
layer_outputs = layer_module(
|
||||
hidden_states,
|
||||
attention_mask,
|
||||
layer_head_mask,
|
||||
encoder_hidden_states,
|
||||
encoder_attention_mask,
|
||||
past_key_value,
|
||||
output_attentions,
|
||||
mode=mode,
|
||||
)
|
||||
|
||||
hidden_states = layer_outputs[0]
|
||||
if use_cache:
|
||||
next_decoder_cache += (layer_outputs[-1],)
|
||||
if output_attentions:
|
||||
all_self_attentions = all_self_attentions + (layer_outputs[1],)
|
||||
|
||||
if output_hidden_states:
|
||||
all_hidden_states = all_hidden_states + (hidden_states,)
|
||||
|
||||
if not return_dict:
|
||||
return tuple(
|
||||
v
|
||||
for v in [
|
||||
hidden_states,
|
||||
next_decoder_cache,
|
||||
all_hidden_states,
|
||||
all_self_attentions,
|
||||
all_cross_attentions,
|
||||
]
|
||||
if v is not None
|
||||
)
|
||||
return BaseModelOutputWithPastAndCrossAttentions(
|
||||
last_hidden_state=hidden_states,
|
||||
past_key_values=next_decoder_cache,
|
||||
hidden_states=all_hidden_states,
|
||||
attentions=all_self_attentions,
|
||||
cross_attentions=all_cross_attentions,
|
||||
)
|
||||
|
||||
|
||||
class BertPooler(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
self.activation = nn.Tanh()
|
||||
|
||||
def forward(self, hidden_states):
|
||||
# We "pool" the model by simply taking the hidden state corresponding
|
||||
# to the first token.
|
||||
first_token_tensor = hidden_states[:, 0]
|
||||
pooled_output = self.dense(first_token_tensor)
|
||||
pooled_output = self.activation(pooled_output)
|
||||
return pooled_output
|
||||
|
||||
|
||||
class BertPredictionHeadTransform(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
|
||||
if isinstance(config.hidden_act, str):
|
||||
self.transform_act_fn = ACT2FN[config.hidden_act]
|
||||
else:
|
||||
self.transform_act_fn = config.hidden_act
|
||||
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_states = self.dense(hidden_states)
|
||||
hidden_states = self.transform_act_fn(hidden_states)
|
||||
hidden_states = self.LayerNorm(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertLMPredictionHead(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.transform = BertPredictionHeadTransform(config)
|
||||
|
||||
# The output weights are the same as the input embeddings, but there is
|
||||
# an output-only bias for each token.
|
||||
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
||||
|
||||
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
|
||||
|
||||
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
|
||||
self.decoder.bias = self.bias
|
||||
|
||||
def forward(self, hidden_states):
|
||||
hidden_states = self.transform(hidden_states)
|
||||
hidden_states = self.decoder(hidden_states)
|
||||
return hidden_states
|
||||
|
||||
|
||||
class BertOnlyMLMHead(nn.Module):
|
||||
def __init__(self, config):
|
||||
super().__init__()
|
||||
self.predictions = BertLMPredictionHead(config)
|
||||
|
||||
def forward(self, sequence_output):
|
||||
prediction_scores = self.predictions(sequence_output)
|
||||
return prediction_scores
|
||||
|
||||
|
||||
class BertPreTrainedModel(PreTrainedModel):
|
||||
"""
|
||||
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
||||
models.
|
||||
"""
|
||||
|
||||
config_class = BertConfig
|
||||
base_model_prefix = "bert"
|
||||
_keys_to_ignore_on_load_missing = [r"position_ids"]
|
||||
|
||||
def _init_weights(self, module):
|
||||
""" Initialize the weights """
|
||||
if isinstance(module, (nn.Linear, nn.Embedding)):
|
||||
# Slightly different from the TF version which uses truncated_normal for initialization
|
||||
# cf https://github.com/pytorch/pytorch/pull/5617
|
||||
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
||||
elif isinstance(module, nn.LayerNorm):
|
||||
module.bias.data.zero_()
|
||||
module.weight.data.fill_(1.0)
|
||||
if isinstance(module, nn.Linear) and module.bias is not None:
|
||||
module.bias.data.zero_()
|
||||
|
||||
|
||||
class BertModel(BertPreTrainedModel):
|
||||
"""
|
||||
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
|
||||
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
|
||||
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
|
||||
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
|
||||
input to the forward pass.
|
||||
"""
|
||||
|
||||
def __init__(self, config, add_pooling_layer=True):
|
||||
super().__init__(config)
|
||||
self.config = config
|
||||
|
||||
self.embeddings = BertEmbeddings(config)
|
||||
|
||||
self.encoder = BertEncoder(config)
|
||||
|
||||
self.pooler = BertPooler(config) if add_pooling_layer else None
|
||||
|
||||
self.init_weights()
|
||||
|
||||
|
||||
def get_input_embeddings(self):
|
||||
return self.embeddings.word_embeddings
|
||||
|
||||
def set_input_embeddings(self, value):
|
||||
self.embeddings.word_embeddings = value
|
||||
|
||||
def _prune_heads(self, heads_to_prune):
|
||||
"""
|
||||
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
|
||||
class PreTrainedModel
|
||||
"""
|
||||
for layer, heads in heads_to_prune.items():
|
||||
self.encoder.layer[layer].attention.prune_heads(heads)
|
||||
|
||||
|
||||
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
|
||||
"""
|
||||
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
|
||||
|
||||
Arguments:
|
||||
attention_mask (:obj:`torch.Tensor`):
|
||||
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
|
||||
input_shape (:obj:`Tuple[int]`):
|
||||
The shape of the input to the model.
|
||||
device: (:obj:`torch.device`):
|
||||
The device of the input to the model.
|
||||
|
||||
Returns:
|
||||
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
|
||||
"""
|
||||
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
||||
# ourselves in which case we just need to make it broadcastable to all heads.
|
||||
if attention_mask.dim() == 3:
|
||||
extended_attention_mask = attention_mask[:, None, :, :]
|
||||
elif attention_mask.dim() == 2:
|
||||
# Provided a padding mask of dimensions [batch_size, seq_length]
|
||||
# - if the model is a decoder, apply a causal mask in addition to the padding mask
|
||||
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
||||
if is_decoder:
|
||||
batch_size, seq_length = input_shape
|
||||
|
||||
seq_ids = torch.arange(seq_length, device=device)
|
||||
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
|
||||
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
|
||||
# causal and attention masks must have same type with pytorch version < 1.3
|
||||
causal_mask = causal_mask.to(attention_mask.dtype)
|
||||
|
||||
if causal_mask.shape[1] < attention_mask.shape[1]:
|
||||
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
|
||||
causal_mask = torch.cat(
|
||||
[
|
||||
torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
|
||||
causal_mask,
|
||||
],
|
||||
axis=-1,
|
||||
)
|
||||
|
||||
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
|
||||
else:
|
||||
extended_attention_mask = attention_mask[:, None, None, :]
|
||||
else:
|
||||
raise ValueError(
|
||||
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
|
||||
input_shape, attention_mask.shape
|
||||
)
|
||||
)
|
||||
|
||||
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
|
||||
# masked positions, this operation will create a tensor which is 0.0 for
|
||||
# positions we want to attend and -10000.0 for masked positions.
|
||||
# Since we are adding it to the raw scores before the softmax, this is
|
||||
# effectively the same as removing these entirely.
|
||||
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
|
||||
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
|
||||
return extended_attention_mask
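# Toy illustration (not part of the file) of the decoder branch above for seq_length=4 with no
# padding: position i may attend to positions 0..i, and disallowed entries become -10000.0 once
# the (1.0 - mask) * -10000.0 transform is applied.
#
#   causal_mask =            additive mask =
#   [[1, 0, 0, 0],           [[    0., -10000., -10000., -10000.],
#    [1, 1, 0, 0],            [    0.,      0., -10000., -10000.],
#    [1, 1, 1, 0],            [    0.,      0.,      0., -10000.],
#    [1, 1, 1, 1]]            [    0.,      0.,      0.,      0.]]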
|
||||
|
||||
def forward(
|
||||
self,
|
||||
input_ids=None,
|
||||
attention_mask=None,
|
||||
position_ids=None,
|
||||
head_mask=None,
|
||||
inputs_embeds=None,
|
||||
encoder_embeds=None,
|
||||
encoder_hidden_states=None,
|
||||
encoder_attention_mask=None,
|
||||
past_key_values=None,
|
||||
use_cache=None,
|
||||
output_attentions=None,
|
||||
output_hidden_states=None,
|
||||
return_dict=None,
|
||||
is_decoder=False,
|
||||
mode='multimodal',
|
||||
):
|
||||
r"""
|
||||
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
|
||||
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
|
||||
the model is configured as a decoder.
|
||||
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
|
||||
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
|
||||
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
|
||||
- 1 for tokens that are **not masked**,
|
||||
- 0 for tokens that are **masked**.
|
||||
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
|
||||
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
|
||||
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
|
||||
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
|
||||
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
|
||||
use_cache (:obj:`bool`, `optional`):
|
||||
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
|
||||
decoding (see :obj:`past_key_values`).
|
||||
"""
|
||||
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
||||
output_hidden_states = (
|
||||
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
||||
)
|
||||
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
||||
|
||||
if is_decoder:
|
||||
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
||||
else:
|
||||
use_cache = False
|
||||
|
||||
if input_ids is not None and inputs_embeds is not None:
|
||||
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
|
||||
elif input_ids is not None:
|
||||
input_shape = input_ids.size()
|
||||
batch_size, seq_length = input_shape
|
||||
device = input_ids.device
|
||||
elif inputs_embeds is not None:
|
||||
input_shape = inputs_embeds.size()[:-1]
|
||||
batch_size, seq_length = input_shape
|
||||
device = inputs_embeds.device
|
||||
elif encoder_embeds is not None:
|
||||
input_shape = encoder_embeds.size()[:-1]
|
||||
batch_size, seq_length = input_shape
|
||||
device = encoder_embeds.device
|
||||
else:
|
||||
raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
|
||||
|
||||
# past_key_values_length
|
||||
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
|
||||
|
||||
if attention_mask is None:
|
||||
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
|
||||
|
||||
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
||||
# ourselves in which case we just need to make it broadcastable to all heads.
|
||||
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
|
||||
device, is_decoder)
|
||||
|
||||
# If a 2D or 3D attention mask is provided for the cross-attention
|
||||
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
|
||||
if encoder_hidden_states is not None:
|
||||
if type(encoder_hidden_states) == list:
|
||||
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
|
||||
else:
|
||||
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
|
||||
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
|
||||
|
||||
if type(encoder_attention_mask) == list:
|
||||
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
|
||||
elif encoder_attention_mask is None:
|
||||
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
|
||||
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
||||
else:
|
||||
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
|
||||
else:
|
||||
encoder_extended_attention_mask = None
|
||||
|
||||
# Prepare head mask if needed
|
||||
# 1.0 in head_mask indicate we keep the head
|
||||
# attention_probs has shape bsz x n_heads x N x N
|
||||
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
|
||||
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
|
||||
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
||||
|
||||
if encoder_embeds is None:
|
||||
embedding_output = self.embeddings(
|
||||
input_ids=input_ids,
|
||||
position_ids=position_ids,
|
||||
inputs_embeds=inputs_embeds,
|
||||
past_key_values_length=past_key_values_length,
|
||||
)
|
||||
else:
|
||||
embedding_output = encoder_embeds
|
||||
|
||||
encoder_outputs = self.encoder(
|
||||
embedding_output,
|
||||
attention_mask=extended_attention_mask,
|
||||
head_mask=head_mask,
|
||||
encoder_hidden_states=encoder_hidden_states,
|
||||
encoder_attention_mask=encoder_extended_attention_mask,
|
||||
past_key_values=past_key_values,
|
||||
use_cache=use_cache,
|
||||
output_attentions=output_attentions,
|
||||
output_hidden_states=output_hidden_states,
|
||||
return_dict=return_dict,
|
||||
mode=mode,
|
||||
)
|
||||
sequence_output = encoder_outputs[0]
|
||||
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
|
||||
|
||||
if not return_dict:
|
||||
return (sequence_output, pooled_output) + encoder_outputs[1:]
|
||||
|
||||
return BaseModelOutputWithPoolingAndCrossAttentions(
|
||||
last_hidden_state=sequence_output,
|
||||
pooler_output=pooled_output,
|
||||
past_key_values=encoder_outputs.past_key_values,
|
||||
hidden_states=encoder_outputs.hidden_states,
|
||||
attentions=encoder_outputs.attentions,
|
||||
cross_attentions=encoder_outputs.cross_attentions,
|
||||
)
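# A minimal usage sketch (illustrative, not from the repo). This encoder variant expects a *list*
# of two image feature tensors for cross-attention (one per image), matching the twin self0/self1
# branches above. BertModel refers to the class defined above; the config values and shapes are
# assumptions for illustration.
import torch
from transformers import BertConfig

cfg = BertConfig(add_cross_attention=True)
cfg.encoder_width = cfg.hidden_size              # width of the visual features
text_encoder = BertModel(config=cfg, add_pooling_layer=False)

input_ids = torch.randint(0, cfg.vocab_size, (2, 12))
attn_mask = torch.ones_like(input_ids)
img0 = torch.randn(2, 577, cfg.encoder_width)    # e.g. ViT-B/16 features: 1 + 24*24 tokens
img1 = torch.randn(2, 577, cfg.encoder_width)
img_atts = torch.ones(2, 577, dtype=torch.long)

out = text_encoder(input_ids,
                   attention_mask=attn_mask,
                   encoder_hidden_states=[img0, img1],
                   encoder_attention_mask=[img_atts, img_atts],
                   return_dict=True,
                   mode='multimodal')
print(out.last_hidden_state.shape)               # torch.Size([2, 12, 768])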
305
models/vit.py
Normal file
@@ -0,0 +1,305 @@
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
* Based on timm code base
|
||||
* https://github.com/rwightman/pytorch-image-models/tree/master/timm
|
||||
'''
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from functools import partial
|
||||
|
||||
from timm.models.vision_transformer import _cfg, PatchEmbed
|
||||
from timm.models.registry import register_model
|
||||
from timm.models.layers import trunc_normal_, DropPath
|
||||
from timm.models.helpers import named_apply, adapt_input_conv
|
||||
|
||||
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
|
||||
|
||||
class Mlp(nn.Module):
|
||||
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
|
||||
"""
|
||||
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
||||
super().__init__()
|
||||
out_features = out_features or in_features
|
||||
hidden_features = hidden_features or in_features
|
||||
self.fc1 = nn.Linear(in_features, hidden_features)
|
||||
self.act = act_layer()
|
||||
self.fc2 = nn.Linear(hidden_features, out_features)
|
||||
self.drop = nn.Dropout(drop)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.fc1(x)
|
||||
x = self.act(x)
|
||||
x = self.drop(x)
|
||||
x = self.fc2(x)
|
||||
x = self.drop(x)
|
||||
return x
|
||||
|
||||
|
||||
class Attention(nn.Module):
|
||||
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
|
||||
super().__init__()
|
||||
self.num_heads = num_heads
|
||||
head_dim = dim // num_heads
|
||||
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
|
||||
self.scale = qk_scale or head_dim ** -0.5
|
||||
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
||||
self.attn_drop = nn.Dropout(attn_drop)
|
||||
self.proj = nn.Linear(dim, dim)
|
||||
self.proj_drop = nn.Dropout(proj_drop)
|
||||
self.attn_gradients = None
|
||||
self.attention_map = None
|
||||
|
||||
def save_attn_gradients(self, attn_gradients):
|
||||
self.attn_gradients = attn_gradients
|
||||
|
||||
def get_attn_gradients(self):
|
||||
return self.attn_gradients
|
||||
|
||||
def save_attention_map(self, attention_map):
|
||||
self.attention_map = attention_map
|
||||
|
||||
def get_attention_map(self):
|
||||
return self.attention_map
|
||||
|
||||
def forward(self, x, register_hook=False):
|
||||
B, N, C = x.shape
|
||||
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
||||
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
||||
|
||||
attn = (q @ k.transpose(-2, -1)) * self.scale
|
||||
attn = attn.softmax(dim=-1)
|
||||
attn = self.attn_drop(attn)
|
||||
|
||||
if register_hook:
|
||||
self.save_attention_map(attn)
|
||||
attn.register_hook(self.save_attn_gradients)
|
||||
|
||||
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
|
||||
x = self.proj(x)
|
||||
x = self.proj_drop(x)
|
||||
return x
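# When register_hook=True the softmaxed attention map and its gradient are cached on the module
# (via save_attention_map / save_attn_gradients above) and can later be read back with
# get_attention_map() / get_attn_gradients(), e.g. for attention visualization.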
|
||||
|
||||
|
||||
class Block(nn.Module):
|
||||
|
||||
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
|
||||
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False):
|
||||
super().__init__()
|
||||
self.norm1 = norm_layer(dim)
|
||||
self.attn = Attention(
|
||||
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
|
||||
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
|
||||
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
||||
self.norm2 = norm_layer(dim)
|
||||
mlp_hidden_dim = int(dim * mlp_ratio)
|
||||
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
||||
|
||||
if use_grad_checkpointing:
|
||||
self.attn = checkpoint_wrapper(self.attn)
|
||||
self.mlp = checkpoint_wrapper(self.mlp)
|
||||
|
||||
def forward(self, x, register_hook=False):
|
||||
x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
|
||||
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
||||
return x
|
||||
|
||||
|
||||
class VisionTransformer(nn.Module):
|
||||
""" Vision Transformer
|
||||
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
|
||||
https://arxiv.org/abs/2010.11929
|
||||
"""
|
||||
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
|
||||
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
|
||||
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None,
|
||||
use_grad_checkpointing=False, ckpt_layer=0):
|
||||
"""
|
||||
Args:
|
||||
img_size (int, tuple): input image size
|
||||
patch_size (int, tuple): patch size
|
||||
in_chans (int): number of input channels
|
||||
num_classes (int): number of classes for classification head
|
||||
embed_dim (int): embedding dimension
|
||||
depth (int): depth of transformer
|
||||
num_heads (int): number of attention heads
|
||||
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
|
||||
qkv_bias (bool): enable bias for qkv if True
|
||||
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
|
||||
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
|
||||
drop_rate (float): dropout rate
|
||||
attn_drop_rate (float): attention dropout rate
|
||||
drop_path_rate (float): stochastic depth rate
|
||||
norm_layer: (nn.Module): normalization layer
|
||||
"""
|
||||
super().__init__()
|
||||
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
|
||||
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
|
||||
|
||||
num_patches = self.patch_embed.num_patches
|
||||
|
||||
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
|
||||
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
|
||||
self.pos_drop = nn.Dropout(p=drop_rate)
|
||||
|
||||
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
|
||||
self.blocks = nn.ModuleList([
|
||||
Block(
|
||||
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
||||
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
|
||||
use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)
|
||||
)
|
||||
for i in range(depth)])
|
||||
self.norm = norm_layer(embed_dim)
|
||||
|
||||
trunc_normal_(self.pos_embed, std=.02)
|
||||
trunc_normal_(self.cls_token, std=.02)
|
||||
self.apply(self._init_weights)
|
||||
|
||||
def _init_weights(self, m):
|
||||
if isinstance(m, nn.Linear):
|
||||
trunc_normal_(m.weight, std=.02)
|
||||
if isinstance(m, nn.Linear) and m.bias is not None:
|
||||
nn.init.constant_(m.bias, 0)
|
||||
elif isinstance(m, nn.LayerNorm):
|
||||
nn.init.constant_(m.bias, 0)
|
||||
nn.init.constant_(m.weight, 1.0)
|
||||
|
||||
@torch.jit.ignore
|
||||
def no_weight_decay(self):
|
||||
return {'pos_embed', 'cls_token'}
|
||||
|
||||
def forward(self, x, register_blk=-1):
|
||||
B = x.shape[0]
|
||||
x = self.patch_embed(x)
|
||||
|
||||
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
|
||||
x = torch.cat((cls_tokens, x), dim=1)
|
||||
|
||||
x = x + self.pos_embed[:,:x.size(1),:]
|
||||
x = self.pos_drop(x)
|
||||
|
||||
for i,blk in enumerate(self.blocks):
|
||||
x = blk(x, register_blk==i)
|
||||
x = self.norm(x)
|
||||
|
||||
return x
|
||||
|
||||
@torch.jit.ignore()
|
||||
def load_pretrained(self, checkpoint_path, prefix=''):
|
||||
_load_weights(self, checkpoint_path, prefix)
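# A minimal sketch (illustrative, not from the repo): building a ViT-B/16 backbone at 384x384
# resolution and encoding a dummy batch; the returned sequence keeps the [CLS] token first,
# followed by one token per 16x16 patch.
import torch

vit_b16 = VisionTransformer(img_size=384, patch_size=16, embed_dim=768, depth=12,
                            num_heads=12, mlp_ratio=4., qkv_bias=True)
images = torch.randn(2, 3, 384, 384)
features = vit_b16(images)    # (2, 1 + (384 // 16) ** 2, 768) == (2, 577, 768)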
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
|
||||
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
def _n2p(w, t=True):
|
||||
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
|
||||
w = w.flatten()
|
||||
if t:
|
||||
if w.ndim == 4:
|
||||
w = w.transpose([3, 2, 0, 1])
|
||||
elif w.ndim == 3:
|
||||
w = w.transpose([2, 0, 1])
|
||||
elif w.ndim == 2:
|
||||
w = w.transpose([1, 0])
|
||||
return torch.from_numpy(w)
|
||||
|
||||
w = np.load(checkpoint_path)
|
||||
if not prefix and 'opt/target/embedding/kernel' in w:
|
||||
prefix = 'opt/target/'
|
||||
|
||||
if hasattr(model.patch_embed, 'backbone'):
|
||||
# hybrid
|
||||
backbone = model.patch_embed.backbone
|
||||
stem_only = not hasattr(backbone, 'stem')
|
||||
stem = backbone if stem_only else backbone.stem
|
||||
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
|
||||
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
|
||||
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
|
||||
if not stem_only:
|
||||
for i, stage in enumerate(backbone.stages):
|
||||
for j, block in enumerate(stage.blocks):
|
||||
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
|
||||
for r in range(3):
|
||||
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
|
||||
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
|
||||
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
|
||||
if block.downsample is not None:
|
||||
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
|
||||
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
|
||||
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
|
||||
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
|
||||
else:
|
||||
embed_conv_w = adapt_input_conv(
|
||||
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
|
||||
model.patch_embed.proj.weight.copy_(embed_conv_w)
|
||||
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
|
||||
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
|
||||
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
|
||||
if pos_embed_w.shape != model.pos_embed.shape:
|
||||
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
|
||||
pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
|
||||
model.pos_embed.copy_(pos_embed_w)
|
||||
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
|
||||
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
|
||||
# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
|
||||
# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
|
||||
# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
|
||||
# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
|
||||
# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
|
||||
# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
|
||||
for i, block in enumerate(model.blocks.children()):
|
||||
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
|
||||
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
|
||||
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
|
||||
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
|
||||
block.attn.qkv.weight.copy_(torch.cat([
|
||||
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
|
||||
block.attn.qkv.bias.copy_(torch.cat([
|
||||
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
|
||||
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
|
||||
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
|
||||
for r in range(2):
|
||||
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
|
||||
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
|
||||
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
|
||||
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
|
||||
|
||||
|
||||
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
|
||||
# interpolate position embedding
|
||||
embedding_size = pos_embed_checkpoint.shape[-1]
|
||||
num_patches = visual_encoder.patch_embed.num_patches
|
||||
num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
|
||||
# height (== width) for the checkpoint position embedding
|
||||
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
|
||||
# height (== width) for the new position embedding
|
||||
new_size = int(num_patches ** 0.5)
|
||||
|
||||
if orig_size!=new_size:
|
||||
# class_token and dist_token are kept unchanged
|
||||
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
|
||||
# only the position tokens are interpolated
|
||||
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
|
||||
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
|
||||
pos_tokens = torch.nn.functional.interpolate(
|
||||
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
|
||||
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
|
||||
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
|
||||
print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
|
||||
|
||||
return new_pos_embed
|
||||
else:
|
||||
return pos_embed_checkpoint
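# Illustrative only: adapting a 224px checkpoint's position embedding (14x14 patches) to the
# 384px backbone sketched above (24x24 patches). The zero tensor is a stand-in for a real
# checkpoint entry (e.g. a 'pos_embed' key in a saved state_dict).
import torch

ckpt_pos_embed = torch.zeros(1, 1 + 14 * 14, 768)
new_pos_embed = interpolate_pos_embed(ckpt_pos_embed, vit_b16)
# prints "reshape position embedding from 196 to 576"; new_pos_embed.shape == (1, 577, 768)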
173
pretrain.py
Normal file
@@ -0,0 +1,173 @@
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
'''
|
||||
import argparse
|
||||
import os
|
||||
import ruamel_yaml as yaml
|
||||
import numpy as np
|
||||
import random
|
||||
import time
|
||||
import datetime
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.backends.cudnn as cudnn
|
||||
import torch.distributed as dist
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
from models.blip_pretrain import blip_pretrain
|
||||
import utils
|
||||
from utils import warmup_lr_schedule, step_lr_schedule
|
||||
from data import create_dataset, create_sampler, create_loader
|
||||
|
||||
def train(model, data_loader, optimizer, epoch, device, config):
|
||||
# train
|
||||
model.train()
|
||||
|
||||
metric_logger = utils.MetricLogger(delimiter=" ")
|
||||
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
|
||||
metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
|
||||
metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
|
||||
metric_logger.add_meter('loss_lm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
|
||||
|
||||
header = 'Train Epoch: [{}]'.format(epoch)
|
||||
print_freq = 50
|
||||
|
||||
if config['laion_path']:
|
||||
data_loader.dataset.reload_laion(epoch)
|
||||
|
||||
data_loader.sampler.set_epoch(epoch)
|
||||
|
||||
for i, (image, caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
|
||||
|
||||
if epoch==0:
|
||||
warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
|
||||
|
||||
optimizer.zero_grad()
|
||||
|
||||
image = image.to(device,non_blocking=True)
|
||||
|
||||
# ramp up alpha in the first 2 epochs
|
||||
alpha = config['alpha']*min(1,(epoch*len(data_loader)+i)/(2*len(data_loader)))
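# (note) alpha ramps linearly from 0 to config['alpha'] over the first two epochs' worth of
# steps (i.e. until epoch*len(data_loader)+i reaches 2*len(data_loader)) and then stays constant;
# e.g. with 1000 batches per epoch it reaches full strength at step 2000.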
|
||||
|
||||
loss_ita, loss_itm, loss_lm = model(image, caption, alpha = alpha)
|
||||
loss = loss_ita + loss_itm + loss_lm
|
||||
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
metric_logger.update(loss_ita=loss_ita.item())
|
||||
metric_logger.update(loss_itm=loss_itm.item())
|
||||
metric_logger.update(loss_lm=loss_lm.item())
|
||||
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
|
||||
|
||||
|
||||
# gather the stats from all processes
|
||||
metric_logger.synchronize_between_processes()
|
||||
print("Averaged stats:", metric_logger.global_avg())
|
||||
return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
|
||||
|
||||
|
||||
def main(args, config):
|
||||
utils.init_distributed_mode(args)
|
||||
|
||||
device = torch.device(args.device)
|
||||
|
||||
# fix the seed for reproducibility
|
||||
seed = args.seed + utils.get_rank()
|
||||
torch.manual_seed(seed)
|
||||
np.random.seed(seed)
|
||||
random.seed(seed)
|
||||
cudnn.benchmark = True
|
||||
|
||||
#### Dataset ####
|
||||
print("Creating dataset")
|
||||
datasets = [create_dataset('pretrain', config, min_scale=0.2)]
|
||||
print('number of training samples: %d'%len(datasets[0]))
|
||||
|
||||
num_tasks = utils.get_world_size()
|
||||
global_rank = utils.get_rank()
|
||||
samplers = create_sampler(datasets, [True], num_tasks, global_rank)
|
||||
|
||||
data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
|
||||
|
||||
#### Model ####
|
||||
print("Creating model")
|
||||
model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
|
||||
vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
|
||||
|
||||
model = model.to(device)
|
||||
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
|
||||
|
||||
start_epoch = 0
|
||||
if args.checkpoint:
|
||||
checkpoint = torch.load(args.checkpoint, map_location='cpu')
|
||||
state_dict = checkpoint['model']
|
||||
model.load_state_dict(state_dict)
|
||||
|
||||
optimizer.load_state_dict(checkpoint['optimizer'])
|
||||
start_epoch = checkpoint['epoch']+1
|
||||
print('resume checkpoint from %s'%args.checkpoint)
|
||||
|
||||
model_without_ddp = model
|
||||
if args.distributed:
|
||||
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
|
||||
model_without_ddp = model.module
|
||||
|
||||
print("Start training")
|
||||
start_time = time.time()
|
||||
for epoch in range(start_epoch, config['max_epoch']):
|
||||
|
||||
step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
|
||||
|
||||
train_stats = train(model, data_loader, optimizer, epoch, device, config)
|
||||
if utils.is_main_process():
|
||||
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
|
||||
'epoch': epoch,
|
||||
}
|
||||
save_obj = {
|
||||
'model': model_without_ddp.state_dict(),
|
||||
'optimizer': optimizer.state_dict(),
|
||||
'config': config,
|
||||
'epoch': epoch,
|
||||
}
|
||||
torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
|
||||
|
||||
with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
|
||||
f.write(json.dumps(log_stats) + "\n")
|
||||
|
||||
dist.barrier()
|
||||
|
||||
total_time = time.time() - start_time
|
||||
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
|
||||
print('Training time {}'.format(total_time_str))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--config', default='./configs/pretrain.yaml')
|
||||
parser.add_argument('--output_dir', default='output/Pretrain')
|
||||
parser.add_argument('--checkpoint', default='')
|
||||
parser.add_argument('--evaluate', action='store_true')
|
||||
parser.add_argument('--device', default='cuda')
|
||||
parser.add_argument('--seed', default=42, type=int)
|
||||
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
|
||||
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
|
||||
parser.add_argument('--distributed', default=True, type=bool)
|
||||
args = parser.parse_args()
|
||||
|
||||
config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
|
||||
|
||||
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
|
||||
|
||||
main(args, config)
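# Illustrative usage (not part of the file): with the argparse defaults above, a single-node
# distributed launch looks roughly like
#   python -m torch.distributed.run --nproc_per_node=8 pretrain.py \
#       --config ./configs/pretrain.yaml --output_dir output/Pretrain
# where the GPU count (8) is an assumption about the available hardware.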
4
requirements.txt
Normal file
@@ -0,0 +1,4 @@
timm==0.4.12
transformers==4.15.0
fairscale==0.4.4
pycocotools

206
train_caption.py
Normal file
@@ -0,0 +1,206 @@
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
'''
|
||||
import argparse
|
||||
import os
|
||||
import ruamel_yaml as yaml
|
||||
import numpy as np
|
||||
import random
|
||||
import time
|
||||
import datetime
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torch.backends.cudnn as cudnn
|
||||
import torch.distributed as dist
|
||||
from torch.utils.data import DataLoader
|
||||
|
||||
from models.blip import blip_decoder
|
||||
import utils
|
||||
from utils import cosine_lr_schedule
|
||||
from data import create_dataset, create_sampler, create_loader
|
||||
from data.utils import save_result, coco_caption_eval
|
||||
|
||||
def train(model, data_loader, optimizer, epoch, device):
|
||||
# train
|
||||
model.train()
|
||||
|
||||
metric_logger = utils.MetricLogger(delimiter=" ")
|
||||
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
|
||||
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
|
||||
header = 'Train Caption Epoch: [{}]'.format(epoch)
|
||||
print_freq = 50
|
||||
|
||||
for i, (image, caption, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
|
||||
image = image.to(device)
|
||||
|
||||
loss = model(image, caption)
|
||||
|
||||
optimizer.zero_grad()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
metric_logger.update(loss=loss.item())
|
||||
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
|
||||
|
||||
# gather the stats from all processes
|
||||
metric_logger.synchronize_between_processes()
|
||||
print("Averaged stats:", metric_logger.global_avg())
|
||||
return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def evaluate(model, data_loader, device, config):
|
||||
# evaluate
|
||||
model.eval()
|
||||
|
||||
metric_logger = utils.MetricLogger(delimiter=" ")
|
||||
header = 'Caption generation:'
|
||||
print_freq = 10
|
||||
|
||||
result = []
|
||||
for image, image_id in metric_logger.log_every(data_loader, print_freq, header):
|
||||
|
||||
image = image.to(device)
|
||||
|
||||
captions = model.generate(image, sample=False, num_beams=config['num_beams'], max_length=config['max_length'],
|
||||
min_length=config['min_length'])
|
||||
|
||||
for caption, img_id in zip(captions, image_id):
|
||||
result.append({"image_id": img_id.item(), "caption": caption})
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def main(args, config):
|
||||
utils.init_distributed_mode(args)
|
||||
|
||||
device = torch.device(args.device)
|
||||
|
||||
# fix the seed for reproducibility
|
||||
seed = args.seed + utils.get_rank()
|
||||
torch.manual_seed(seed)
|
||||
np.random.seed(seed)
|
||||
random.seed(seed)
|
||||
cudnn.benchmark = True
|
||||
|
||||
#### Dataset ####
|
||||
print("Creating captioning dataset")
|
||||
train_dataset, val_dataset, test_dataset = create_dataset('caption_coco', config)
|
||||
|
||||
if args.distributed:
|
||||
num_tasks = utils.get_world_size()
|
||||
global_rank = utils.get_rank()
|
||||
samplers = create_sampler([train_dataset,val_dataset,test_dataset], [True,False,False], num_tasks, global_rank)
|
||||
else:
|
||||
samplers = [None, None, None]
|
||||
|
||||
train_loader, val_loader, test_loader = create_loader([train_dataset, val_dataset, test_dataset],samplers,
|
||||
batch_size=[config['batch_size']]*3,num_workers=[4,4,4],
|
||||
is_trains=[True, False, False], collate_fns=[None,None,None])
|
||||
|
||||
#### Model ####
|
||||
print("Creating model")
|
||||
model = blip_decoder(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'],
|
||||
vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'],
|
||||
prompt=config['prompt'])
|
||||
|
||||
model = model.to(device)
|
||||
|
||||
model_without_ddp = model
|
||||
if args.distributed:
|
||||
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
|
||||
model_without_ddp = model.module
|
||||
|
||||
optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
|
||||
|
||||
best = 0
|
||||
best_epoch = 0
|
||||
|
||||
print("Start training")
|
||||
start_time = time.time()
|
||||
for epoch in range(0, config['max_epoch']):
|
||||
if not args.evaluate:
|
||||
if args.distributed:
|
||||
train_loader.sampler.set_epoch(epoch)
|
||||
|
||||
cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])
|
||||
|
||||
train_stats = train(model, train_loader, optimizer, epoch, device)
|
||||
|
||||
val_result = evaluate(model_without_ddp, val_loader, device, config)
|
||||
val_result_file = save_result(val_result, args.result_dir, 'val_epoch%d'%epoch, remove_duplicate='image_id')
|
||||
|
||||
test_result = evaluate(model_without_ddp, test_loader, device, config)
|
||||
test_result_file = save_result(test_result, args.result_dir, 'test_epoch%d'%epoch, remove_duplicate='image_id')
|
||||
|
||||
if utils.is_main_process():
|
||||
coco_val = coco_caption_eval(config['coco_gt_root'],val_result_file,'val')
|
||||
coco_test = coco_caption_eval(config['coco_gt_root'],test_result_file,'test')
|
||||
|
||||
if args.evaluate:
|
||||
log_stats = {**{f'val_{k}': v for k, v in coco_val.eval.items()},
|
||||
**{f'test_{k}': v for k, v in coco_test.eval.items()},
|
||||
}
|
||||
with open(os.path.join(args.output_dir, "evaluate.txt"),"a") as f:
|
||||
f.write(json.dumps(log_stats) + "\n")
|
||||
else:
|
||||
save_obj = {
|
||||
'model': model_without_ddp.state_dict(),
|
||||
'optimizer': optimizer.state_dict(),
|
||||
'config': config,
|
||||
'epoch': epoch,
|
||||
}
|
||||
|
||||
if coco_val.eval['CIDEr'] + coco_val.eval['Bleu_4'] > best:
|
||||
best = coco_val.eval['CIDEr'] + coco_val.eval['Bleu_4']
|
||||
best_epoch = epoch
|
||||
torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
|
||||
|
||||
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
|
||||
**{f'val_{k}': v for k, v in coco_val.eval.items()},
|
||||
**{f'test_{k}': v for k, v in coco_test.eval.items()},
|
||||
'epoch': epoch,
|
||||
'best_epoch': best_epoch,
|
||||
}
|
||||
with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
|
||||
f.write(json.dumps(log_stats) + "\n")
|
||||
|
||||
if args.evaluate:
|
||||
break
|
||||
dist.barrier()
|
||||
|
||||
total_time = time.time() - start_time
|
||||
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
|
||||
print('Training time {}'.format(total_time_str))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--config', default='./configs/caption_coco.yaml')
|
||||
parser.add_argument('--output_dir', default='output/Caption_coco')
|
||||
parser.add_argument('--evaluate', action='store_true')
|
||||
parser.add_argument('--device', default='cuda')
|
||||
parser.add_argument('--seed', default=42, type=int)
|
||||
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
|
||||
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
|
||||
parser.add_argument('--distributed', default=True, type=bool)
|
||||
args = parser.parse_args()
|
||||
|
||||
config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
|
||||
|
||||
args.result_dir = os.path.join(args.output_dir, 'result')
|
||||
|
||||
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
|
||||
Path(args.result_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
|
||||
|
||||
main(args, config)
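# Illustrative usage (not part of the file): with the argparse defaults above, COCO caption
# fine-tuning can be launched roughly as
#   python -m torch.distributed.run --nproc_per_node=8 train_caption.py
# and adding --evaluate runs validation/test caption generation only; the GPU count is an
# assumption.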
213
train_nlvr.py
Normal file
@@ -0,0 +1,213 @@
'''
|
||||
* Copyright (c) 2022, salesforce.com, inc.
|
||||
* All rights reserved.
|
||||
* SPDX-License-Identifier: BSD-3-Clause
|
||||
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
|
||||
* By Junnan Li
|
||||
'''
|
||||
import argparse
|
||||
import os
|
||||
import ruamel_yaml as yaml
|
||||
import numpy as np
|
||||
import random
|
||||
import time
|
||||
import datetime
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import pickle
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from torch.utils.data import DataLoader
|
||||
import torch.backends.cudnn as cudnn
|
||||
import torch.distributed as dist
|
||||
|
||||
from models.blip_nlvr import blip_nlvr
|
||||
|
||||
import utils
|
||||
from utils import cosine_lr_schedule, warmup_lr_schedule
|
||||
from data import create_dataset, create_sampler, create_loader
|
||||
|
||||
def train(model, data_loader, optimizer, epoch, device, config):
|
||||
# train
|
||||
model.train()
|
||||
|
||||
metric_logger = utils.MetricLogger(delimiter=" ")
|
||||
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
|
||||
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
|
||||
|
||||
header = 'Train Epoch: [{}]'.format(epoch)
|
||||
print_freq = 50
|
||||
step_size = 10
|
||||
|
||||
for i,(image0, image1, text, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
|
||||
|
||||
images = torch.cat([image0, image1], dim=0)
        images, targets = images.to(device), targets.to(device)

        loss = model(images, text, targets=targets, train=True)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(loss=loss.item())

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluate(model, data_loader, device, config):
    # test
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")

    header = 'Evaluation:'
    print_freq = 50

    for image0, image1, text, targets in metric_logger.log_every(data_loader, print_freq, header):
        images = torch.cat([image0, image1], dim=0)
        images, targets = images.to(device), targets.to(device)

        prediction = model(images, text, targets=targets, train=False)

        _, pred_class = prediction.max(1)
        accuracy = (targets == pred_class).sum() / targets.size(0)
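        # e.g. with logits of shape [B, 2], prediction.max(1) returns the
        # argmax class per example; accuracy is the matched fraction of the batch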

        metric_logger.meters['acc'].update(accuracy.item(), n=image0.size(0))

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()

    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}


def main(args, config):
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating dataset")
    datasets = create_dataset('nlvr', config)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True, False, False], num_tasks, global_rank)
    else:
        samplers = [None, None, None]

    batch_size = [config['batch_size_train'], config['batch_size_test'], config['batch_size_test']]
    train_loader, val_loader, test_loader = create_loader(datasets, samplers, batch_size=batch_size,
                                                          num_workers=[4, 4, 4], is_trains=[True, False, False],
                                                          collate_fns=[None, None, None])

    #### Model ####
    print("Creating model")
    model = blip_nlvr(pretrained=config['pretrained'], image_size=config['image_size'],
                      vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'])

    model = model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])

    print("Start training")
    start_time = time.time()
    best = 0
    best_epoch = 0

    for epoch in range(0, config['max_epoch']):
        if not args.evaluate:
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)

            cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])

            train_stats = train(model, train_loader, optimizer, epoch, device, config)

        val_stats = evaluate(model, val_loader, device, config)
        test_stats = evaluate(model, test_loader, device, config)

        if utils.is_main_process():
            if args.evaluate:
                log_stats = {**{f'val_{k}': v for k, v in val_stats.items()},
                             **{f'test_{k}': v for k, v in test_stats.items()},
                            }
                with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")

            else:
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             **{f'val_{k}': v for k, v in val_stats.items()},
                             **{f'test_{k}': v for k, v in test_stats.items()},
                             'epoch': epoch,
                            }

                if float(val_stats['acc']) > best:
                    save_obj = {
                        'model': model_without_ddp.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'config': config,
                        'epoch': epoch,
                    }
                    torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
                    best = float(val_stats['acc'])
                    best_epoch = epoch

                with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
        if args.evaluate:
            break

        dist.barrier()

    if utils.is_main_process():
        with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
            f.write("best epoch: %d" % best_epoch)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/nlvr.yaml')
    parser.add_argument('--output_dir', default='output/NLVR')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
345
train_retrieval.py
Normal file
@@ -0,0 +1,345 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader

from models.blip_retrieval import blip_retrieval
import utils
from utils import cosine_lr_schedule
from data import create_dataset, create_sampler, create_loader


def train(model, data_loader, optimizer, epoch, device, config):
    # train
    model.train()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50

    for i, (image, caption, idx) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)
        idx = idx.to(device, non_blocking=True)

        if epoch > 0:
            alpha = config['alpha']
        else:
            alpha = config['alpha'] * min(1, i / len(data_loader))

        loss_ita, loss_itm = model(image, caption, alpha=alpha, idx=idx)
        loss = loss_ita + loss_itm

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        metric_logger.update(loss_itm=loss_itm.item())
        metric_logger.update(loss_ita=loss_ita.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluation(model, data_loader, device, config):
    # test
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Evaluation:'

    print('Computing features for evaluation...')
    start_time = time.time()

    texts = data_loader.dataset.text
    num_text = len(texts)
    text_bs = 256
    text_ids = []
    text_embeds = []
    text_atts = []
    for i in range(0, num_text, text_bs):
        text = texts[i: min(num_text, i + text_bs)]
        text_input = model.tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(device)
        text_output = model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask, mode='text')
        text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:, 0, :]))
        text_embeds.append(text_embed)
        text_ids.append(text_input.input_ids)
        text_atts.append(text_input.attention_mask)

    text_embeds = torch.cat(text_embeds, dim=0)
    text_ids = torch.cat(text_ids, dim=0)
    text_atts = torch.cat(text_atts, dim=0)
    text_ids[:, 0] = model.tokenizer.enc_token_id
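    # Editor's note (assumption): overwriting the first token id with the
    # tokenizer's enc_token_id switches the text encoder into its
    # image-grounded mode when the top-k candidates are re-scored below.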

    image_feats = []
    image_embeds = []
    for image, img_id in data_loader:
        image = image.to(device)
        image_feat = model.visual_encoder(image)
        image_embed = model.vision_proj(image_feat[:, 0, :])
        image_embed = F.normalize(image_embed, dim=-1)

        image_feats.append(image_feat.cpu())
        image_embeds.append(image_embed)

    image_feats = torch.cat(image_feats, dim=0)
    image_embeds = torch.cat(image_embeds, dim=0)

    sims_matrix = image_embeds @ text_embeds.t()
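    # shapes: image_embeds [N_img, D] @ text_embeds.t() [D, N_txt]
    # -> sims_matrix [N_img, N_txt]; both embeddings are L2-normalized,
    # so each entry is a cosine similarity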
    score_matrix_i2t = torch.full((len(data_loader.dataset.image), len(texts)), -100.0).to(device)

    num_tasks = utils.get_world_size()
    rank = utils.get_rank()
    step = sims_matrix.size(0) // num_tasks + 1
    start = rank * step
    end = min(sims_matrix.size(0), start + step)
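    # each rank re-scores only its own slice of rows with the ITM head;
    # the slices are merged below via all_reduce, since unscored entries
    # keep the -100 fill value on every process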

    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)

        encoder_output = image_feats[start + i].repeat(config['k_test'], 1, 1).to(device)
        encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device)
        output = model.text_encoder(text_ids[topk_idx],
                                    attention_mask=text_atts[topk_idx],
                                    encoder_hidden_states=encoder_output,
                                    encoder_attention_mask=encoder_att,
                                    return_dict=True,
                                   )
        score = model.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
        score_matrix_i2t[start + i, topk_idx] = score + topk_sim

    sims_matrix = sims_matrix.t()
    score_matrix_t2i = torch.full((len(texts), len(data_loader.dataset.image)), -100.0).to(device)

    step = sims_matrix.size(0) // num_tasks + 1
    start = rank * step
    end = min(sims_matrix.size(0), start + step)

    for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):

        topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
        encoder_output = image_feats[topk_idx].to(device)
        encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device)
        output = model.text_encoder(text_ids[start + i].repeat(config['k_test'], 1),
                                    attention_mask=text_atts[start + i].repeat(config['k_test'], 1),
                                    encoder_hidden_states=encoder_output,
                                    encoder_attention_mask=encoder_att,
                                    return_dict=True,
                                   )
        score = model.itm_head(output.last_hidden_state[:, 0, :])[:, 1]
        score_matrix_t2i[start + i, topk_idx] = score + topk_sim

    if args.distributed:
        dist.barrier()
        torch.distributed.all_reduce(score_matrix_i2t, op=torch.distributed.ReduceOp.SUM)
        torch.distributed.all_reduce(score_matrix_t2i, op=torch.distributed.ReduceOp.SUM)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Evaluation time {}'.format(total_time_str))

    return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()


@torch.no_grad()
def itm_eval(scores_i2t, scores_t2i, txt2img, img2txt):

    # Images->Text
    ranks = np.zeros(scores_i2t.shape[0])
    for index, score in enumerate(scores_i2t):
        inds = np.argsort(score)[::-1]
        # Score
        rank = 1e20
        for i in img2txt[index]:
            tmp = np.where(inds == i)[0][0]
            if tmp < rank:
                rank = tmp
        ranks[index] = rank

    # Compute metrics
    tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)

    # Text->Images
    ranks = np.zeros(scores_t2i.shape[0])

    for index, score in enumerate(scores_t2i):
        inds = np.argsort(score)[::-1]
        ranks[index] = np.where(inds == txt2img[index])[0][0]

    # Compute metrics
    ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)

    tr_mean = (tr1 + tr5 + tr10) / 3
    ir_mean = (ir1 + ir5 + ir10) / 3
    r_mean = (tr_mean + ir_mean) / 2

    eval_result = {'txt_r1': tr1,
                   'txt_r5': tr5,
                   'txt_r10': tr10,
                   'txt_r_mean': tr_mean,
                   'img_r1': ir1,
                   'img_r5': ir5,
                   'img_r10': ir10,
                   'img_r_mean': ir_mean,
                   'r_mean': r_mean}
    return eval_result
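
# e.g. tr1 ('txt_r1') is the percentage of images whose best-ranked
# ground-truth caption sits at rank 0 (image-to-text recall@1);
# 'r_mean' averages recall@{1,5,10} over both retrieval directions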


def main(args, config):
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating retrieval dataset")
    train_dataset, val_dataset, test_dataset = create_dataset('retrieval_%s' % config['dataset'], config)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler([train_dataset], [True], num_tasks, global_rank) + [None, None]
    else:
        samplers = [None, None, None]

    train_loader, val_loader, test_loader = create_loader([train_dataset, val_dataset, test_dataset], samplers,
                                                          batch_size=[config['batch_size_train']] + [config['batch_size_test']] * 2,
                                                          num_workers=[4, 4, 4],
                                                          is_trains=[True, False, False],
                                                          collate_fns=[None, None, None])

    #### Model ####
    print("Creating model")
    model = blip_retrieval(pretrained=config['pretrained'], image_size=config['image_size'], vit=config['vit'],
                           vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'],
                           queue_size=config['queue_size'], negative_all_rank=config['negative_all_rank'])

    model = model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])

    best = 0
    best_epoch = 0

    print("Start training")
    start_time = time.time()

    for epoch in range(0, config['max_epoch']):
        if not args.evaluate:
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)

            cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])

            train_stats = train(model, train_loader, optimizer, epoch, device, config)

        score_val_i2t, score_val_t2i = evaluation(model_without_ddp, val_loader, device, config)
        score_test_i2t, score_test_t2i = evaluation(model_without_ddp, test_loader, device, config)

        if utils.is_main_process():

            val_result = itm_eval(score_val_i2t, score_val_t2i, val_loader.dataset.txt2img, val_loader.dataset.img2txt)
            print(val_result)

            if val_result['r_mean'] > best:
                save_obj = {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'config': config,
                    'epoch': epoch,
                }
                torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
                best = val_result['r_mean']
                best_epoch = epoch

            test_result = itm_eval(score_test_i2t, score_test_t2i, test_loader.dataset.txt2img, test_loader.dataset.img2txt)
            print(test_result)

            if args.evaluate:
                log_stats = {**{f'val_{k}': v for k, v in val_result.items()},
                             **{f'test_{k}': v for k, v in test_result.items()},
                            }
                with open(os.path.join(args.output_dir, "evaluate.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")
            else:
                log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                             **{f'val_{k}': v for k, v in val_result.items()},
                             **{f'test_{k}': v for k, v in test_result.items()},
                             'epoch': epoch,
                             'best_epoch': best_epoch,
                            }
                with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                    f.write(json.dumps(log_stats) + "\n")

        if args.evaluate:
            break

        dist.barrier()
        torch.cuda.empty_cache()

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/retrieval_flickr.yaml')
    parser.add_argument('--output_dir', default='output/Retrieval_flickr')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
202
train_vqa.py
Normal file
@@ -0,0 +1,202 @@
'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist

from models.blip_vqa import blip_vqa
import utils
from utils import cosine_lr_schedule
from data import create_dataset, create_sampler, create_loader
from data.vqa_dataset import vqa_collate_fn
from data.utils import save_result


def train(model, data_loader, optimizer, epoch, device):
    # train
    model.train()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))

    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50

    for i, (image, question, answer, weights, n) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image, weights = image.to(device, non_blocking=True), weights.to(device, non_blocking=True)

        loss = model(image, question, answer, train=True, n=n, weights=weights)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluation(model, data_loader, device, config):
    # test
    model.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Generate VQA test result:'
    print_freq = 50

    result = []

    if config['inference'] == 'rank':
        answer_list = data_loader.dataset.answer_list
        answer_candidates = model.tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
        answer_candidates.input_ids[:, 0] = model.tokenizer.bos_token_id
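        # in 'rank' mode the candidate answers are tokenized once up front;
        # each question is then scored against this fixed list instead of
        # decoding an answer token by token as in 'generate' mode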

    for n, (image, question, question_id) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)

        if config['inference'] == 'generate':
            answers = model(image, question, train=False, inference='generate')

            for answer, ques_id in zip(answers, question_id):
                ques_id = int(ques_id.item())
                result.append({"question_id": ques_id, "answer": answer})

        elif config['inference'] == 'rank':
            answer_ids = model(image, question, answer_candidates, train=False, inference='rank', k_test=config['k_test'])

            for ques_id, answer_id in zip(question_id, answer_ids):
                result.append({"question_id": int(ques_id.item()), "answer": answer_list[answer_id]})

    return result


def main(args, config):
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    #### Dataset ####
    print("Creating vqa datasets")
    datasets = create_dataset('vqa', config)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True, False], num_tasks, global_rank)
    else:
        samplers = [None, None]

    train_loader, test_loader = create_loader(datasets, samplers,
                                              batch_size=[config['batch_size_train'], config['batch_size_test']],
                                              num_workers=[4, 4], is_trains=[True, False],
                                              collate_fns=[vqa_collate_fn, None])
    #### Model ####
    print("Creating model")
    model = blip_vqa(pretrained=config['pretrained'], image_size=config['image_size'],
                     vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'])

    model = model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])

    best = 0
    best_epoch = 0

    print("Start training")
    start_time = time.time()
    for epoch in range(0, config['max_epoch']):
        if not args.evaluate:
            if args.distributed:
                train_loader.sampler.set_epoch(epoch)

            cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])

            train_stats = train(model, train_loader, optimizer, epoch, device)

        else:
            break

        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                        }
            with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
                f.write(json.dumps(log_stats) + "\n")

            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth' % epoch))

        dist.barrier()

    vqa_result = evaluation(model_without_ddp, test_loader, device, config)
    result_file = save_result(vqa_result, args.result_dir, 'vqa_result')

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/vqa.yaml')
    parser.add_argument('--output_dir', default='output/VQA')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()

    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)

    args.result_dir = os.path.join(args.output_dir, 'result')

    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    Path(args.result_dir).mkdir(parents=True, exist_ok=True)

    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))

    main(args, config)
BIN
transform/__pycache__/randaugment.cpython-36.pyc
Normal file
Binary file not shown.
BIN
transform/__pycache__/randaugment.cpython-38.pyc
Normal file
Binary file not shown.
340
transform/randaugment.py
Normal file
@@ -0,0 +1,340 @@
import cv2
import numpy as np


## aug functions
def identity_func(img):
    return img


def autocontrast_func(img, cutoff=0):
    '''
    same output as PIL.ImageOps.autocontrast
    '''
    n_bins = 256

    def tune_channel(ch):
        n = ch.size
        cut = cutoff * n // 100
        if cut == 0:
            high, low = ch.max(), ch.min()
        else:
            hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
            low = np.argwhere(np.cumsum(hist) > cut)
            low = 0 if low.shape[0] == 0 else low[0]
            high = np.argwhere(np.cumsum(hist[::-1]) > cut)
            high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
        if high <= low:
            table = np.arange(n_bins)
        else:
            scale = (n_bins - 1) / (high - low)
            offset = -low * scale
            table = np.arange(n_bins) * scale + offset
            table[table < 0] = 0
            table[table > n_bins - 1] = n_bins - 1
        table = table.clip(0, 255).astype(np.uint8)
        return table[ch]

    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out


def equalize_func(img):
    '''
    same output as PIL.ImageOps.equalize
    PIL's implementation is different from cv2.equalizeHist
    '''
    n_bins = 256

    def tune_channel(ch):
        hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
        non_zero_hist = hist[hist != 0].reshape(-1)
        step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
        if step == 0:
            return ch
        n = np.empty_like(hist)
        n[0] = step // 2
        n[1:] = hist[:-1]
        table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
        return table[ch]

    channels = [tune_channel(ch) for ch in cv2.split(img)]
    out = cv2.merge(channels)
    return out


def rotate_func(img, degree, fill=(0, 0, 0)):
    '''
    like PIL, rotate by degree, not radians
    '''
    H, W = img.shape[0], img.shape[1]
    center = W / 2, H / 2
    M = cv2.getRotationMatrix2D(center, degree, 1)
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
    return out


def solarize_func(img, thresh=128):
    '''
    same output as PIL.ImageOps.solarize
    '''
    table = np.array([el if el < thresh else 255 - el for el in range(256)])
    table = table.clip(0, 255).astype(np.uint8)
    out = table[img]
    return out


def color_func(img, factor):
    '''
    same output as PIL.ImageEnhance.Color
    '''
    ## implementation according to PIL definition, quite slow
    # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
    # out = blend(degenerate, img, factor)
    # M = (
    #     np.eye(3) * factor
    #     + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
    # )[np.newaxis, np.newaxis, :]
    M = (
        np.float32([
            [0.886, -0.114, -0.114],
            [-0.587, 0.413, -0.587],
            [-0.299, -0.299, 0.701]]) * factor
        + np.float32([[0.114], [0.587], [0.299]])
    )
    out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
    return out


def contrast_func(img, factor):
    """
    same output as PIL.ImageEnhance.Contrast
    """
    mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
    table = np.array([
        (el - mean) * factor + mean
        for el in range(256)
    ]).clip(0, 255).astype(np.uint8)
    out = table[img]
    return out


def brightness_func(img, factor):
    '''
    same output as PIL.ImageEnhance.Brightness
    '''
    table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
    out = table[img]
    return out


def sharpness_func(img, factor):
    '''
    The differences between this result and PIL's are all on the 4 boundaries;
    the center areas are the same
    '''
    kernel = np.ones((3, 3), dtype=np.float32)
    kernel[1][1] = 5
    kernel /= 13
    degenerate = cv2.filter2D(img, -1, kernel)
    if factor == 0.0:
        out = degenerate
    elif factor == 1.0:
        out = img
    else:
        out = img.astype(np.float32)
        degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
        out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
        out = out.astype(np.uint8)
    return out


def shear_x_func(img, factor, fill=(0, 0, 0)):
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, factor, 0], [0, 1, 0]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out


def translate_x_func(img, offset, fill=(0, 0, 0)):
    '''
    same output as PIL.Image.transform
    '''
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, -offset], [0, 1, 0]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out


def translate_y_func(img, offset, fill=(0, 0, 0)):
    '''
    same output as PIL.Image.transform
    '''
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, 0], [0, 1, -offset]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out


def posterize_func(img, bits):
    '''
    same output as PIL.ImageOps.posterize
    '''
    out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
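    # e.g. bits=4 keeps the top 4 bits of each channel: np.uint8(255 << 4)
    # equals 0xF0, so pixel values are quantized to 16 levels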
    return out


def shear_y_func(img, factor, fill=(0, 0, 0)):
    H, W = img.shape[0], img.shape[1]
    M = np.float32([[1, 0, 0], [factor, 1, 0]])
    out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
    return out


def cutout_func(img, pad_size, replace=(0, 0, 0)):
    replace = np.array(replace, dtype=np.uint8)
    H, W = img.shape[0], img.shape[1]
    rh, rw = np.random.random(2)
    pad_size = pad_size // 2
    ch, cw = int(rh * H), int(rw * W)
    x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
    y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
    out = img.copy()
    out[x1:x2, y1:y2, :] = replace
    return out


### level to args
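# Each *_level_to_args factory below maps an integer level in [0, MAX_LEVEL]
# to the positional args of the matching aug function, e.g. enhance levels
# map to factors in [0.1, 1.9] and shear levels to [-0.3, 0.3] (random sign).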
def enhance_level_to_args(MAX_LEVEL):
    def level_to_args(level):
        return ((level / MAX_LEVEL) * 1.8 + 0.1,)
    return level_to_args


def shear_level_to_args(MAX_LEVEL, replace_value):
    def level_to_args(level):
        level = (level / MAX_LEVEL) * 0.3
        if np.random.random() > 0.5:
            level = -level
        return (level, replace_value)

    return level_to_args


def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
    def level_to_args(level):
        level = (level / MAX_LEVEL) * float(translate_const)
        if np.random.random() > 0.5:
            level = -level
        return (level, replace_value)

    return level_to_args


def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
    def level_to_args(level):
        level = int((level / MAX_LEVEL) * cutout_const)
        return (level, replace_value)

    return level_to_args


def solarize_level_to_args(MAX_LEVEL):
    def level_to_args(level):
        level = int((level / MAX_LEVEL) * 256)
        return (level, )
    return level_to_args


def none_level_to_args(level):
    return ()


def posterize_level_to_args(MAX_LEVEL):
    def level_to_args(level):
        level = int((level / MAX_LEVEL) * 4)
        return (level, )
    return level_to_args


def rotate_level_to_args(MAX_LEVEL, replace_value):
    def level_to_args(level):
        level = (level / MAX_LEVEL) * 30
        if np.random.random() < 0.5:
            level = -level
        return (level, replace_value)

    return level_to_args


func_dict = {
    'Identity': identity_func,
    'AutoContrast': autocontrast_func,
    'Equalize': equalize_func,
    'Rotate': rotate_func,
    'Solarize': solarize_func,
    'Color': color_func,
    'Contrast': contrast_func,
    'Brightness': brightness_func,
    'Sharpness': sharpness_func,
    'ShearX': shear_x_func,
    'TranslateX': translate_x_func,
    'TranslateY': translate_y_func,
    'Posterize': posterize_func,
    'ShearY': shear_y_func,
}

translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)
arg_dict = {
    'Identity': none_level_to_args,
    'AutoContrast': none_level_to_args,
    'Equalize': none_level_to_args,
    'Rotate': rotate_level_to_args(MAX_LEVEL, replace_value),
    'Solarize': solarize_level_to_args(MAX_LEVEL),
    'Color': enhance_level_to_args(MAX_LEVEL),
    'Contrast': enhance_level_to_args(MAX_LEVEL),
    'Brightness': enhance_level_to_args(MAX_LEVEL),
    'Sharpness': enhance_level_to_args(MAX_LEVEL),
    'ShearX': shear_level_to_args(MAX_LEVEL, replace_value),
    'TranslateX': translate_level_to_args(
        translate_const, MAX_LEVEL, replace_value
    ),
    'TranslateY': translate_level_to_args(
        translate_const, MAX_LEVEL, replace_value
    ),
    'Posterize': posterize_level_to_args(MAX_LEVEL),
    'ShearY': shear_level_to_args(MAX_LEVEL, replace_value),
}


class RandomAugment(object):

    def __init__(self, N=2, M=10, isPIL=False, augs=[]):
        self.N = N
        self.M = M
        self.isPIL = isPIL
        if augs:
            self.augs = augs
        else:
            self.augs = list(arg_dict.keys())

    def get_random_ops(self):
        sampled_ops = np.random.choice(self.augs, self.N)
        return [(op, 0.5, self.M) for op in sampled_ops]

    def __call__(self, img):
        if self.isPIL:
            img = np.array(img)
        ops = self.get_random_ops()
        for name, prob, level in ops:
            if np.random.random() > prob:
                continue
            args = arg_dict[name](level)
            img = func_dict[name](img, *args)
        return img


if __name__ == '__main__':
    a = RandomAugment()
    img = np.random.randn(32, 32, 3)
    a(img)
278
utils.py
Normal file
@@ -0,0 +1,278 @@
import math

def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
    """Decay the learning rate with half-cycle cosine annealing"""
    lr = (init_lr - min_lr) * 0.5 * (1. + math.cos(math.pi * epoch / max_epoch)) + min_lr
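    # e.g. epoch 0 -> lr = init_lr, epoch = max_epoch -> lr = min_lr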
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
    """Warmup the learning rate linearly from init_lr to max_lr"""
    lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max_step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
    """Decay the learning rate by decay_rate every epoch, down to min_lr"""
    lr = max(min_lr, init_lr * (decay_rate**epoch))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

import numpy as np
import io
import os
import time
from collections import defaultdict, deque
import datetime

import torch
import torch.distributed as dist

class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]
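        # count/total are summed across ranks, so global_avg agrees on every
        # process; the sliding-window deque stays rank-local (see warning above)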

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def global_avg(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f}".format(name, meter.global_avg)
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


def compute_acc(logits, label, reduction='mean'):
    ret = (torch.argmax(logits, dim=1) == label).float()
    if reduction == 'none':
        return ret.detach()
    elif reduction == 'mean':
        return ret.mean().item()

def compute_n_params(model, return_str=True):
    tot = 0
    for p in model.parameters():
        w = 1
        for x in p.shape:
            w *= x
        tot += w
    if return_str:
        if tot >= 1e6:
            return '{:.1f}M'.format(tot / 1e6)
        else:
            return '{:.1f}K'.format(tot / 1e3)
    else:
        return tot

def setup_for_distributed(is_master):
    """
    This function disables printing when not in the master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
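        # RANK / WORLD_SIZE / LOCAL_RANK are the variables exported by
        # torch.distributed.launch / torchrun for the default env:// init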
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}, world size {}): {}'.format(
        args.rank, args.world_size, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)