'''
 * Copyright (c) 2022, salesforce.com, inc.
 * All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
 * By Junnan Li
 * Based on huggingface code base
 * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
'''

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.file_utils import (
    ModelOutput,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig

logger = logging.get_logger(__name__)


class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        embeddings = inputs_embeds

        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
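
# Note: unlike the stock HF BertEmbeddings, this variant uses only word and
# position embeddings (no token_type embeddings), as the class docstring states.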


class BertSelfAttention(nn.Module):
    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        outputs = outputs + (past_key_value,)
        return outputs
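
# BertSelfAttention returns a tuple of
#   (context_layer, [attention_probs if output_attentions], (key_layer, value_layer)),
# so callers can always take the last element as the present key/value cache.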


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)
        self.layer_num = layer_num
        if self.config.add_cross_attention:
            self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        mode=None,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]

        if mode == 'multimodal':
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"

            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
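
# The `mode` argument selects the per-layer behaviour: when mode == 'multimodal'
# the cross-attention block runs against `encoder_hidden_states` (e.g. image
# features); for any other value the layer performs text-only self-attention.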


class BertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        mode='multimodal',
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i in range(self.config.num_hidden_layers):
            layer_module = self.layer[i]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    mode=mode,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    mode=mode,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
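
# Under gradient checkpointing the encoder forces `use_cache = False`, and the
# `mode` flag is threaded to every BertLayer, so the same stack serves both the
# unimodal and the multimodal forward passes.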


class BertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To be used as a decoder, the model needs to be initialized with both the :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)

        self.encoder = BertEncoder(config)

        self.pooler = BertPooler(config) if add_pooling_layer else None

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if is_decoder:
                batch_size, seq_length = input_shape

                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
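
    # Illustrative example (not part of the original code): for a decoder with
    # input_shape == (1, 3) and attention_mask == [[1, 1, 1]], the causal mask is
    #   [[1, 0, 0],
    #    [1, 1, 0],
    #    [1, 1, 1]]
    # and the returned additive mask is 0.0 where attention is allowed and
    # -10000.0 elsewhere, broadcastable to [batch, heads, query_len, key_len].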

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        is_decoder=False,
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif encoder_embeds is not None:
            input_shape = encoder_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = encoder_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")

        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape,
                                                                                 device, is_decoder)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_hidden_states is not None:
            if type(encoder_hidden_states) == list:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
            else:
                encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)

            if type(encoder_attention_mask) == list:
                encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
            elif encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if encoder_embeds is None:
            embedding_output = self.embeddings(
                input_ids=input_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                past_key_values_length=past_key_values_length,
            )
        else:
            embedding_output = encoder_embeds

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
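
# Usage sketch (illustrative only; `tokenizer`, `captions`, `image_embeds` and
# `image_atts` are placeholders, and 'text' is simply any value other than
# 'multimodal', which skips the cross-attention blocks):
#
#   text = tokenizer(captions, return_tensors="pt", padding=True)
#   # unimodal text encoding
#   text_out = model(text.input_ids, attention_mask=text.attention_mask, mode='text')
#   # multimodal encoding: text attends to precomputed image embeddings
#   mm_out = model(text.input_ids, attention_mask=text.attention_mask,
#                  encoder_hidden_states=image_embeds,
#                  encoder_attention_mask=image_atts, mode='multimodal')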


class BertLMHeadModel(BertPreTrainedModel):

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        return_logits=False,
        is_decoder=True,
        reduction='mean',
        mode='multimodal',
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
            ignored (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
            >>> import torch
            >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
            >>> config = BertConfig.from_pretrained("bert-base-cased")
            >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            is_decoder=is_decoder,
            mode=mode,
        )

        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        if return_logits:
            return prediction_scores[:, :-1, :].contiguous()

        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if reduction == 'none':
                lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
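
# Usage sketch (illustrative only; `tokenizer`, `image_embeds` and `image_atts`
# are placeholders for a tokenizer and precomputed visual features, and the
# extra generation kwargs are forwarded to prepare_inputs_for_generation above):
#
#   decoder = BertLMHeadModel(config)  # config.add_cross_attention = True
#   prompt = tokenizer(["a picture of"], return_tensors="pt")
#   out_ids = decoder.generate(input_ids=prompt.input_ids,
#                              encoder_hidden_states=image_embeds,
#                              encoder_attention_mask=image_atts,
#                              max_length=30, num_beams=3)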