updating model peptriever_2023-06-23T16:07:24.508460

bi_encoder.py (+88 -2)
@@ -1,7 +1,13 @@
-from
+from typing import Optional, Tuple
+
+import torch
+from transformers import BertConfig, BertModel, BertPreTrainedModel, PreTrainedModel
 from transformers.models.bert.modeling_bert import BertOnlyMLMHead
 
-
+
+class BertEmbeddingConfig(BertConfig):
+    n_output_dims: int
+    distance_func: str = "euclidean"
 
 
 class BiEncoderConfig(BertEmbeddingConfig):
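The first hunk defines `BertEmbeddingConfig` with two fields on top of the standard `BertConfig`. As a rough sketch of how such a config might be built (the `bi_encoder` import path and every value below are illustrative assumptions, not taken from the commit; `transformers`' `PretrainedConfig` stores unknown keyword arguments as attributes):

```python
# Hypothetical sketch -- module path and all values are assumptions.
from bi_encoder import BertEmbeddingConfig

config = BertEmbeddingConfig(
    n_output_dims=32,         # new field: dimensionality of the final embedding
    distance_func="angular",  # new field: "euclidean" (Tanh) or "angular" (L2 norm)
)
print(config.n_output_dims, config.distance_func)  # 32 angular
```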
@@ -62,3 +68,83 @@ def _replace_max_length(config, length_key):
     c1["max_position_embeddings"] = c1.pop(length_key)
     config1 = BertEmbeddingConfig(**c1)
     return config1
+
+
+class L2Norm:
+    def __call__(self, x):
+        return x / torch.norm(x, p=2, dim=-1, keepdim=True)
+
+
+class BertForEmbedding(BertPreTrainedModel):
+    config_class = BertEmbeddingConfig
+
+    def __init__(self, config: BertEmbeddingConfig):
+        super().__init__(config)
+        n_output_dims = config.n_output_dims
+        self.fc = torch.nn.Linear(config.hidden_size, n_output_dims)
+        self.bert = BertModel(config)
+        self.activation = _get_activation(config.distance_func)
+        self.post_init()
+
+    def forward(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        token_type_ids: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> torch.Tensor:
+        embedding, _ = self.forward_with_state(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        return embedding
+
+    def forward_with_state(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        token_type_ids: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.Tensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        encoded = self.bert(
+            input_ids,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        pooler_output = encoded.pooler_output
+        logits = self.fc(pooler_output)
+        embedding = self.activation(logits)
+        return embedding, encoded.last_hidden_state
+
+
+def _get_activation(distance_func: str):
+    if distance_func == "euclidean":
+        activation = torch.nn.Tanh()
+    elif distance_func == "angular":
+        activation = L2Norm()  # type: ignore
+    else:
+        raise NotImplementedError()
+    return activation
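The second hunk adds the embedding head itself: pooled BERT output, then a linear projection to `n_output_dims`, then an activation selected by `distance_func`. A minimal end-to-end sketch, assuming the file is importable as `bi_encoder` and using randomly initialized weights with dummy token ids (all shapes and values below are illustrative, not from the commit):

```python
# Hypothetical usage sketch -- module path, shapes, and values are assumptions.
import torch

from bi_encoder import BertEmbeddingConfig, BertForEmbedding

config = BertEmbeddingConfig(n_output_dims=32, distance_func="angular")
model = BertForEmbedding(config)  # randomly initialized; for shape-checking only
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 16))  # dummy batch of token ids
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    # forward() returns only the pooled embedding ...
    embedding = model(input_ids=input_ids, attention_mask=attention_mask)
    # ... while forward_with_state() also exposes the last hidden state.
    embedding2, hidden = model.forward_with_state(
        input_ids=input_ids, attention_mask=attention_mask
    )

print(embedding.shape)                # torch.Size([2, 32]) -- n_output_dims
print(torch.norm(embedding, dim=-1))  # ~1.0 everywhere: "angular" L2-normalizes
```

The activation pairing is presumably chosen to match the distance used downstream: Tanh keeps coordinates bounded for Euclidean distance, while L2 normalization makes dot products equivalent to cosine (angular) similarity.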