from copy import deepcopy
import pickle
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import Tensor, device
from torch.nn import CrossEntropyLoss, Parameter
from torch.nn.init import normal_, xavier_uniform_

from transformers import AutoModel, AutoTokenizer, BertTokenizer
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.utils import logging
from transformers.utils.hub import cached_file

from .TAAS_utils import *
from .graphormer import Graphormer3D
from .htc_loss import HTCLoss
from .ner_model import NER_model

# Defined here in case the shared utils module does not already provide one.
logger = logging.get_logger(__name__)

# Resolve the HTC-code-to-Chinese lookup table shipped with the hub checkpoint.
remap_code_2_chn_file_path = cached_file(
    'Cainiao-AI/TAAS',
    'remap_code_2_chn.pkl'
)
s2_label_dict_remap = {
    0: '0', 1: '1', 2: '2', 3: '3',
    4: '4', 5: '5', 6: '6', 7: '7',
    8: '8', 9: '9', 10: 'a', 11: 'b',
    12: 'c', 13: 'd', 14: 'e', 15: 'f',
}
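# Note: the geolocation head predicts one of 16 classes per output slot;
# remapping each argmax index through this table and joining the characters
# yields a hexadecimal string naming an S2 grid cell, e.g. [1, 10, 15] -> '1af'
# (see TAAS.geolocate below).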


class StellarEmbedding(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.ner_type_embeddings = nn.Embedding(10, config.hidden_size)
        self.use_task_id = config.use_task_id
        if config.use_task_id:
            self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.register_buffer("token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long),
                             persistent=False)
        self._reset_parameters()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        ner_type_ids: Optional[torch.LongTensor] = None,
        task_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length: seq_length + past_key_values_length]

        # Default to the registered all-zero token_type_ids buffer when none are given.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        if ner_type_ids is not None:
            ner_type_embeddings = self.ner_type_embeddings(ner_type_ids)
            embeddings = inputs_embeds + token_type_embeddings + ner_type_embeddings
        else:
            embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings

        if self.use_task_id:
            if task_type_ids is None:
                task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
            task_type_embeddings = self.task_type_embeddings(task_type_ids)
            embeddings += task_type_embeddings

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                normal_(p, mean=0.0, std=0.02)

    def set_pretrained_weights(self, path):
        """Copy the embedding weights out of a pretrained ERNIE checkpoint."""
        pre_train_weights = torch.load(path, map_location=torch.device('cpu'))
        new_weights = dict()
        for layer in self.state_dict().keys():
            if layer == 'position_ids':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.position_ids']
            elif layer == 'word_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.word_embeddings.weight']
            elif layer == 'position_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.position_embeddings.weight']
            elif layer == 'token_type_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.token_type_embeddings.weight']
            elif layer == 'task_type_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.task_type_embeddings.weight']
            elif layer == 'LayerNorm.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.LayerNorm.weight']
            elif layer == 'LayerNorm.bias':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.LayerNorm.bias']
            else:
                new_weights[layer] = self.state_dict()[layer]
        self.load_state_dict(new_weights)

    def save_weights(self, path):
        torch.save(self.state_dict(), path)

    def load_weights(self, path):
        self.load_state_dict(torch.load(path))


class StellarLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ErnieAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = ErnieAttention(config, position_embedding_type="absolute")
        self.intermediate = ErnieIntermediate(config)
        self.output = ErnieOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values are at positions 1, 2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is a tuple of present key/value states
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross-attention cached key/values are at positions 3, 4 of the past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # append the cross-attention key/values to the present key/value states
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the present key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class StellarEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([StellarLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class StellarPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token ([CLS]).
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class StellarModel(nn.Module, ModuleUtilsMixin):
    """Transformer encoder with an optional pooling layer.

    ModuleUtilsMixin is mixed in so that the `invert_attention_mask` and
    `_convert_head_mask_to_5d` helpers used below resolve correctly.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__()
        self.config = config
        self.encoder = StellarEncoder(config)
        self.pooler = StellarPooler(config) if add_pooling_layer else None

        self._reset_parameters()

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class PreTrainedModel.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    def forward(
        self,
        h_input,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        task_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)

        # Guarded with hasattr(self, "embeddings"): this model keeps its embedding
        # module outside the class, so fall back to all-zero token type ids.
        if token_type_ids is None:
            if hasattr(self, "embeddings") and hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length] ourselves, in which case
        # we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention, make it
        # broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare the head mask if needed: 1.0 in head_mask indicates we keep the
        # head. The input head_mask has shape [num_heads] or
        # [num_hidden_layers x num_heads] and is converted to shape
        # [num_hidden_layers x batch x num_heads x seq_length x seq_length].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        encoder_outputs = self.encoder(
            h_input,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
    def get_extended_attention_mask(
        self, attention_mask: Tensor, input_shape: Tuple[int], device: device = None, dtype: torch.float = None
    ) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (`Tuple[int]`):
                The shape of the input to the model.

        Returns:
            `torch.Tensor` The extended attention mask with the same dtype as `attention_mask.dtype`.
        """
        if dtype is None:
            dtype = torch.float32

        if not (attention_mask.dim() == 2 and self.config.is_decoder):
            # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder`
            if device is not None:
                warnings.warn(
                    "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
                )

        if attention_mask.dim() == 3:
            # A [batch_size, from_seq_length, to_seq_length] mask only needs to be
            # made broadcastable to all heads.
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of shape [batch_size, seq_length]:
            # for a decoder, also apply a causal mask; otherwise just make it
            # broadcastable to [batch_size, num_heads, seq_length, seq_length].
            if self.config.is_decoder:
                extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(
                    input_shape, attention_mask, device
                )
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this creates a tensor that is 0.0 for positions to
        # attend and the minimum value of the dtype for masked positions. As it
        # is added to the raw scores before the softmax, this is effectively the
        # same as removing the masked positions entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min
        return extended_attention_mask
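    # Illustrative sketch (not part of the model): for a 2-D padding mask
    # [[1, 1, 0]] on a non-decoder config, the method above returns an additive
    # bias of shape (batch, 1, 1, seq) that broadcasts over heads and query
    # positions:
    #
    #     mask = torch.tensor([[1, 1, 0]])
    #     bias = (1.0 - mask[:, None, None, :].float()) * torch.finfo(torch.float32).min
    #     # bias -> tensor([[[[0., 0., -3.4028e+38]]]])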

    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
        """
        Prepare the head mask if needed.

        Args:
            head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
            num_hidden_layers (`int`):
                The number of hidden layers in the model.
            is_attention_chunked (`bool`, *optional*, defaults to `False`):
                Whether or not the attention scores are computed by chunks.

        Returns:
            `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
            `[None]` for each layer.
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def _reset_parameters(self):
        r"""Initiate parameters in the transformer model."""
        for p in self.parameters():
            if p.dim() > 1:
                normal_(p, mean=0.0, std=self.config.initializer_range)

    def save_weights(self, path):
        torch.save(self.state_dict(), path)

    def load_weights(self, path):
        self.load_state_dict(torch.load(path))


class TAAS(PreTrainedModel):
    def __init__(self, config, return_last_hidden_state=False):
        super(TAAS, self).__init__(config)
        """
        :param d_model: d_k = d_v = d_model / nhead = 64; dimension of the model's vectors (paper default: 512)
        :param nhead: number of heads in multi-head attention (paper default: 8)
        :param num_encoder_layers: number of stacked encoder layers, the N in the paper (paper default: 6)
        :param num_decoder_layers: number of stacked decoder layers, the N in the paper (paper default: 6)
        :param dim_feedforward: dimension of the feed-forward vectors (paper default: 2048)
        :param dropout: dropout rate (paper default: 0.1)
        """

        self.config = deepcopy(config)
        self.return_last_hidden_state = return_last_hidden_state
        self.dropout = nn.Dropout(self.config.hidden_dropout_prob)

        # Shared input embedding and the main (Stellar) transformer encoder.
        self.embedding = StellarEmbedding(self.config)
        self.embedding_weights = Parameter(torch.ones(1, 1, self.config.hidden_size))

        self.stellar_config = deepcopy(config)
        self.stellar_model = StellarModel(self.stellar_config)

        # Graph encoder over the per-address pooled representations.
        self.graphormer = Graphormer3D()

        # Single-layer encoder that fuses graph and token representations.
        self.encoder_config = deepcopy(config)
        self.encoder_config.num_hidden_layers = 1
        self.encoder = StellarModel(self.encoder_config)
        self.encoder_out_dim = self.encoder_config.hidden_size

        # Geolocation (S2 grid) head: 33 slots of 16 classes each.
        self.gc_trans = nn.Linear(self.encoder_out_dim, 16 * 33, bias=True)

        # Masked-language-model head reused from ERNIE.
        self.cls = ErnieForMaskedLM(self.stellar_config).cls

        self.down_hidden_dim = 512
        self.down_kernel_num = 128
        self.alias_trans = nn.Linear(self.encoder_out_dim, self.down_hidden_dim, bias=True)
        self.alias_trans2 = torch.nn.Conv2d(1, self.down_kernel_num, (2, self.down_hidden_dim), stride=1, bias=True)
        self.alias_layer = nn.Linear(self.down_kernel_num * 5, 2 * 5, bias=True)

        self.aoi_trans = nn.Linear(self.encoder_out_dim, self.down_hidden_dim, bias=True)
        self.aoi_trans2 = torch.nn.Conv2d(1, self.down_kernel_num, (2, self.down_hidden_dim), stride=1, bias=True)
        self.aoi_layer = nn.Linear(self.down_kernel_num * 5, 2 * 5, bias=True)

        # Hierarchical text classification (HTC) head: 5 levels of 100 classes each.
        self.htc_trans = nn.Linear(self.encoder_out_dim, 5 * 100, bias=True)

        # Named-entity-recognition head (10 entity types plus "other").
        self.ner_model = NER_model(vocab_size=11)
    def forward(self,
                input_ids,
                attention_mask,
                token_type_ids,
                node_position_ids,
                spatial_pos, in_degree, out_degree, edge_type_matrix, edge_input,
                prov_city_mask: Optional[torch.Tensor] = None,
                sequence_len=6,
                labels: Optional[torch.Tensor] = None
                ):
        """
        :param input_ids: [sequence_len * batch_size, src_len]
        :param attention_mask: [sequence_len * batch_size, src_len]
        :param token_type_ids: [sequence_len * batch_size, src_len]
        :param sequence_len: number of addresses (graph nodes) per sample
        :param labels: masked-language-model labels, if training
        :return: task outputs (loss variant when labels is given)
        """
        batch_size_input = int(input_ids.shape[0] / sequence_len)

        embedding_output = self.embedding(input_ids=input_ids, token_type_ids=token_type_ids)

        # First pass: token-level encoding of every address in the batch.
        stellar_predictions = self.stellar_model(embedding_output,
                                                 input_ids=input_ids,
                                                 token_type_ids=token_type_ids,
                                                 attention_mask=attention_mask)
        last_hidden_state = stellar_predictions[0].contiguous().view(batch_size_input, sequence_len, -1,
                                                                     self.encoder_out_dim)
        pooler_output = stellar_predictions[1].contiguous().view(batch_size_input, sequence_len, self.encoder_out_dim)

        # Graph pass: refine the per-address [CLS] vectors with Graphormer, then
        # splice them back in front of the token-level representations.
        h_ = self.graphormer(pooler_output, spatial_pos, in_degree, out_degree, edge_type_matrix, edge_input, node_position_ids)
        h_ = h_.unsqueeze(2)
        new_hidden_state = torch.cat((h_, last_hidden_state[:, :, 1:, :]), dim=2)
        new_hidden_state = new_hidden_state.contiguous().view(batch_size_input * sequence_len, -1, self.encoder_out_dim)

        # Second pass: one fusion encoder layer over the spliced sequence.
        encoder_outputs = self.encoder(new_hidden_state,
                                       input_ids=input_ids,
                                       token_type_ids=token_type_ids,
                                       attention_mask=attention_mask)
        final_hidden_state = encoder_outputs[0]
        final_pooler_output = encoder_outputs[1].contiguous().view(batch_size_input, sequence_len, self.encoder_out_dim)
        prediction_scores = self.cls(final_hidden_state)

        # Task heads: geolocation (16-way per slot) and HTC (100-way per level).
        gc_layer_out = self.gc_trans(final_pooler_output)
        gc_layer_out = gc_layer_out.contiguous().view(-1, 16)

        htc_layer_out = self.htc_trans(final_pooler_output)
        htc_layer_out = htc_layer_out.contiguous().view(-1, 100)

        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            return [gc_layer_out, masked_lm_loss, prediction_scores, htc_layer_out]

        if self.return_last_hidden_state:
            return final_pooler_output, pooler_output

        return gc_layer_out, final_pooler_output, final_hidden_state, prediction_scores, last_hidden_state, htc_layer_out

    def get_htc_code(self, htc_layer_out):
        htc_loss_fct = HTCLoss(device=self.device, reduction='mean')
        htc_pred = htc_loss_fct.get_htc_code(htc_layer_out)
        return htc_pred
    def decode_htc_code_2_chn(self, htc_pred):
        arr = htc_pred
        with open(remap_code_2_chn_file_path, 'rb') as fr:
            remap_code_2_chn = pickle.loads(fr.read())
        return remap_code_2_chn['{:02d}{:02d}{:02d}{:01d}{:02d}'.format(arr[0], arr[1], arr[2], arr[3], arr[4])]
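    # Key format sketch (illustrative values): the five HTC levels are
    # zero-padded to widths 2/2/2/1/2, so a prediction like [1, 2, 3, 4, 5]
    # looks up the key '010203405'.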

    def addr_standardize(self, address):
        tokenizer = BertTokenizer.from_pretrained('nghuyong/ernie-3.0-base-zh')
        encoded_input = tokenizer(address, return_tensors='pt', padding='max_length',
                                  truncation=True,
                                  max_length=60,
                                  add_special_tokens=True).to(self.device)
        word_ids = encoded_input['input_ids']
        attention_mask = encoded_input['attention_mask']

        # Placeholder single-node graph inputs: the NER model still expects them.
        length = len(word_ids)
        node_position_ids = torch.tensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        spatial_pos = torch.LongTensor(np.zeros((length, 1, 1), dtype=np.int64)).to(self.device)
        in_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        out_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        edge_type_matrix = torch.LongTensor(8 * np.ones((length, 1, 1), dtype=np.int64)).to(self.device)
        edge_input = torch.LongTensor(8 * np.ones((length, 1, 1, 1), dtype=np.int64)).to(self.device)

        logits = self.ner_model(**encoded_input,
                                node_position_ids=node_position_ids,
                                spatial_pos=spatial_pos,
                                in_degree=in_degree,
                                out_degree=out_degree,
                                edge_type_matrix=edge_type_matrix,
                                edge_input=edge_input)[0]
        output = []
        ner_labels = torch.argmax(logits, dim=-1)
        if len(address) == 1:
            ner_labels = ner_labels.unsqueeze(0)
        for i in range(len(address)):
            ner_label = ner_labels[i]
            word_id = word_ids[i]

            # Strip padding and the [CLS]/[SEP] positions.
            idx = torch.where(attention_mask[i] > 0)
            ner_label = ner_label[idx][1:-1]
            word_id = word_id[idx][1:-1]

            # Drop tokens tagged as "other" (label 0).
            idx1 = torch.where(ner_label != 0)
            ner_label = ner_label[idx1].tolist()
            word_id = word_id[idx1].tolist()

            # Re-append suffix characters after the last building (8), unit (9)
            # and room (10) entity via hard-coded ERNIE vocabulary ids, so the
            # decoded address reads naturally.
            if 8 in ner_label:
                idx2 = ''.join([str(i) for i in ner_label]).rfind('8')
                word_id.insert(idx2 + 1, 2770)
                ner_label.insert(idx2 + 1, 8)
            if 9 in ner_label:
                idx2 = ''.join([str(i) for i in ner_label]).rfind('9')
                word_id.insert(idx2 + 1, 269)
                word_id.insert(idx2 + 2, 183)
                ner_label.insert(idx2 + 1, 9)
                ner_label.insert(idx2 + 2, 9)
            if 10 in ner_label:
                idx2 = ''.join([str(i) for i in ner_label]).rfind('10')
                word_id.insert(idx2 + 1, 485)
                ner_label.insert(idx2 + 1, 10)

            output.append(tokenizer.decode(word_id).replace(' ', ''))

        return output
    def addr_entity(self, address):
        tokenizer = BertTokenizer.from_pretrained('nghuyong/ernie-3.0-base-zh')
        encoded_input = tokenizer(address, return_tensors='pt', padding='max_length',
                                  truncation=True,
                                  max_length=60,
                                  add_special_tokens=True).to(self.device)
        word_ids = encoded_input['input_ids']
        attention_mask = encoded_input['attention_mask']

        # Placeholder single-node graph inputs: the NER model still expects them.
        length = len(word_ids)
        node_position_ids = torch.tensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        spatial_pos = torch.LongTensor(np.zeros((length, 1, 1), dtype=np.int64)).to(self.device)
        in_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        out_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        edge_type_matrix = torch.LongTensor(8 * np.ones((length, 1, 1), dtype=np.int64)).to(self.device)
        edge_input = torch.LongTensor(8 * np.ones((length, 1, 1, 1), dtype=np.int64)).to(self.device)

        logits = self.ner_model(**encoded_input,
                                node_position_ids=node_position_ids,
                                spatial_pos=spatial_pos,
                                in_degree=in_degree,
                                out_degree=out_degree,
                                edge_type_matrix=edge_type_matrix,
                                edge_input=edge_input)[0]

        ner_labels = torch.argmax(logits, dim=-1)
        if len(address) == 1:
            ner_labels = ner_labels.unsqueeze(0)

        output = []
        # Entity labels 1-10: province, city, district, street/town, road,
        # road number, POI, building number, unit number, house number.
        tmp = {1: '省', 2: '市', 3: '区', 4: '街道/镇', 5: '道路', 6: '道路号', 7: 'poi', 8: '楼栋号', 9: '单元号', 10: '门牌号'}
        for i in range(len(address)):
            ner_label = ner_labels[i]
            word_id = word_ids[i]
            idx = torch.where(attention_mask[i] > 0)
            ner_label = ner_label[idx][1:-1]
            word_id = word_id[idx][1:-1]

            addr_dict = dict.fromkeys(tmp.values(), '无')  # '无' = none/absent
            for j in range(1, 11):
                idx = torch.where(ner_label == j)
                addr_dict[tmp[j]] = ''.join(tokenizer.decode(word_id[idx]).replace(' ', ''))

            output.append(deepcopy(addr_dict))

        return output
    def house_info(self, address):
        tokenizer = BertTokenizer.from_pretrained('nghuyong/ernie-3.0-base-zh')
        encoded_input = tokenizer(address, return_tensors='pt', padding='max_length',
                                  truncation=True,
                                  max_length=60,
                                  add_special_tokens=True).to(self.device)
        word_ids = encoded_input['input_ids']
        attention_mask = encoded_input['attention_mask']

        # Placeholder single-node graph inputs: the NER model still expects them.
        length = len(word_ids)
        node_position_ids = torch.tensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        spatial_pos = torch.LongTensor(np.zeros((length, 1, 1), dtype=np.int64)).to(self.device)
        in_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        out_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        edge_type_matrix = torch.LongTensor(8 * np.ones((length, 1, 1), dtype=np.int64)).to(self.device)
        edge_input = torch.LongTensor(8 * np.ones((length, 1, 1, 1), dtype=np.int64)).to(self.device)

        logits = self.ner_model(**encoded_input,
                                node_position_ids=node_position_ids,
                                spatial_pos=spatial_pos,
                                in_degree=in_degree,
                                out_degree=out_degree,
                                edge_type_matrix=edge_type_matrix,
                                edge_input=edge_input)[0]

        ner_labels = torch.argmax(logits, dim=-1)
        if len(address) == 1:
            ner_labels = ner_labels.unsqueeze(0)
        output = []
        for i in range(len(address)):
            ner_label = ner_labels[i]
            word_id = word_ids[i]
            idx = torch.where(attention_mask[i] > 0)
            ner_label = ner_label[idx][1:-1]
            word_id = word_id[idx][1:-1]

            # Collect token ids of the building (8), unit (9) and room (10) entities.
            building = []
            unit = []
            room = []
            for j in range(len(ner_label)):
                if ner_label[j] == 8:
                    building.append(word_id[j])
                elif ner_label[j] == 9:
                    unit.append(word_id[j])
                elif ner_label[j] == 10:
                    room.append(word_id[j])

            # Keys: '楼栋' = building, '单元' = unit, '门牌号' = house number.
            output.append({'楼栋': tokenizer.decode(building).replace(' ', ''),
                           '单元': tokenizer.decode(unit).replace(' ', ''),
                           '门牌号': tokenizer.decode(room).replace(' ', '')})
        return output
    def addr_complet(self, address):
        tokenizer = BertTokenizer.from_pretrained('nghuyong/ernie-3.0-base-zh')
        encoded_input = tokenizer(address, return_tensors='pt', padding='max_length',
                                  truncation=True,
                                  max_length=60,
                                  add_special_tokens=True).to(self.device)
        word_ids = encoded_input['input_ids']
        attention_mask = encoded_input['attention_mask']

        # Placeholder single-node graph inputs: the NER model still expects them.
        length = len(word_ids)
        node_position_ids = torch.tensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        spatial_pos = torch.LongTensor(np.zeros((length, 1, 1), dtype=np.int64)).to(self.device)
        in_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        out_degree = torch.LongTensor(np.ones((length, 1), dtype=np.int64)).to(self.device)
        edge_type_matrix = torch.LongTensor(8 * np.ones((length, 1, 1), dtype=np.int64)).to(self.device)
        edge_input = torch.LongTensor(8 * np.ones((length, 1, 1, 1), dtype=np.int64)).to(self.device)

        logits = self.ner_model(**encoded_input,
                                node_position_ids=node_position_ids,
                                spatial_pos=spatial_pos,
                                in_degree=in_degree,
                                out_degree=out_degree,
                                edge_type_matrix=edge_type_matrix,
                                edge_input=edge_input)[0]

        ner_labels = torch.argmax(logits, dim=-1)
        if len(address) == 1:
            ner_labels = ner_labels.unsqueeze(0)
        if isinstance(address, list):
            address = address[0]  # completion operates on a single address

        # Use G2PTL's HTC head to predict the administrative hierarchy.
        g2ptl_model = AutoModel.from_pretrained('Cainiao-AI/G2PTL', trust_remote_code=True)
        g2ptl_model.eval()
        g2ptl_output = g2ptl_model(**encoded_input)
        htc_layer_out = g2ptl_output.htc_layer_out
        arr = g2ptl_model.get_htc_code(htc_layer_out)
        htc_pred = '{:02d}{:02d}{:02d}{:01d}{:02d}'.format(arr[0], arr[1], arr[2], arr[3], arr[4])
        # Note: this lookup table is read from the current working directory,
        # unlike remap_code_2_chn.pkl, which is resolved via cached_file above.
        with open('remap_code_2_chn_with_all_htc.pkl', 'rb') as fr:
            remap_code_2_chn = pickle.loads(fr.read())

        try:
            htc_list = remap_code_2_chn[htc_pred][-1]
        except KeyError:
            return address

        # The four municipalities (Beijing, Shanghai, Chongqing, Tianjin) carry
        # the same name at the province and city levels, so drop the duplicate.
        if htc_list[0] in ['北京', '上海', '重庆', '天津']:
            htc_list = htc_list[1:]
            htc_list.append('')

        idx = torch.where(attention_mask > 0)
        ner_label = ner_labels[idx][1:-1].cpu().numpy().tolist()
        word_id = word_ids[idx][1:-1]

        # Insert any missing level (1: province .. 4: street/town) just before
        # the first finer-grained entity.
        for i in range(1, 5):
            if i not in ner_label:
                if i == 1:
                    address = htc_list[0] + address
                    ner_label = [1] * len(htc_list[0]) + ner_label
                else:
                    idx = 0
                    for j in range(len(ner_label)):
                        if ner_label[j] > i:
                            idx = j
                            break
                    address = address[:idx] + htc_list[i - 1] + address[idx:]
                    ner_label = ner_label[:idx] + [i] * len(htc_list[i - 1]) + ner_label[idx:]

        return address
    def geolocate(self, address):
        g2ptl_model = AutoModel.from_pretrained('Cainiao-AI/G2PTL', trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained('Cainiao-AI/G2PTL', trust_remote_code=True)
        encoded_input = tokenizer(address, return_tensors='pt')

        g2ptl_model.eval()
        output = g2ptl_model(**encoded_input)
        geo_labels = torch.argmax(output.gc_layer_out, dim=-1)
        output = [s2_label_dict_remap[int(i)] for i in geo_labels]

        # The prefix 's2网格化结果' translates to "S2 gridding result".
        return 's2网格化结果:' + ''.join(output)

    def pickup_ETA(self, address):
        print('Users can get the address embeddings using model.encode(address) and feed them to their own ETA model.')

    def route_predict(self, route_data):
        print('Users can get the address embeddings using model.encode(address) and feed them to their own route prediction model.')

    def encode(self, address):
        tokenizer = AutoTokenizer.from_pretrained('Cainiao-AI/G2PTL', trust_remote_code=True)
        g2ptl_model = AutoModel.from_pretrained('Cainiao-AI/G2PTL', trust_remote_code=True)
        encoded_input = tokenizer(address, return_tensors='pt', padding='max_length',
                                  truncation=True,
                                  max_length=60,
                                  add_special_tokens=True)
        g2ptl_model.eval()
        output = g2ptl_model(**encoded_input)

        return output.final_hidden_state
    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)

    def generate_square_subsequent_mask(self, sz):
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask
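    # Illustrative sketch: generate_square_subsequent_mask(3) returns
    #     tensor([[0., -inf, -inf],
    #             [0.,   0., -inf],
    #             [0.,   0.,   0.]])
    # i.e. an additive mask letting position i attend only to positions <= i.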

    def save_weights(self, path):
        torch.save(self.state_dict(), path)

    def load_weights(self, path):
        self.load_state_dict(torch.load(path, map_location=torch.device('cpu')), strict=False)

    def set_pretrained_weights(self, path):
        """Initialise from an ERNIE checkpoint, remapping embedding and encoder weights."""
        pre_train_weights = torch.load(path, map_location=torch.device('cpu'))
        new_weights = dict()

        for layer in self.state_dict().keys():
            if layer == 'embedding.position_ids':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.position_ids']
            elif layer == 'embedding.word_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.word_embeddings.weight']
            elif layer == 'embedding.position_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.position_embeddings.weight']
            elif layer == 'embedding.token_type_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.token_type_embeddings.weight']
            elif layer == 'embedding.task_type_embeddings.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.task_type_embeddings.weight']
            elif layer == 'embedding.LayerNorm.weight':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.LayerNorm.weight']
            elif layer == 'embedding.LayerNorm.bias':
                new_weights[layer] = pre_train_weights['ernie_model.embeddings.LayerNorm.bias']
            elif 'stellar_model' in layer:
                new_weights[layer] = pre_train_weights[layer.replace('stellar_model', 'ernie_model')]
            elif layer in pre_train_weights.keys():
                new_weights[layer] = pre_train_weights[layer]
            else:
                new_weights[layer] = self.state_dict()[layer]

        self.load_state_dict(new_weights)
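

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes this module is loaded
# as the hub checkpoint 'Cainiao-AI/TAAS' with trust_remote_code=True and that
# the example address is representative; adapt both to your setup.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = AutoModel.from_pretrained('Cainiao-AI/TAAS', trust_remote_code=True)
    model.eval()

    addresses = ['浙江省杭州市余杭区文一西路969号']
    with torch.no_grad():
        print(model.addr_entity(addresses))       # per-level entity dict
        print(model.addr_standardize(addresses))  # normalized address string
        print(model.geolocate(addresses[0]))      # S2 grid token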