import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel, PretrainedConfig


class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional encodings, stored as a non-trainable buffer."""

    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()
        pos_emb = torch.zeros(max_len, d_model).float()
        pos_emb.requires_grad = False
        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
        pos_emb[:, 0::2] = torch.sin(position * div_term)
        pos_emb[:, 1::2] = torch.cos(position * div_term)
        pos_emb = pos_emb.unsqueeze(0)
        self.register_buffer('pos_emb', pos_emb)

    def forward(self, x):
        return self.pos_emb[:, :x.size(1)]


class TokenEmbedding(nn.Module):
    def __init__(self, d_model):
        super(TokenEmbedding, self).__init__()
        D_INP = 1
        self.conv = nn.Conv1d(in_channels=D_INP, out_channels=d_model, kernel_size=3, padding=1, padding_mode='circular')

    def forward(self, x):
        x = self.conv(x.transpose(-1, 1)).transpose(-1, 1)
        return x


class TemporalEmbedding(nn.Module):
    def __init__(self, d_model, num_features):
        super(TemporalEmbedding, self).__init__()
        self.embed = nn.Linear(num_features, d_model)

    def forward(self, x):
        x = x.float()
        return self.embed(x)


class SubjectEmbedding(nn.Module):
    def __init__(self, d_model):
        super(SubjectEmbedding, self).__init__()
        self.id_embedding = nn.Linear(1, d_model)

    def forward(self, x):
        x = x.float().unsqueeze(1)
        embed_x = self.id_embedding(x)
        return embed_x


class DataEmbedding(nn.Module):
    """Sums value, positional, and time embeddings, then prepends a per-subject token,
    so the embedded sequence is one step longer than the input."""

    def __init__(self, d_model, r_drop, num_features):
        super(DataEmbedding, self).__init__()
        self.value_embedding = TokenEmbedding(d_model)
        self.time_embedding = TemporalEmbedding(d_model, num_features)
        self.positional_embedding = PositionalEmbedding(d_model)
        self.subject_embedding = SubjectEmbedding(d_model)
        self.dropout = nn.Dropout(r_drop)

    def forward(self, x_id, x, x_mark):
        x = self.value_embedding(x) + self.positional_embedding(x) + self.time_embedding(x_mark)
        x = torch.cat((self.subject_embedding(x_id).unsqueeze(1), x), dim=1)
        return self.dropout(x)


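# Illustrative only, not part of the original model code: a minimal shape check for
# DataEmbedding with arbitrary example sizes (batch 8, len_seq 48, 5 time features).
# The prepended subject token makes the embedded sequence one step longer than the input.
def _demo_data_embedding():
    emb = DataEmbedding(d_model=64, r_drop=0.1, num_features=5)
    x_id = torch.zeros(8)            # one subject id per series
    x = torch.randn(8, 48, 1)        # (batch, len_seq, 1) glucose values
    x_mark = torch.randn(8, 48, 5)   # (batch, len_seq, num_features) time features
    out = emb(x_id, x, x_mark)
    assert out.shape == (8, 49, 64)  # len_seq + 1 subject token

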
class CausalConv1d(torch.nn.Conv1d):
    """Conv1d whose padding is trimmed so each output step depends only on current and past inputs."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        self.__padding = (kernel_size - 1) * dilation
        super(CausalConv1d, self).__init__(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=self.__padding, dilation=dilation, groups=groups, bias=bias)

    def forward(self, input):
        result = super(CausalConv1d, self).forward(input)
        if self.__padding != 0:
            return result[:, :, :-self.__padding]
        return result


class TriangularCausalMask():
    def __init__(self, b, n, device="cpu"):
        mask_shape = [b, 1, n, n]
        with torch.no_grad():
            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)

    @property
    def mask(self):
        return self._mask


class MultiheadAttention(nn.Module):
    def __init__(self, d_model, n_heads, d_keys, mask_flag, r_att_drop=0.1):
        super(MultiheadAttention, self).__init__()
        self.h, self.d, self.mask_flag = n_heads, d_keys, mask_flag
        self.proj_q = nn.Linear(d_model, self.h * self.d)
        self.proj_k = nn.Linear(d_model, self.h * self.d)
        self.proj_v = nn.Linear(d_model, self.h * self.d)
        self.proj_out = nn.Linear(self.h * self.d, d_model)
        self.dropout = nn.Dropout(r_att_drop)

    def forward(self, q, k, v):
        b, n_q, n_k, h, d = q.size(0), q.size(1), k.size(1), self.h, self.d
        q, k, v = self.proj_q(q), self.proj_k(k), self.proj_v(v)
        q, k, v = map(lambda x: x.reshape(b, -1, h, d), [q, k, v])
        scores = torch.einsum('bnhd,bmhd->bhnm', (q, k))
        if self.mask_flag:
            # causal mask: each position may attend only to itself and earlier positions
            att_mask = TriangularCausalMask(b, n_q, device=q.device)
            scores.masked_fill_(att_mask.mask, -np.inf)
        att = F.softmax(scores / (self.d ** .5), dim=-1)
        att = self.dropout(att)
        att_out = torch.einsum('bhnm,bmhd->bnhd', (att, v))
        att_out = att_out.reshape(b, -1, h * d)
        out = self.proj_out(att_out)
        return out


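# Illustrative only (sizes are arbitrary, not from the original configuration): with
# mask_flag=True the attention is causal, so the output at early steps is unchanged
# when only future inputs are altered.
def _demo_causal_attention():
    torch.manual_seed(0)
    att = MultiheadAttention(d_model=32, n_heads=4, d_keys=8, mask_flag=True, r_att_drop=0.0).eval()
    x = torch.randn(2, 10, 32)
    y = att(x, x, x)
    x2 = x.clone()
    x2[:, 5:, :] = 0.0                     # perturb only future steps
    y2 = att(x2, x2, x2)
    assert torch.allclose(y[:, :5], y2[:, :5], atol=1e-6)

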
class ConvLayer(nn.Module):
    """Distillation layer: convolution plus max-pooling that roughly halves the sequence length."""

    def __init__(self, d_model):
        super(ConvLayer, self).__init__()
        self.downConv = nn.Conv1d(in_channels=d_model, out_channels=d_model, kernel_size=3, padding=1, padding_mode='circular')
        self.norm = nn.BatchNorm1d(d_model)
        self.activ = nn.ELU()
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.downConv(x.transpose(-1, 1))
        x = self.norm(x)
        x = self.activ(x)
        x = self.maxPool(x)
        x = x.transpose(-1, 1)
        return x


class EncoderLayer(nn.Module):
    def __init__(self, att, d_model, d_fcn, r_drop, activ="relu"):
        super(EncoderLayer, self).__init__()
        self.att = att
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_fcn, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_fcn, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(r_drop)
        self.activ = F.relu if activ == "relu" else F.gelu

    def forward(self, x):
        # self-attention with residual connection
        new_x = self.att(x, x, x)
        x = x + self.dropout(new_x)
        res = x = self.norm1(x)
        # position-wise feed-forward implemented as two 1x1 convolutions
        res = self.dropout(self.activ(self.conv1(res.transpose(-1, 1))))
        res = self.dropout(self.conv2(res).transpose(-1, 1))
        return self.norm2(x + res)


class Encoder(nn.Module):
    def __init__(self, enc_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.enc_layers = nn.ModuleList(enc_layers)
        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def forward(self, x):
        if self.conv_layers is not None:
            # distilling encoder: interleave attention layers with down-sampling conv layers
            for enc_layer, conv_layer in zip(self.enc_layers, self.conv_layers):
                x = enc_layer(x)
                x = conv_layer(x)
            x = self.enc_layers[-1](x)
        else:
            for enc_layer in self.enc_layers:
                x = enc_layer(x)
        if self.norm is not None:
            x = self.norm(x)
        return x


class DecoderLayer(nn.Module):
    def __init__(self, self_att, cross_att, d_model, d_fcn, r_drop, activ="relu"):
        super(DecoderLayer, self).__init__()
        self.self_att = self_att
        self.cross_att = cross_att
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_fcn, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_fcn, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(r_drop)
        self.activ = F.relu if activ == "relu" else F.gelu

    def forward(self, x_dec, x_enc):
        # masked self-attention over the decoder input, then cross-attention over the encoder output
        x_dec = x_dec + self.self_att(x_dec, x_dec, x_dec)
        x_dec = self.norm1(x_dec)
        x_dec = x_dec + self.cross_att(x_dec, x_enc, x_enc)
        res = x_dec = self.norm2(x_dec)
        res = self.dropout(self.activ(self.conv1(res.transpose(-1, 1))))
        res = self.dropout(self.conv2(res).transpose(-1, 1))
        return self.norm3(x_dec + res)


class Decoder(nn.Module):
    def __init__(self, layers, norm_layer=None):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList(layers)
        self.norm = norm_layer

    def forward(self, x_dec, x_enc):
        for layer in self.layers:
            x_dec = layer(x_dec, x_enc)
        if self.norm is not None:
            x_dec = self.norm(x_dec)
        return x_dec


class Variance(nn.Module):
    """Predicts a single log-variance per series from the embedded input, bounded to (-10, 10) by a scaled tanh."""

    def __init__(self, d_model, r_drop, len_seq):
        super(Variance, self).__init__()
        self.proj1 = nn.Linear(d_model, 1)
        self.dropout = nn.Dropout(r_drop)
        self.activ1 = nn.ReLU()
        self.proj2 = nn.Linear(len_seq + 1, 1)  # +1 for the prepended subject token
        self.activ2 = nn.Tanh()

    def forward(self, x):
        x = self.proj1(x)
        x = self.activ1(x)
        x = self.dropout(x)
        x = x.transpose(-1, 1)
        x = self.proj2(x)
        x = 10 * self.activ2(x)
        return x


class Gluformer(nn.Module):
    def __init__(self, d_model, n_heads, d_fcn, r_drop, activ, num_enc_layers, num_dec_layers, distil, len_seq, len_pred, num_features=5):
        super(Gluformer, self).__init__()
        self.len_pred = len_pred
        self.enc_embedding = DataEmbedding(d_model, r_drop, num_features)
        self.dec_embedding = DataEmbedding(d_model, r_drop, num_features)
        self.encoder = Encoder(
            [
                EncoderLayer(
                    att=MultiheadAttention(d_model=d_model, n_heads=n_heads, d_keys=d_model // n_heads, mask_flag=False, r_att_drop=r_drop),
                    d_model=d_model,
                    d_fcn=d_fcn,
                    r_drop=r_drop,
                    activ=activ) for _ in range(num_enc_layers)
            ],
            [
                ConvLayer(d_model) for _ in range(num_enc_layers - 1)
            ] if distil else None,
            norm_layer=torch.nn.LayerNorm(d_model)
        )
        self.decoder = Decoder(
            [
                DecoderLayer(
                    self_att=MultiheadAttention(d_model=d_model, n_heads=n_heads, d_keys=d_model // n_heads, mask_flag=True, r_att_drop=r_drop),
                    cross_att=MultiheadAttention(d_model=d_model, n_heads=n_heads, d_keys=d_model // n_heads, mask_flag=False, r_att_drop=r_drop),
                    d_model=d_model,
                    d_fcn=d_fcn,
                    r_drop=r_drop,
                    activ=activ) for _ in range(num_dec_layers)
            ],
            norm_layer=torch.nn.LayerNorm(d_model)
        )
        D_OUT = 1  # univariate glucose output
        self.projection = nn.Linear(d_model, D_OUT, bias=True)
        self.var = Variance(d_model, r_drop, len_seq)

    def forward(self, x_id, x_enc, x_mark_enc, x_dec, x_mark_dec):
        enc_out = self.enc_embedding(x_id, x_enc, x_mark_enc)
        var_out = self.var(enc_out)
        enc_out = self.encoder(enc_out)
        dec_out = self.dec_embedding(x_id, x_dec, x_mark_dec)
        dec_out = self.decoder(dec_out, enc_out)
        dec_out = self.projection(dec_out)
        # keep only the len_pred forecast steps; var_out is the per-series log-variance
        return dec_out[:, -self.len_pred:, :], var_out


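# Illustrative smoke test with arbitrary small sizes (not values from the original
# configuration): run a forward pass on random tensors and check the output shapes.
def _demo_gluformer_forward():
    len_seq, len_label, len_pred, n_feat, b = 48, 12, 12, 5, 4
    model = Gluformer(d_model=32, n_heads=4, d_fcn=64, r_drop=0.1, activ="relu",
                      num_enc_layers=2, num_dec_layers=1, distil=True,
                      len_seq=len_seq, len_pred=len_pred, num_features=n_feat)
    x_id = torch.zeros(b)
    x_enc = torch.randn(b, len_seq, 1)
    x_mark_enc = torch.randn(b, len_seq, n_feat)
    x_dec = torch.randn(b, len_label + len_pred, 1)
    x_mark_dec = torch.randn(b, len_label + len_pred, n_feat)
    pred, log_var = model(x_id, x_enc, x_mark_enc, x_dec, x_mark_dec)
    assert pred.shape == (b, len_pred, 1)
    assert log_var.shape == (b, 1, 1)

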
class GluformerConfig(PretrainedConfig):
    model_type = "gluformer"

    def __init__(self, d_model=64, n_heads=4, d_fcn=128, r_drop=0.1, activ="relu", num_enc_layers=2, num_dec_layers=2, distil=False, len_seq=48, len_pred=12, len_label=12, num_features=5, **kwargs):
        super().__init__(**kwargs)
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_fcn = d_fcn
        self.r_drop = r_drop
        self.activ = activ
        self.num_enc_layers = num_enc_layers
        self.num_dec_layers = num_dec_layers
        self.distil = distil
        self.len_seq = len_seq
        self.len_pred = len_pred
        # len_label is read by GluformerForTimeSeries when building the Preprocessor;
        # the default of 12 is an assumed value, not one taken from the original configuration.
        self.len_label = len_label
        self.num_features = num_features


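# Illustrative only: the config serializes through the standard save_pretrained /
# from_pretrained API inherited from PretrainedConfig; the temporary directory and
# the length values below are placeholders.
def _demo_config_roundtrip():
    import tempfile
    config = GluformerConfig(len_seq=48, len_label=12, len_pred=12)
    with tempfile.TemporaryDirectory() as tmp:
        config.save_pretrained(tmp)
        reloaded = GluformerConfig.from_pretrained(tmp)
    assert reloaded.len_pred == config.len_pred

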
class Preprocessor:
    # constants mapping raw glucose readings into the range [-SCALE_1, SCALE_1]
    UPPER = 402
    LOWER = 38
    SCALE_1 = 5
    SCALE_2 = 2

    def __init__(self, len_seq, len_pred, len_label):
        self.len_seq = len_seq
        self.len_pred = len_pred
        self.len_label = len_label

    def normalize_glucose(self, glucose):
        return (glucose - self.LOWER) / (self.UPPER - self.LOWER) * (self.SCALE_1 * self.SCALE_2) - self.SCALE_1

    def unnormalize_glucose(self, glucose):
        return (glucose + self.SCALE_1) / (self.SCALE_1 * self.SCALE_2) * (self.UPPER - self.LOWER) + self.LOWER

    def normalize_datetime(self, ts: np.ndarray) -> np.ndarray:
        # encode day-of-year, day-of-month, day-of-week, hour, and minute, each scaled to roughly [-1, 1]
        ts = np.asarray(ts, dtype="datetime64[ns]")
        d, y, m, h = ts.astype("datetime64[D]"), ts.astype("datetime64[Y]"), ts.astype("datetime64[M]"), ts.astype("datetime64[h]")
        return np.stack((
            ((d - y).astype("timedelta64[D]").astype(np.int64) + 1) / 182.5 - 1.0,
            ((d - m).astype("timedelta64[D]").astype(np.int64) + 1) / 15.5 - 1.0,
            ((d.astype(np.int64) + 3) % 7) / 3.5 - 1.0,
            ((h - d).astype("timedelta64[h]").astype(np.int64)) / 12.0 - 1.0,
            ((ts.astype("datetime64[m]") - h).astype("timedelta64[m]").astype(np.int64)) / 30.0 - 1.0,
        ), axis=-1).astype(float)

    def __call__(self, subject_id, timestamps, glucose_values):
        batch_size, seq_len = glucose_values.shape
        subject_id = torch.full((batch_size,), subject_id, dtype=torch.float)
        glucose_values = torch.tensor(glucose_values).reshape(-1, self.len_seq, 1).float()
        glucose_values = self.normalize_glucose(glucose_values)
        ts = np.asarray(timestamps, dtype=np.int64).reshape(batch_size, -1)

        # extend the timestamps by len_pred future steps, assuming 5-minute sampling intervals
        nanos_per_interval = np.int64(5 * 60 * 1e9)
        ts_deltas = np.arange(1, self.len_pred + 1, dtype=np.int64) * nanos_per_interval
        ts_deltas = ts_deltas.reshape(1, -1).repeat(batch_size, axis=0)
        y_timestamps = np.concatenate([ts[:, -self.len_label:], ts[:, -1:] + ts_deltas], axis=1)
        # decoder input: the last len_label known values followed by zero placeholders for the forecast
        decoder_input = torch.cat([glucose_values[:, -self.len_label:, :], torch.zeros(batch_size, self.len_pred, 1).float()], dim=1)

        x_ts = torch.tensor(self.normalize_datetime(ts)).float()
        y_ts = torch.tensor(self.normalize_datetime(y_timestamps)).float()
        return subject_id, glucose_values, decoder_input, x_ts, y_ts


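# Illustrative only (sizes and values are arbitrary synthetic data): the preprocessor
# turns raw readings and nanosecond timestamps into the tensors the model consumes,
# and unnormalize_glucose inverts normalize_glucose.
def _demo_preprocessor():
    len_seq, len_label, len_pred = 48, 12, 12
    prep = Preprocessor(len_seq=len_seq, len_pred=len_pred, len_label=len_label)
    glucose = np.random.uniform(70, 180, size=(2, len_seq))            # synthetic glucose readings
    start = np.datetime64("2024-01-01T00:00").astype("datetime64[ns]").astype(np.int64)
    step = np.int64(5 * 60 * 1e9)                                      # 5-minute spacing in nanoseconds
    ts = start + np.arange(len_seq, dtype=np.int64) * step
    ts = np.stack([ts, ts])                                            # same timestamps for both series
    x_id, x_enc, x_dec, x_mark_enc, y_mark_dec = prep(0, ts, glucose)
    assert x_enc.shape == (2, len_seq, 1) and x_dec.shape == (2, len_label + len_pred, 1)
    assert x_mark_enc.shape == (2, len_seq, 5) and y_mark_dec.shape == (2, len_label + len_pred, 5)
    roundtrip = prep.unnormalize_glucose(prep.normalize_glucose(torch.tensor(glucose)))
    assert torch.allclose(roundtrip, torch.tensor(glucose))

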
class GluformerForTimeSeries(PreTrainedModel):
    config_class = GluformerConfig
    base_model_prefix = "gluformer"

    def __init__(self, config: GluformerConfig):
        super().__init__(config)
        self.model = Gluformer(
            d_model=config.d_model,
            n_heads=config.n_heads,
            d_fcn=config.d_fcn,
            r_drop=config.r_drop,
            activ=config.activ,
            num_enc_layers=config.num_enc_layers,
            num_dec_layers=config.num_dec_layers,
            distil=config.distil,
            len_seq=config.len_seq,
            len_pred=config.len_pred,
            num_features=config.num_features
        )
        self.preprocessor = Preprocessor(config.len_seq, config.len_pred, config.len_label)

    def forward(self, subject_id, timestamps, glucose_values):
        # accept a single unbatched series by adding a batch dimension
        if len(glucose_values.shape) == 1:
            subject_id = subject_id.unsqueeze(0)
            timestamps = timestamps.unsqueeze(0)
            glucose_values = glucose_values.unsqueeze(0)
        x_id, x_enc, x_dec, x_mark_enc, y_mark_dec = self.preprocessor(subject_id, timestamps, glucose_values)
        device = self.device
        x_id, x_enc, x_dec = x_id.to(device), x_enc.to(device), x_dec.to(device)
        x_mark_enc, y_mark_dec = x_mark_enc.to(device), y_mark_dec.to(device)
        output, log_var = self.model(x_id, x_enc, x_mark_enc, x_dec, y_mark_dec)
        # map normalized predictions back to the original glucose scale; log_var is the per-series log-variance
        return self.preprocessor.unnormalize_glucose(output).cpu(), log_var.cpu()
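

# Illustrative end-to-end smoke test on synthetic data (all sizes and values are
# arbitrary, not taken from the original training setup); runs only when this file
# is executed directly.
if __name__ == "__main__":
    config = GluformerConfig(len_seq=48, len_label=12, len_pred=12)
    model = GluformerForTimeSeries(config).eval()

    glucose = np.random.uniform(70, 180, size=(2, config.len_seq))     # synthetic glucose readings
    start = np.datetime64("2024-01-01T00:00").astype("datetime64[ns]").astype(np.int64)
    step = np.int64(5 * 60 * 1e9)                                      # 5-minute spacing in nanoseconds
    ts = start + np.arange(config.len_seq, dtype=np.int64) * step
    ts = np.stack([ts, ts])

    with torch.no_grad():
        pred, log_var = model(subject_id=0, timestamps=ts, glucose_values=glucose)
    print("prediction shape:", tuple(pred.shape))      # (2, len_pred, 1), on the original glucose scale
    print("log-variance shape:", tuple(log_var.shape))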