Tom Aarsen committed on
Commit 691a66c · 1 Parent(s): c4c1b0e

Simplify changes further, revert transformers changes


But the transformers path now works with eager/sdpa too

Files changed (5):
  1. README.md +47 -0
  2. config.json +2 -1
  3. modeling_splade.py +0 -28
  4. splade.py +130 -0
  5. utils.py +128 -0
README.md CHANGED
@@ -2,6 +2,7 @@
  license: cc-by-nc-sa-4.0
  tags:
  - sentence-transformers
+ - transformers
  - splade
  - sparse-encoder
  - code
@@ -51,3 +52,49 @@ print(decoded)
  # ("ĠGroup", 2.1875),
  # ]]
  ```
+
+ ### Using Transformers
+
+ ```bash
+ pip install transformers
+ ```
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoModel
+ import os
+ import torch
+
+ splade = AutoModelForCausalLM.from_pretrained("naver/splade-code-06B", trust_remote_code=True)
+ device = (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"))
+ splade.to(device)
+ splade.eval()
+ queries = ["SELECT *\nFROM Student\nWHERE Age = (\nSELECT MAX(Age)\nFROM Student\nWHERE Group = 'specific_group'\n)\nAND Group = 'specific_group';"]
+ bow_dict = splade.encode(queries, prompt_type="query", top_k_q=10, return_dict=True, print_dict=True)
+ ```
+
+ ```
+ +--------------------------------------------------------------------+
+ |                        TOP ACTIVATED WORDS                         |
+ +--------------------------------------------------------------------+
+
+
+ * INPUT: SELECT *
+ FROM Student
+ WHERE Age = (
+ SELECT MAX(Age)
+ FROM Student
+ WHERE Group = 'specific_group'
+ )
+ AND Group = 'specific_group';
+
+ Ġgroup    | ████████████████████ 2.34
+ Ġage      | ███████████████████ 2.33
+ ĠAge      | ███████████████████ 2.33
+ _group    | ███████████████████ 2.30
+ ĠStudent  | ███████████████████ 2.30
+ Ġspecific | ███████████████████ 2.28
+ Ġmax      | ██████████████████ 2.22
+ ĠMax      | ██████████████████ 2.22
+ Ġstudent  | ██████████████████ 2.20
+ ĠGroup    | ██████████████████ 2.19
+ ```
config.json CHANGED
@@ -29,6 +29,7 @@
    "use_sliding_window": false,
    "vocab_size": 151936,
    "auto_map": {
-     "AutoModelForMaskedLM": "modeling_splade.Qwen3ForCausalLM"
+     "AutoModelForMaskedLM": "splade.Qwen3ForCausalLM",
+     "AutoModelForCausalLM": "splade.Splade"
    }
  }
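
With both `auto_map` entries in place, either auto class resolves to the remote code in `splade.py` when `trust_remote_code=True` is passed. A minimal sketch of the routing (not part of the commit; repository id taken from the README example):

```python
from transformers import AutoModelForCausalLM, AutoModelForMaskedLM

# "AutoModelForCausalLM": "splade.Splade" -> the Splade wrapper with encode()/similarity()
splade = AutoModelForCausalLM.from_pretrained(
    "naver/splade-code-06B", trust_remote_code=True
)

# "AutoModelForMaskedLM": "splade.Qwen3ForCausalLM" -> the bidirectional Qwen3 backbone,
# which is the class the Sentence Transformers loading path resolves to under the hood
backbone = AutoModelForMaskedLM.from_pretrained(
    "naver/splade-code-06B", trust_remote_code=True
)
```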
modeling_splade.py DELETED
@@ -1,28 +0,0 @@
- """
- This file exists solely to allow loading the Qwen3ForCausalLM via the AutoModelForMaskedLM class.
- Compared to standard Qwen3, we're using bidirectional attention and not causal attention, but it's specified
- with `is_causal=False` in the config.
- """
-
- from transformers import Qwen3ForCausalLM as _Qwen3ForCausalLM
-
-
- class Qwen3ForCausalLM(_Qwen3ForCausalLM):
-     def tie_weights(self, *args, **kwargs):
-         """Explicitly re-tie lm_head to embed_tokens to hopefully avoid meta tensor errors."""
-         super().tie_weights(*args, **kwargs)
-         if (
-             self.config.tie_word_embeddings
-             and hasattr(self, "lm_head")
-             and hasattr(self, "model")
-         ):
-             self.lm_head.weight = self.model.embed_tokens.weight
-
-     def _init_weights(self, module):
-         """Skip lm_head init when it will be tied to embed_tokens later."""
-         if module is getattr(self, "lm_head", None) and self.config.tie_word_embeddings:
-             return
-         super()._init_weights(module)
-
-
- __all__ = ["Qwen3ForCausalLM"]
splade.py ADDED
@@ -0,0 +1,130 @@
+ """
+ Compared to standard Qwen3, we're using bidirectional attention and not causal attention, but it's specified
+ with `is_causal=False` in the config.
+
+ This file supports two loading paths:
+ 1. Sentence Transformers: `SparseEncoder("naver/splade-code-06B", trust_remote_code=True)` via AutoModelForMaskedLM -> Qwen3ForCausalLM
+ 2. Transformers: `AutoModelForCausalLM.from_pretrained("naver/splade-code-06B", trust_remote_code=True)` -> Splade
+ """
+
+ import torch
+ import os
+ from transformers import Qwen3ForCausalLM as TransformersQwen3ForCausalLM
+ from transformers import PretrainedConfig, PreTrainedModel, AutoConfig
+ from transformers.utils import is_flash_attn_2_available
+ from .utils import prepare_tokenizer, splade_max, similarity, encode
+
+
+ class Qwen3ForCausalLM(TransformersQwen3ForCausalLM):
+     def tie_weights(self, *args, **kwargs):
+         """Explicitly re-tie lm_head to embed_tokens to hopefully avoid meta tensor errors."""
+         if (
+             self.config.tie_word_embeddings
+             and hasattr(self, "lm_head")
+             and hasattr(self, "model")
+         ):
+             self.lm_head.weight = self.model.embed_tokens.weight
+             missing_keys = kwargs.get("missing_keys")
+             if missing_keys is not None:
+                 missing_keys.discard("lm_head.weight")
+         else:
+             super().tie_weights(*args, **kwargs)
+
+     def _init_weights(self, module):
+         """Skip lm_head init when it will be tied to embed_tokens later."""
+         if module is getattr(self, "lm_head", None) and self.config.tie_word_embeddings:
+             return
+         super()._init_weights(module)
+
+
+ class SpladeConfig(PretrainedConfig):
+     model_type = "qwen3"
+
+     def __init__(
+         self,
+         model_name_or_path: str = "Qwen/Qwen3-0.6B",
+         attn_implementation: str = "flash_attention_2",
+         bidirectional: bool = True,  # only for decoder models
+         padding_side: str = "left",
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.model_name_or_path = model_name_or_path
+         self.attn_implementation = attn_implementation
+         self.bidirectional = bidirectional
+         self.padding_side = padding_side
+
+
+ class Splade(PreTrainedModel):
+     config_class = SpladeConfig
+
+     # methods for MTEB's interface
+     similarity = similarity
+     encode = encode
+
+     def __init__(self, config, weights_path=None, token=None):
+         super().__init__(config)
+         self.name = "splade"
+
+         base_cfg = AutoConfig.from_pretrained(
+             weights_path,
+             attn_implementation=config.attn_implementation,
+             torch_dtype="auto",
+         )
+
+         self.tokenizer = prepare_tokenizer(
+             weights_path, padding_side=config.padding_side
+         )
+
+         if is_flash_attn_2_available():
+             config.attn_implementation = "flash_attention_2"
+         else:
+             config.attn_implementation = "sdpa"
+
+         self.model = Qwen3ForCausalLM.from_pretrained(
+             weights_path,
+             config=base_cfg,
+             torch_dtype=torch.bfloat16,
+             attn_implementation=config.attn_implementation,
+             token=token,
+         )
+
+     def save_pretrained(self, save_directory, *args, **kwargs):
+         self.model.save_pretrained(os.path.join(save_directory, "lora"))
+         self.config.save_pretrained(save_directory)
+
+     @classmethod
+     def from_pretrained(cls, model_name_or_path, *args, **kwargs):
+         token = kwargs.get("token", None)
+
+         config = SpladeConfig.from_pretrained(
+             model_name_or_path,
+             token=token,
+         )
+
+         model = cls(config, weights_path=model_name_or_path, token=token)
+
+         model.reverse_voc = {v: k for k, v in model.tokenizer.vocab.items()}
+         return model
+
+     def forward(self, **tokens):
+         output = self.model(**tokens)
+         splade_reps, _ = splade_max(output.logits, tokens["attention_mask"])
+         return (splade_reps,)
+
+     def get_width(self):
+         return self.model.config.vocab_size
+
+     def create_batch_dict(self, input_texts, max_length):
+         return self.tokenizer(
+             input_texts,
+             add_special_tokens=True,
+             padding="longest",
+             truncation=True,
+             max_length=max_length,
+             return_attention_mask=True,
+             return_tensors="pt",
+         )
+
+
+ __all__ = ["Qwen3ForCausalLM", "Splade"]
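
The README diff above only exercises loading path 2; a minimal sketch of path 1 from the docstring, assuming a `sentence-transformers` release that ships `SparseEncoder` (not part of the commit):

```python
from sentence_transformers import SparseEncoder

# Path 1: Sentence Transformers resolves AutoModelForMaskedLM to the
# Qwen3ForCausalLM subclass above (bidirectional attention, is_causal=False).
model = SparseEncoder("naver/splade-code-06B", trust_remote_code=True)

# Illustrative query; encode() returns sparse, vocabulary-sized embeddings.
embeddings = model.encode(["SELECT MAX(Age) FROM Student"])
```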
utils.py ADDED
@@ -0,0 +1,128 @@
+ import numpy as np
+ import torch
+
+ from typing import Any
+ from transformers import AutoTokenizer
+
+
+ def splade_max(features, attention_mask):
+     """
+     SPLADE pooling operation
+     """
+     relu = torch.nn.ReLU(inplace=False)
+     values, ids_ = torch.max(
+         torch.log(1 + relu(features)) * attention_mask.unsqueeze(-1), dim=1
+     )
+     return values, ids_
+
+
+ def encode(
+     self,
+     sentences: list[str],
+     max_length: int = 1024,
+     prompt_type: str = "document",
+     return_dict: bool = False,
+     print_dict: bool = False,
+     batch_size: int = 8,
+     top_k_q: int = -1,
+     top_k_d: int = -1,
+     **kwargs: Any,
+ ) -> np.ndarray:
+     all_embeddings = []
+     for i in range(0, len(sentences), batch_size):
+         batch_texts = sentences[i : i + batch_size]
+         batch_dict = self.create_batch_dict(batch_texts, max_length)
+         batch_dict = {
+             key: value.to(self.model.device) for key, value in batch_dict.items()
+         }
+         with torch.no_grad():
+             splare_reps = self(**batch_dict)[0]
+             if prompt_type == "query" and top_k_q > 0:
+                 splare_reps = top_k(splare_reps, top_k_q)
+             if prompt_type == "document" and top_k_d > 0:
+                 splare_reps = top_k(splare_reps, top_k_d)
+             all_embeddings.append(splare_reps.cpu().float().numpy())
+     if return_dict:
+         d = bow_dict(self, np.concatenate(all_embeddings, axis=0))
+         if print_dict:
+             print_bow_bars(sentences, d)
+         return d
+     else:
+         return np.concatenate(all_embeddings, axis=0)
+
+
+ def bow_dict(self, embeddings):
+     out = []
+     for vector in embeddings:
+         idx = np.nonzero(vector)[0]
+         weights = vector[idx]
+         d = {k: v for k, v in zip(idx.tolist(), weights.tolist())}
+         sorted_d = {
+             self.reverse_voc[k]: float(v)
+             for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True)
+         }
+         out.append(sorted_d)
+     return out
+
+
+ def print_bow_bars(sentences, bow_list, width=20):
+     ascii_header("TOP ACTIVATED WORDS")
+     for sent, bow in zip(sentences, bow_list):
+         print(f"* INPUT: {sent}\n")
+         max_w = max(bow.values())
+         for k, v in sorted(bow.items(), key=lambda x: x[1], reverse=True):
+             bar = "█" * int(v / max_w * width)
+             print(f"{k[:25]:25} | {bar} {v:.2f}")
+         print("\n")
+
+
+ def ascii_header(title, width=70):
+     title = f" {title} "
+     print("+" + "-" * (width - 2) + "+")
+     print("|" + title.center(width - 2) + "|")
+     print("+" + "-" * (width - 2) + "+")
+     print("\n")
+
+
+ def similarity(self, a, b) -> torch.Tensor:
+     """
+     MTEB eval requires this
+     """
+     if not isinstance(a, torch.Tensor):
+         a = torch.tensor(a)
+     if not isinstance(b, torch.Tensor):
+         b = torch.tensor(b)
+
+     def _dot_score_core(a_tensor, b_tensor):
+         if len(a_tensor.shape) == 1:
+             a_tensor = a_tensor.unsqueeze(0)
+         if len(b_tensor.shape) == 1:
+             b_tensor = b_tensor.unsqueeze(0)
+         return a_tensor @ b_tensor.transpose(0, 1)
+
+     return _dot_score_core(a, b)
+
+
+ def prepare_tokenizer(tokenizer_name: str, padding_side="right"):
+     """
+     loads and prepares tokenizer
+     """
+     tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+     tokenizer.pad_token = (
+         tokenizer.bos_token or tokenizer.pad_token or tokenizer.eos_token
+     )
+     tokenizer.padding_side = padding_side
+     return tokenizer
+
+
+ def top_k(x: torch.Tensor, k: int) -> torch.Tensor:
+     """
+     zeroes out all but the top-k values in the last dimension of x
+     """
+     _, topk_indices = x.topk(k, dim=-1)
+     # create a zero tensor of the same shape as x
+     mask = torch.zeros_like(x, dtype=torch.bool)
+     # use scatter along the last dimension
+     mask.scatter_(-1, topk_indices, True)
+     # zero out all but the top-k
+     return x * mask
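
A toy illustration of how `splade_max` and `top_k` compose (values are made up, and this snippet is not part of the commit; it assumes `utils.py` is importable from the working directory):

```python
import torch

from utils import splade_max, top_k

# Fake logits: batch of 1, sequence length 2, vocabulary size 3.
logits = torch.tensor([[[0.5, -1.0, 2.0],
                        [1.5,  0.3, -0.2]]])
attention_mask = torch.tensor([[1, 1]])  # both positions are real tokens

# Max over the sequence of log(1 + ReLU(logits)), with padded positions masked out.
values, argmax_positions = splade_max(logits, attention_mask)
print(values)            # ≈ tensor([[0.9163, 0.2624, 1.0986]])
print(top_k(values, 2))  # keeps the 2 strongest activations, zeroes the rest
```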