Commit 263ce5b
Parent(s): a640dac

Upload modeling_flash_llama.py

modeling_flash_llama.py · CHANGED · +9 −3
@@ -290,9 +290,10 @@ class LlamaAttention(nn.Module):
             scaling_type = self.config.rope_scaling["type"]
             scaling_factor = self.config.rope_scaling["factor"]
             assert scaling_type == 'linear'
-
+        rotary_base = self.config.__dict__.get("rope_theta", 10000.0)
+
         self.rotary_emb = FlashRotaryEmbedding(
-            self.head_dim, base=
+            self.head_dim, base=rotary_base, interleaved=False, scaling_factor=scaling_factor,
         )

     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
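The first hunk stops hardcoding the rotary base: rotary_base is read from the config's rope_theta, falling back to the classic LLaMA value of 10000.0 when the field is absent, and is passed through to FlashRotaryEmbedding. Below is a minimal sketch of just that lookup, with types.SimpleNamespace as a stand-in for the transformers LlamaConfig that self.config actually is; going through __dict__.get() rather than attribute access presumably tolerates configs serialized before rope_theta existed.

from types import SimpleNamespace

# Stand-ins for self.config; a config saved before rope_theta existed
# simply has no such entry in its __dict__.
legacy_config = SimpleNamespace()                     # pre-rope_theta checkpoint
theta_config = SimpleNamespace(rope_theta=1000000.0)  # e.g. a long-context checkpoint

for config in (legacy_config, theta_config):
    # Same expression the commit adds: fall back to the LLaMA default base.
    rotary_base = config.__dict__.get("rope_theta", 10000.0)
    print(rotary_base)  # 10000.0, then 1000000.0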
@@ -362,12 +363,17 @@ class LlamaAttention(nn.Module):
         past_key_value = (past_kv, past_len+q.size(1)) if use_cache else None

         # no padding tokens, more efficient
+        # the basic problem here is that for qlora, stuff is stored in float32, but attention needs float16 or bfloat16.
+        # if you cast just based on torch.cuda.is_bf16_supported(), it works for training, but it breaks in evals
+        # that load the model in fp16 on a gpu where bf16 is supported. so, we cast based on the GPU support, but then
+        # cast back to whatever q originally was. hopefully that works!
+        orig_dtype = q.dtype
         attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
         attn_outputs = flash_attn_kvpacked_func(
             q.type(attn_dtype), kv.type(attn_dtype), dropout_p=0.0, softmax_scale=1.0/self.norm_factor, causal=(not has_layer_past), return_attn_probs=output_attentions)

         attn_output = attn_outputs[0] if output_attentions else attn_outputs
-        attn_output = attn_output.reshape(bsz, q_len, h_size)
+        attn_output = attn_output.reshape(bsz, q_len, h_size).type(orig_dtype)
         attn_weights = attn_outputs[2] if output_attentions else None

         if self.config.pretraining_tp > 1:
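The second hunk implements the dtype round trip the new comment describes: under QLoRA the activations arrive in float32, the flash-attention kernel accepts only half precision, and casting on GPU capability alone leaked bfloat16 outputs into evals that had loaded the model in float16 on a bf16-capable GPU. Recording q's dtype up front and restoring it on the reshaped output covers both cases. Here is a minimal sketch of the same cast-and-restore pattern, using torch's built-in scaled_dot_product_attention as a stand-in for flash_attn_kvpacked_func; the function name and shapes are illustrative, and is_bf16_supported assumes a CUDA machine.

import torch
import torch.nn.functional as F

def attend_and_restore(q, k, v):
    # Remember what the caller handed us: float32 under QLoRA,
    # float16 when the model was loaded in fp16 for evals.
    orig_dtype = q.dtype
    # The kernel must run in half precision; prefer bf16 when the GPU has it.
    attn_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
    out = F.scaled_dot_product_attention(
        q.type(attn_dtype), k.type(attn_dtype), v.type(attn_dtype), is_causal=True
    )
    # Cast back so downstream layers see the dtype they started with.
    return out.type(orig_dtype)

# (batch, heads, seq_len, head_dim), float32 as under QLoRA; needs a GPU.
q = k = v = torch.randn(1, 8, 128, 64, device="cuda")
assert attend_and_restore(q, k, v).dtype == torch.float32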