Upload folder using huggingface_hub
- README.md +156 -3
- __init__.py +3 -0
- chat_template.jinja +118 -0
- config.json +479 -0
- configuration_nle.py +147 -0
- feature_extraction_nle.py +130 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +952 -0
- modeling_conformer.py +159 -0
- modeling_ctc.py +92 -0
- modeling_nle.py +228 -0
- modeling_projector.py +143 -0
- preprocessor_config.json +8 -0
- rtf_wer.png +0 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.py +30 -0
- tokenizer_config.json +783 -0
- vocab.json +0 -0
README.md
CHANGED
@@ -1,3 +1,156 @@
- ---
- license: apache-2.0
- ---
---
license: apache-2.0
language:
- en
- fr
- de
- es
- pt
base_model:
- ibm-granite/granite-4.0-1b-base
library_name: transformers
tags:
- speech
- asr
- non-autoregressive
- ctc
---
# Granite-4.0-1b-speech-nar

**Model Summary:**
Granite-4.0-1b-speech-nar is a non-autoregressive (NAR) speech recognition model that formulates ASR as conditional transcript editing.
Instead of decoding tokens one at a time, it edits a CTC hypothesis in a single forward pass using a bidirectional LLM, achieving competitive accuracy with dramatically faster inference than autoregressive alternatives.

The model is based on the **NLE** (Non-autoregressive LLM-based Editing) architecture described in our [paper](https://arxiv.org/abs/2603.08397).
This release corresponds to the **NLE++** configuration with enhanced training.

Key highlights:
* **27x faster** than autoregressive decoding in single-utterance inference (RTFx 310 vs 12)
* **4x faster** in batched inference (RTFx 1630 vs 430)
* On the **Pareto frontier** of the [Open ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) in the WER-RTFx tradeoff
* **Multilingual**: supports English, French, German, Spanish, and Portuguese
* Only **280M trainable parameters** (160M projector + 120M LoRA) on top of a frozen CTC encoder and a 1B LLM

**Evaluations:**



| Metric | Value |
|--------|-------|
| Open ASR Average WER | 5.67% |
| All-19 Average WER | 6.44% |
| RTFx (batch size 96) | 1630 |
| RTFx (batch size 1) | 310 |

All RTFx measurements are from offline inference on a single H100 GPU with bf16 precision.

**Release Date**: March 2026

**License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)

**Supported Languages:**
English, French, German, Spanish, Portuguese

**Intended Use:**
The model is intended for automatic speech recognition tasks, particularly in latency-sensitive applications where fast inference is critical.
It supports multilingual speech-to-text for English, French, German, Spanish, and Portuguese.

## Usage

### Installation

```shell
pip install transformers torchaudio soundfile
```

### Inference with `transformers`

```python
import torch
import torchaudio
from transformers import AutoModel, AutoFeatureExtractor

device = "cuda" if torch.cuda.is_available() else "cpu"

model_name = "ibm-granite/granite-4.0-1b-speech-nar"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True).eval().to(device)
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name, trust_remote_code=True)

# Load audio (16kHz mono)
audio_path = "your_audio.wav"
waveform, sr = torchaudio.load(audio_path)
if sr != 16000:
    waveform = torchaudio.functional.resample(waveform, sr, 16000)
if waveform.shape[0] > 1:
    waveform = waveform.mean(dim=0, keepdim=True)
waveform = waveform.squeeze(0)

# Extract features and run inference
inputs = feature_extractor([waveform], device=device)
output = model.generate(**inputs)

print(f"CTC hypothesis: {output.text_ctc_preds[0]}")
print(f"NLE prediction: {output.text_preds[0]}")
```

The model produces two outputs:
- `text_ctc_preds`: the initial CTC encoder hypothesis (fast but less accurate)
- `text_preds`: the NLE-edited transcript (refined by the bidirectional LLM)
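
Because the feature extractor pads a list of variable-length waveforms into a single batch (see `feature_extraction_nle.py` below), batched transcription needs no extra machinery. A minimal sketch reusing `model` and `feature_extractor` from above; the file names and the `load_16k_mono` helper are placeholders standing in for the resampling/mono logic shown earlier:

```python
paths = ["a.wav", "b.wav", "c.wav"]                 # placeholder file names
waveforms = [load_16k_mono(p) for p in paths]       # hypothetical helper, see snippet above

inputs = feature_extractor(waveforms, device=device)  # pads batch + builds attention_mask
output = model.generate(**inputs)

for path, text in zip(paths, output.text_preds):
    print(path, "->", text)
```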

## Model Architecture

The architecture consists of three components:

**(1) Frozen CTC Speech Encoder (440M params)**

A 16-layer Conformer encoder trained with CTC on character-level targets. It processes 16kHz audio with stacked log-mel features (80 mel bins, 2-frame stacking) and uses block attention with 4-second audio blocks and self-conditioning at layer 8.

| Parameter | Value |
|-----------|-------|
| Input dimension | 160 (80 log-mels x 2) |
| Number of layers | 16 |
| Hidden dimension | 1024 |
| Number of attention heads | 8 |
| Attention head size | 128 |
| Convolution kernel size | 15 |
| CTC vocabulary size | 348 |

**(2) Q-Former Projector (160M params)**

A 2-layer window Q-Former that downsamples the concatenated hidden representations from 4 encoder layers (layers 4, 8, 12, 16) by 5x.
Each 15-frame window is reduced to 3 queries via cross-attention, resulting in a 10Hz acoustic embedding rate for the LLM (2x from encoder frame stacking + 5x from the projector, on top of the 100Hz mel frame rate).
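
As a sanity check, the 10Hz figure follows directly from the feature-extractor and projector constants shipped in this repo:

```python
sample_rate = 16000   # Hz (NLEFeatureExtractor default)
hop_length = 160      # mel hop -> 16000 / 160 = 100 mel frames per second
frame_stacking = 2    # 2-frame stacking -> 50 encoder frames per second
downsample_rate = 5   # window Q-Former: each 15-frame window -> 3 queries

encoder_rate = sample_rate / hop_length / frame_stacking   # 50.0 Hz
llm_rate = encoder_rate / downsample_rate                  # 10.0 Hz
print(encoder_rate, llm_rate)  # 50.0 10.0
```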

**(3) Bidirectional LLM Editor (1B params, LoRA-adapted)**

[granite-4.0-1b-base](https://huggingface.co/ibm-granite/granite-4.0-1b-base) with its causal attention mask removed, enabling bidirectional context.
Adapted with LoRA (rank 160) applied to both attention and MLP layers. The LLM receives concatenated audio embeddings and an interleaved CTC hypothesis with insertion slots, then predicts the edited transcript in a single parallel forward pass using a CTC objective.
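
To picture the adapter setup, here is a minimal sketch of a comparable configuration with the `peft` library. The rank (160) and the attention+MLP coverage come from the description above; the concrete target module names and `lora_alpha` are assumptions, not values taken from this release:

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("ibm-granite/granite-4.0-1b-base")
lora_cfg = LoraConfig(
    r=160,                              # rank from the model card
    lora_alpha=160,                     # assumption; not specified in this release
    target_modules=[                    # llama-style names; assumed for Granite
        "q_proj", "k_proj", "v_proj", "o_proj",   # attention
        "gate_proj", "up_proj", "down_proj",      # MLP
    ],
)
adapted = get_peft_model(base, lora_cfg)
adapted.print_trainable_parameters()  # roughly the 120M LoRA budget cited above
```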

### How NLE Works

1. The frozen CTC encoder produces acoustic embeddings and an initial character-level hypothesis
2. The hypothesis is re-tokenized with the LLM tokenizer and interleaved with insertion slots (blank tokens between each token)
3. The projected audio embeddings are concatenated with the interleaved hypothesis embeddings
4. The bidirectional LLM predicts edits (copy, insert, delete, replace) at all positions simultaneously
5. CTC greedy decoding (argmax + collapse) produces the final transcript

This design exploits the **identity mapping bias** of Transformers: residual connections and tied embeddings make the model naturally inclined to copy input tokens, so it focuses learning capacity on corrections rather than full reconstruction.
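
A toy illustration of steps 2 and 5 on strings (the real model operates on LLM token IDs and logits; `<ins>` is a stand-in name for the blank insertion-slot token):

```python
import itertools

BLANK = "<ins>"  # insertion-slot / CTC-blank placeholder (illustrative name only)

def interleave_with_slots(hyp_tokens):
    """Step 2: put an insertion slot between (and around) hypothesis tokens."""
    out = [BLANK]
    for tok in hyp_tokens:
        out += [tok, BLANK]
    return out

def ctc_greedy_collapse(predicted):
    """Step 5: merge consecutive repeats, then drop blanks."""
    deduped = [t for t, _ in itertools.groupby(predicted)]
    return [t for t in deduped if t != BLANK]

hyp = ["the", "cat", "sat"]
slots = interleave_with_slots(hyp)   # ['<ins>', 'the', '<ins>', 'cat', '<ins>', 'sat', '<ins>']
# Suppose the LLM copies most tokens and fills one slot with an insertion:
pred = ["<ins>", "the", "<ins>", "black", "cat", "<ins>", "sat", "<ins>"]
print(ctc_greedy_collapse(pred))     # ['the', 'black', 'cat', 'sat']
```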

**Training Data:**

The model was trained on approximately 70K hours of speech across five languages (English, Spanish, French, German, Portuguese), using publicly available datasets including CommonVoice 15, MLS, LibriSpeech, VoxPopuli, AMI, YODAS, Earnings-22, Fisher, CallHome, and SwitchBoard.
For full training data details, see the [paper](https://arxiv.org/abs/2603.08397).

**Infrastructure:**

Training was completed on IBM's Blue Vela cluster using 16 H100 GPUs (2 nodes) for 5 epochs.

**Ethical Considerations and Limitations:**

The model is designed specifically for automatic speech recognition and does not generate free-form text, which limits the risk of hallucination compared to general-purpose speech-language models.
However, transcription accuracy varies across languages and acoustic conditions. Performance may be weaker on languages with less training data (e.g., Portuguese) or in challenging acoustic environments (e.g., far-field, overlapping speech).

The model's editing approach is conservative by design: it prefers deletions over insertions, which reduces hallucination risk but may occasionally drop words in noisy conditions.

**Resources**
- Read the paper: [NLE: Non-autoregressive LLM-based ASR by Transcript Editing](https://arxiv.org/abs/2603.08397)
- Learn about Granite: https://www.ibm.com/granite
__init__.py
ADDED
@@ -0,0 +1,3 @@
from .configuration_nle import NLEEncoderConfig, NLEProjectorConfig, NLEConfig
from .modeling_nle import NLENARDecoder
from .modeling_ctc import NLECTCEncoder
chat_template.jinja
ADDED
@@ -0,0 +1,118 @@
{%- set tools_system_message_prefix = 'You are a helpful assistant with access to the following tools. You may call one or more tools to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>' %}
{%- set tools_system_message_suffix = '\n</tools>\n\nFor each tool call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.' %}
{%- set documents_system_message_prefix = 'You are a helpful assistant with access to the following documents. You may use one or more documents to assist with the user query.\n\nYou are given a list of documents within <documents></documents> XML tags:\n<documents>' %}
{%- set documents_system_message_suffix = '\n</documents>\n\nWrite the response to the user\'s input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.' %}
{%- set g4_default_system_message = 'You are a helpful assistant. Please ensure responses are professional, accurate, and safe.' %}
{%- if available_tools is defined and available_tools %}
{%- set tools = available_tools %}
{%- endif %}
{%- set ns = namespace(tools_system_message=tools_system_message_prefix,
documents_system_message=documents_system_message_prefix,
default_system_message=g4_default_system_message,
system_message=''
) %}
{%- if tools %}
{%- for tool in tools %}
{%- set ns.tools_system_message = ns.tools_system_message + '\n' + (tool | tojson) %}
{%- endfor %}
{%- set ns.tools_system_message = ns.tools_system_message + tools_system_message_suffix %}
{%- else %}
{%- set ns.tools_system_message = '' %}
{%- endif %}
{%- if documents %}
{%- for document in documents %}
{%- set ns.documents_system_message = ns.documents_system_message + '\n' + (document | tojson) %}
{%- endfor %}
{%- set ns.documents_system_message = ns.documents_system_message + documents_system_message_suffix %}
{%- else %}
{%- set ns.documents_system_message = '' %}
{%- endif %}
{%- if messages[0].role == 'system' %}
{%- if messages[0].content is string %}
{%- set ns.system_message = messages[0].content %}
{%- elif messages[0].content is iterable %}
{%- for entry in messages[0].content %}
{%- if entry.type== 'text' %}
{%- if ns.system_message != '' %}
{%- set ns.system_message = ns.system_message + '\n' %}
{%- endif %}
{%- set ns.system_message = ns.system_message + entry.text %}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- if tools and documents %}
{%- set ns.system_message = ns.system_message + '\n\n' + ns.tools_system_message + '\n\n' + ns.documents_system_message %}
{%- elif tools %}
{%- set ns.system_message = ns.system_message + '\n\n' + ns.tools_system_message %}
{%- elif documents %}
{%- set ns.system_message = ns.system_message + '\n\n' + ns.documents_system_message %}
{%- endif %}
{%- else %}
{%- if tools and documents %}
{%- set ns.system_message = ns.tools_system_message + '\n\n' + ns.documents_system_message %}
{%- elif tools %}
{%- set ns.system_message = ns.tools_system_message %}
{%- elif documents %}
{%- set ns.system_message = ns.documents_system_message %}
{%- endif %}
{%- endif %}
{%- if ns.system_message %}
{{- '<|start_of_role|>system<|end_of_role|>' + ns.system_message + '<|end_of_text|>\n' }}
{%- else %}
{{- '<|start_of_role|>system<|end_of_role|>' + ns.default_system_message + '<|end_of_text|>\n' }}
{%- endif %}
{%- for message in messages %}
{%- set content = namespace(val='') %}
{%- if message.content is string %}
{%- set content.val = message.content %}
{%- else %}
{%- if message.content is iterable %}
{%- for entry in message.content %}
{%- if entry.type== 'text' %}
{%- if content.val != '' %}
{%- set content.val = content.val + '\n' %}
{%- endif %}
{%- set content.val = content.val + entry.text %}
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endif %}
{%- if (message.role == 'user') or (message.role == 'system' and not loop.first) %}
{{- '<|start_of_role|>' + message.role + '<|end_of_role|>' + content.val + '<|end_of_text|>\n' }}
{%- elif message.role == 'assistant' %}
{{- '<|start_of_role|>' + message.role + '<|end_of_role|>' + content.val }}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content.val) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|end_of_text|>\n' }}
{%- elif message.role == 'tool' %}
{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') %}
{{- '<|start_of_role|>user<|end_of_role|>' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content.val }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') %}
{{- '<|end_of_text|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_of_role|>assistant<|end_of_role|>' }}
{%- endif %}
config.json
ADDED
@@ -0,0 +1,479 @@
{
  "architectures": [
    "NLENARDecoder"
  ],
  "attn_implementation": "flash_attention_2",
  "auto_map": {
    "AutoConfig": "configuration_nle.NLEConfig",
    "AutoFeatureExtractor": "feature_extraction_nle.NLEFeatureExtractor",
    "AutoModel": "modeling_nle.NLENARDecoder"
  },
  "ctc_tokenizer_config": {
    "char2idx": {
      " ": 32, "!": 33, "\"": 34, "#": 35, "$": 36, "%": 37, "&": 38, "'": 39,
      "(": 40, ")": 41, "*": 42, "+": 43, ",": 44, "-": 45, ".": 46, "/": 47,
      "0": 48, "1": 49, "2": 50, "3": 51, "4": 52, "5": 53, "6": 54, "7": 55,
      "8": 56, "9": 57, ":": 58, ";": 59, "<": 60, "=": 61, ">": 62, "?": 63,
      "@": 64, "A": 65, "B": 66, "C": 67, "D": 68, "E": 69, "F": 70, "G": 71,
      "H": 72, "I": 73, "J": 74, "K": 75, "L": 76, "M": 77, "N": 78, "O": 79,
      "P": 80, "Q": 81, "R": 82, "S": 83, "T": 84, "U": 85, "V": 86, "W": 87,
      "X": 88, "Y": 89, "Z": 90, "[": 91, "\\": 92, "]": 93, "^": 94, "_": 95,
      "`": 96, "a": 97, "b": 98, "c": 99, "d": 100, "e": 101, "f": 102, "g": 103,
      "h": 104, "i": 105, "j": 106, "k": 107, "l": 108, "m": 109, "n": 110, "o": 111,
      "p": 112, "q": 113, "r": 114, "s": 115, "t": 116, "u": 117, "v": 118, "w": 119,
      "x": 120, "y": 121, "z": 122, "{": 123, "|": 124, "}": 125, "~": 126, "\u007f": 127,
      "\u0080": 128, "\u0081": 129, "\u0082": 130, "\u0083": 131, "\u0084": 132, "\u0085": 133, "\u0086": 134, "\u0087": 135,
      "\u0088": 136, "\u0089": 137, "\u008a": 138, "\u008b": 139, "\u008c": 140, "\u008d": 141, "\u008e": 142, "\u008f": 143,
      "\u0090": 144, "\u0091": 145, "\u0092": 146, "\u0093": 147, "\u0094": 148, "\u0095": 149, "\u0096": 150, "\u0097": 151,
      "\u0098": 152, "\u0099": 153, "\u009a": 154, "\u009b": 155, "\u009c": 156, "\u009d": 157, "\u009e": 158, "\u009f": 159,
      "\u00a0": 160, "\u00a1": 161, "\u00a2": 162, "\u00a3": 163, "\u00a4": 164, "\u00a5": 165, "\u00a6": 166, "\u00a7": 167,
      "\u00a8": 168, "\u00a9": 169, "\u00aa": 170, "\u00ab": 171, "\u00ac": 172, "\u00ad": 173, "\u00ae": 174, "\u00af": 175,
      "\u00b0": 176, "\u00b1": 177, "\u00b2": 178, "\u00b3": 179, "\u00b4": 180, "\u00b5": 181, "\u00b6": 182, "\u00b7": 183,
      "\u00b8": 184, "\u00b9": 185, "\u00ba": 186, "\u00bb": 187, "\u00bc": 188, "\u00bd": 189, "\u00be": 190, "\u00bf": 191,
      "\u00c0": 192, "\u00c1": 193, "\u00c2": 194, "\u00c3": 195, "\u00c4": 196, "\u00c5": 197, "\u00c6": 198, "\u00c7": 199,
      "\u00c8": 200, "\u00c9": 201, "\u00ca": 202, "\u00cb": 203, "\u00cc": 204, "\u00cd": 205, "\u00ce": 206, "\u00cf": 207,
      "\u00d0": 208, "\u00d1": 209, "\u00d2": 210, "\u00d3": 211, "\u00d4": 212, "\u00d5": 213, "\u00d6": 214, "\u00d7": 215,
      "\u00d8": 216, "\u00d9": 217, "\u00da": 218, "\u00db": 219, "\u00dc": 220, "\u00dd": 221, "\u00de": 222, "\u00df": 223,
      "\u00e0": 224, "\u00e1": 225, "\u00e2": 226, "\u00e3": 227, "\u00e4": 228, "\u00e5": 229, "\u00e6": 230, "\u00e7": 231,
      "\u00e8": 232, "\u00e9": 233, "\u00ea": 234, "\u00eb": 235, "\u00ec": 236, "\u00ed": 237, "\u00ee": 238, "\u00ef": 239,
      "\u00f0": 240, "\u00f1": 241, "\u00f2": 242, "\u00f3": 243, "\u00f4": 244, "\u00f5": 245, "\u00f6": 246, "\u00f7": 247,
      "\u00f8": 248, "\u00f9": 249, "\u00fa": 250, "\u00fb": 251, "\u00fc": 252, "\u00fd": 253, "\u00fe": 254, "\u00ff": 255,
      "\u30a1": 256, "\u30a2": 257, "\u30a3": 258, "\u30a4": 259, "\u30a5": 260, "\u30a6": 261, "\u30a7": 262, "\u30a8": 263,
      "\u30a9": 264, "\u30aa": 265, "\u30ab": 266, "\u30ac": 267, "\u30ad": 268, "\u30ae": 269, "\u30af": 270, "\u30b0": 271,
      "\u30b1": 272, "\u30b2": 273, "\u30b3": 274, "\u30b4": 275, "\u30b5": 276, "\u30b6": 277, "\u30b7": 278, "\u30b8": 279,
      "\u30b9": 280, "\u30ba": 281, "\u30bb": 282, "\u30bc": 283, "\u30bd": 284, "\u30be": 285, "\u30bf": 286, "\u30c0": 287,
      "\u30c1": 288, "\u30c2": 289, "\u30c3": 290, "\u30c4": 291, "\u30c5": 292, "\u30c6": 293, "\u30c7": 294, "\u30c8": 295,
      "\u30c9": 296, "\u30ca": 297, "\u30cb": 298, "\u30cc": 299, "\u30cd": 300, "\u30ce": 301, "\u30cf": 302, "\u30d0": 303,
      "\u30d1": 304, "\u30d2": 305, "\u30d3": 306, "\u30d4": 307, "\u30d5": 308, "\u30d6": 309, "\u30d7": 310, "\u30d8": 311,
      "\u30d9": 312, "\u30da": 313, "\u30db": 314, "\u30dc": 315, "\u30dd": 316, "\u30de": 317, "\u30df": 318, "\u30e0": 319,
      "\u30e1": 320, "\u30e2": 321, "\u30e3": 322, "\u30e4": 323, "\u30e5": 324, "\u30e6": 325, "\u30e7": 326, "\u30e8": 327,
      "\u30e9": 328, "\u30ea": 329, "\u30eb": 330, "\u30ec": 331, "\u30ed": 332, "\u30ee": 333, "\u30ef": 334, "\u30f0": 335,
      "\u30f1": 336, "\u30f2": 337, "\u30f3": 338, "\u30f4": 339, "\u30f5": 340, "\u30f6": 341, "\u30f7": 342, "\u30f8": 343,
      "\u30f9": 344, "\u30fa": 345, "\u30fb": 346, "\u30fc": 347
    }
  },
  "dtype": "float32",
  "encoder_config": {
    "attn_type": "block",
    "backbone": null,
    "backbone_requires_grad": false,
    "context_size": 200,
    "conv_expansion_factor": 2,
    "conv_kernel_size": 15,
    "dim_head": 128,
    "dropout": 0.1,
    "feedforward_mult": 4,
    "fmask_F": 6,
    "fmask_m": 2,
    "fmask_prob": 0.9,
    "hidden_dim": 1024,
    "initializer_range": 0.02,
    "input_dim": 160,
    "loss_lambda": 0.2,
    "max_pos_emb": 512,
    "model_type": "nle_encoder",
    "num_heads": 8,
    "num_layers": 16,
    "old_encoder_mask": true,
    "output_dim": 348,
    "pred_dropout": 0.25,
    "self_conditioning_layer": 8,
    "tmask_T": 50,
    "tmask_m": 2,
    "tmask_m_relative_max": 0.02,
    "tmask_prob": 0.9
  },
  "encoder_layer_indices": [
    4,
    8,
    12,
    -1
  ],
  "initializer_range": 0.02,
  "llm_config": {
    "_name_or_path": "/proj/speech/saon/slam-llm/29.2-c/granite-4.0-1b-base",
    "add_cross_attention": false,
    "architectures": [
      "GraniteForCausalLM"
    ],
    "attention_bias": false,
    "attention_dropout": 0.0,
    "attention_multiplier": 0.0078125,
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": 100257,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dtype": "bfloat16",
    "early_stopping": false,
    "embedding_multiplier": 12,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 100257,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "silu",
    "hidden_size": 2048,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.1,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "length_penalty": 1.0,
    "logits_scaling": 8,
    "max_length": 20,
    "max_position_embeddings": 4096,
    "min_length": 0,
    "mlp_bias": false,
    "model_type": "granite",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 40,
    "num_key_value_heads": 4,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 100256,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "residual_multiplier": 0.22,
    "return_dict": true,
    "return_dict_in_generate": false,
    "rms_norm_eps": 1e-05,
    "rope_parameters": {
      "rope_theta": 10000,
      "rope_type": "default"
    },
    "rope_scaling": null,
    "rope_theta": 10000.0,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torchscript": false,
    "transformers_version": "4.57.3",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": true,
    "vocab_size": 100352
  },
  "llm_name": "/proj/speech/saon/slam-llm/29.2-c/granite-4.0-1b-base",
  "model_type": "nle",
  "projector_config": {
    "attn_bias": true,
    "block_size": 15,
    "downsample_rate": 5,
    "dropout_prob": 0.1,
    "encoder_dim": 1024,
    "hidden_size": 2048,
    "layernorm_eps": 1e-06,
    "llm_dim": 2048,
    "mlp_bias": true,
    "mlp_ratio": 2,
    "model_type": "nle_projector",
    "num_encoder_layers": 4,
    "num_heads": 32,
    "num_layers": 2
  },
  "scale_projected_embeddings": true,
  "transformers_version": "4.57.3"
}
configuration_nle.py
ADDED
@@ -0,0 +1,147 @@
from typing import List, Optional, Union

from transformers.configuration_utils import PretrainedConfig


class NLEEncoderConfig(PretrainedConfig):
    model_type = "nle_encoder"

    def __init__(
        self,
        input_dim=160,
        num_layers=10,
        hidden_dim=1024,
        feedforward_mult=4,
        num_heads=8,
        dim_head=128,
        output_dim=42,
        context_size=200,
        max_pos_emb=512,
        dropout=0.1,
        pred_dropout=0.25,
        conv_kernel_size=15,
        conv_expansion_factor=2,
        loss_lambda=0.2,
        initializer_range=0.02,
        self_conditioning_layer=None,
        old_encoder_mask=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.feedforward_mult = feedforward_mult
        self.num_heads = num_heads
        self.dim_head = dim_head
        self.output_dim = output_dim
        self.context_size = context_size
        self.dropout = dropout
        self.pred_dropout = pred_dropout
        self.conv_kernel_size = conv_kernel_size
        self.conv_expansion_factor = conv_expansion_factor
        self.max_pos_emb = max_pos_emb
        self.loss_lambda = loss_lambda
        self.initializer_range = initializer_range
        if self_conditioning_layer is None:
            self_conditioning_layer = num_layers // 2
        self.self_conditioning_layer = self_conditioning_layer
        self.old_encoder_mask = old_encoder_mask


class NLEProjectorConfig(PretrainedConfig):
    """Config for the QFormer-based encoder-to-LLM projector."""

    model_type = "nle_projector"

    def __init__(
        self,
        encoder_dim: int = 1024,
        llm_dim: int = 2048,
        downsample_rate: int = 5,
        num_encoder_layers: int = 1,
        hidden_size: Optional[int] = None,
        num_heads: Optional[int] = None,
        num_layers: int = 1,
        dropout_prob: float = 0.0,
        block_size: int = 15,
        mlp_ratio: int = 2,
        layernorm_eps: float = 1e-6,
        attn_bias: bool = True,
        mlp_bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.encoder_dim = encoder_dim
        self.llm_dim = llm_dim
        self.downsample_rate = downsample_rate
        self.num_encoder_layers = num_encoder_layers
        self.hidden_size = hidden_size if hidden_size is not None else encoder_dim
        self.num_heads = num_heads if num_heads is not None else self.hidden_size // 64
        self.num_layers = num_layers
        self.dropout_prob = dropout_prob
        self.block_size = block_size
        self.mlp_ratio = mlp_ratio
        self.layernorm_eps = layernorm_eps
        self.attn_bias = attn_bias
        self.mlp_bias = mlp_bias


class NLEConfig(PretrainedConfig):
    model_type = "nle"

    def __init__(
        self,
        encoder_config: Union[NLEEncoderConfig, dict, None] = None,
        projector_config: Union[NLEProjectorConfig, dict, None] = None,
        llm_name: str = "ibm-granite/granite-3.3-2b-base",
        llm_config: Optional[dict] = None,
        attn_implementation: str = "flash_attention_2",
        initializer_range: float = 0.02,
        encoder_layer_indices: Optional[List[int]] = None,
        scale_projected_embeddings: bool = False,
        ctc_tokenizer_config: Optional[dict] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if isinstance(encoder_config, dict):
            self.encoder_config = NLEEncoderConfig(**encoder_config)
        elif isinstance(encoder_config, NLEEncoderConfig):
            self.encoder_config = encoder_config
        elif encoder_config is None:
            self.encoder_config = NLEEncoderConfig()
        else:
            raise TypeError("encoder_config must be NLEEncoderConfig or dict")

        if isinstance(projector_config, dict):
            self.projector_config = NLEProjectorConfig(**projector_config)
        elif isinstance(projector_config, NLEProjectorConfig):
            self.projector_config = projector_config
        elif projector_config is None:
            self.projector_config = NLEProjectorConfig()
        else:
            raise TypeError("projector_config must be NLEProjectorConfig or dict")

        self.llm_name = llm_name
        self.llm_config = llm_config
        self.attn_implementation = attn_implementation
        self.initializer_range = initializer_range
        self.encoder_layer_indices = list(encoder_layer_indices) if encoder_layer_indices is not None else [-1]
        self.scale_projected_embeddings = scale_projected_embeddings
        self.ctc_tokenizer_config = ctc_tokenizer_config
        self.auto_map = {
            "AutoConfig": "configuration_nle.NLEConfig",
            "AutoModel": "modeling_nle.NLENARDecoder",
            "AutoFeatureExtractor": "feature_extraction_nle.NLEFeatureExtractor",
        }

    def to_dict(self):
        d = super().to_dict()
        d["encoder_config"] = self.encoder_config.to_dict()
        d["projector_config"] = self.projector_config.to_dict()
        if self.llm_config is not None:
            d["llm_config"] = self.llm_config
        return d


__all__ = ["NLEEncoderConfig", "NLEProjectorConfig", "NLEConfig"]
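A small usage sketch of these config classes; the values mirror the shipped config.json above, and the printed default only applies when `hidden_size` is left unset:

```python
from configuration_nle import NLEConfig, NLEEncoderConfig, NLEProjectorConfig

cfg = NLEConfig(
    encoder_config=NLEEncoderConfig(num_layers=16, output_dim=348, self_conditioning_layer=8),
    projector_config=NLEProjectorConfig(downsample_rate=5, block_size=15, num_encoder_layers=4),
    encoder_layer_indices=[4, 8, 12, -1],   # which encoder layers feed the projector
    scale_projected_embeddings=True,
)
print(cfg.projector_config.num_heads)  # 16 here (hidden_size defaults to encoder_dim 1024, // 64)
```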
feature_extraction_nle.py
ADDED
@@ -0,0 +1,130 @@
"""Feature extractor for NLE models.

Handles mel spectrogram extraction, frame stacking, batching, and computing
the correct attention_mask / x_sizes at encoder-frame rate.

Usage:
    from feature_extraction_nle import NLEFeatureExtractor

    feature_extractor = NLEFeatureExtractor()
    inputs = feature_extractor([waveform1, waveform2])
    output = model.generate(**inputs)
"""

from typing import List, Optional, Union

import torch
import torchaudio
from transformers.feature_extraction_utils import FeatureExtractionMixin


class NLEFeatureExtractor(FeatureExtractionMixin):
    """Prepares raw audio for the NLENARDecoder.

    Pipeline: raw 16kHz audio -> MelSpectrogram -> log-mel normalize -> stack 2 frames.
    Encoder frame rate = sample_rate / (hop_length * 2) = 50 fps, i.e. T_samples // 320.

    Returns:
        - input_features: [B, T_enc, 160] stacked log-mel features
        - attention_mask: [B, T_enc] bool mask at encoder-frame rate
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        sampling_rate: int = 16000,
        n_fft: int = 512,
        win_length: int = 400,
        hop_length: int = 160,
        n_mels: int = 80,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.sampling_rate = sampling_rate
        self.n_fft = n_fft
        self.win_length = win_length
        self.hop_length = hop_length
        self.n_mels = n_mels
        self.mel_filters = torchaudio.transforms.MelSpectrogram(
            sample_rate=sampling_rate, n_fft=n_fft, win_length=win_length,
            hop_length=hop_length, n_mels=n_mels,
        )

    @torch.no_grad()
    def _extract_features(self, raw_audio: torch.Tensor) -> torch.Tensor:
        """Convert raw waveform batch to stacked log-mel features.

        Args:
            raw_audio: [B, T] raw 16kHz waveform

        Returns:
            [B, T_enc, n_mels * 2] stacked log-mel features
        """
        melspec = self.mel_filters.to(raw_audio.device)
        B, T = raw_audio.shape
        # Ensure even number of mel frames for stacking
        l = 2 * (T // (2 * self.hop_length))
        mel = melspec(raw_audio.float())[..., :l]
        logmel = mel.transpose(-1, -2).clamp_min_(1e-10).log10_()
        mx = logmel.amax(dim=(-2, -1), keepdim=True)
        logmel = torch.maximum(logmel, mx - 8.0).div_(4).add_(1)
        # Stack 2 consecutive frames
        return logmel.reshape(B, -1, 2 * self.n_mels)

    def __call__(
        self,
        audios: Union[torch.Tensor, List[torch.Tensor]],
        device: Optional[Union[str, torch.device]] = None,
    ) -> dict:
        """Prepare a batch of raw audio waveforms for the model.

        Args:
            audios: A single tensor [T] or [B, T], or a list of 1-D tensors
                (variable length). Expected 16 kHz float waveforms.
            device: Target device for the output tensors.

        Returns:
            Dict with keys: input_features, attention_mask — ready to
            unpack into model.generate(**inputs).
        """
        # Normalise to list of 1-D tensors
        if isinstance(audios, torch.Tensor):
            if audios.ndim == 1:
                audios = [audios]
            elif audios.ndim == 2:
                audios = [audios[i] for i in range(audios.shape[0])]
            else:
                raise ValueError(f"Expected 1-D or 2-D tensor, got {audios.ndim}-D")

        raw_lengths = [a.shape[-1] for a in audios]
        encoder_frame_counts = [l // (2 * self.hop_length) for l in raw_lengths]

        # Pad waveforms to same length
        raw_audio = torch.nn.utils.rnn.pad_sequence(
            [a.squeeze(0) if a.ndim > 1 else a for a in audios],
            batch_first=True,
            padding_value=0.0,
        )
        if device is not None:
            raw_audio = raw_audio.to(device)

        # Extract mel features on the padded batch
        input_features = self._extract_features(raw_audio)

        # Build attention_mask at encoder-frame rate
        max_enc_frames = input_features.shape[1]
        x_sizes = torch.tensor(encoder_frame_counts, dtype=torch.long)
        attention_mask = torch.arange(max_enc_frames).unsqueeze(0) < x_sizes.unsqueeze(1)

        if device is not None:
            input_features = input_features.to(device)
            attention_mask = attention_mask.to(device)

        return {
            "input_features": input_features,
            "attention_mask": attention_mask,
        }


__all__ = ["NLEFeatureExtractor"]
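A quick, self-contained check of the frame-rate arithmetic in this extractor; the shapes shown assume the default 16 kHz settings (hop 160, 2-frame stacking, so one encoder frame per 320 samples):

```python
import torch
from feature_extraction_nle import NLEFeatureExtractor

fe = NLEFeatureExtractor()
batch = [torch.randn(48000), torch.randn(16000)]   # 3 s and 1 s of 16 kHz audio
out = fe(batch)                                    # pads to the longest item

print(out["input_features"].shape)        # torch.Size([2, 150, 160]) -> 50 frames/s
print(out["attention_mask"].sum(dim=1))   # tensor([150,  50]): valid frames per item
```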
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6de98bd5aaa11a36170aab8be62e25bf533790409031e89049107412042391c8
size 4990972336
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4eaf74f8b4416b764714166bc7a7731d6d8e0996a6c5aaef9fc96fd0aeaddb35
size 352732992
model.safetensors.index.json
ADDED
@@ -0,0 +1,952 @@
{
  "metadata": {
    "total_parameters": 2151709020,
    "total_size": 5343598064
  },
  "weight_map": {
    "encoder.input_linear.bias": "model-00001-of-00002.safetensors",
    "encoder.input_linear.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.to_kv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.to_out.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.to_out.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.attn.to_q.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.down_conv.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.down_conv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.up_conv.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.conv.up_conv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.0.post_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.0.post_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.to_kv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.to_out.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.to_out.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.attn.to_q.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.down_conv.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.down_conv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.up_conv.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.conv.up_conv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.1.post_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.1.post_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.to_kv.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.to_out.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.to_out.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.10.attn.to_q.weight": "model-00001-of-00002.safetensors",
    "encoder.layers.10.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
    "encoder.layers.10.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
    "encoder.layers.10.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 85 |
+
"encoder.layers.10.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 86 |
+
"encoder.layers.10.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 87 |
+
"encoder.layers.10.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 88 |
+
"encoder.layers.10.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 89 |
+
"encoder.layers.10.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 90 |
+
"encoder.layers.10.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 91 |
+
"encoder.layers.10.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 92 |
+
"encoder.layers.10.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 93 |
+
"encoder.layers.10.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 94 |
+
"encoder.layers.10.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 95 |
+
"encoder.layers.10.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 96 |
+
"encoder.layers.10.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 97 |
+
"encoder.layers.10.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 98 |
+
"encoder.layers.10.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 99 |
+
"encoder.layers.10.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 100 |
+
"encoder.layers.10.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 101 |
+
"encoder.layers.10.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 102 |
+
"encoder.layers.10.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 103 |
+
"encoder.layers.10.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 104 |
+
"encoder.layers.10.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 105 |
+
"encoder.layers.10.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 106 |
+
"encoder.layers.10.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 107 |
+
"encoder.layers.10.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 108 |
+
"encoder.layers.11.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 109 |
+
"encoder.layers.11.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 110 |
+
"encoder.layers.11.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 111 |
+
"encoder.layers.11.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 112 |
+
"encoder.layers.11.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 113 |
+
"encoder.layers.11.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 114 |
+
"encoder.layers.11.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 115 |
+
"encoder.layers.11.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 116 |
+
"encoder.layers.11.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 117 |
+
"encoder.layers.11.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 118 |
+
"encoder.layers.11.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 119 |
+
"encoder.layers.11.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 120 |
+
"encoder.layers.11.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 121 |
+
"encoder.layers.11.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 122 |
+
"encoder.layers.11.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 123 |
+
"encoder.layers.11.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 124 |
+
"encoder.layers.11.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 125 |
+
"encoder.layers.11.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 126 |
+
"encoder.layers.11.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 127 |
+
"encoder.layers.11.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 128 |
+
"encoder.layers.11.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 129 |
+
"encoder.layers.11.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 130 |
+
"encoder.layers.11.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 131 |
+
"encoder.layers.11.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 132 |
+
"encoder.layers.11.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 133 |
+
"encoder.layers.11.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 134 |
+
"encoder.layers.11.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 135 |
+
"encoder.layers.11.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 136 |
+
"encoder.layers.11.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 137 |
+
"encoder.layers.11.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 138 |
+
"encoder.layers.11.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 139 |
+
"encoder.layers.11.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 140 |
+
"encoder.layers.11.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 141 |
+
"encoder.layers.12.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 142 |
+
"encoder.layers.12.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 143 |
+
"encoder.layers.12.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 144 |
+
"encoder.layers.12.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 145 |
+
"encoder.layers.12.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 146 |
+
"encoder.layers.12.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 147 |
+
"encoder.layers.12.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 148 |
+
"encoder.layers.12.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 149 |
+
"encoder.layers.12.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 150 |
+
"encoder.layers.12.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 151 |
+
"encoder.layers.12.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 152 |
+
"encoder.layers.12.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 153 |
+
"encoder.layers.12.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 154 |
+
"encoder.layers.12.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 155 |
+
"encoder.layers.12.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 156 |
+
"encoder.layers.12.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 157 |
+
"encoder.layers.12.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 158 |
+
"encoder.layers.12.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 159 |
+
"encoder.layers.12.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 160 |
+
"encoder.layers.12.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 161 |
+
"encoder.layers.12.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 162 |
+
"encoder.layers.12.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 163 |
+
"encoder.layers.12.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 164 |
+
"encoder.layers.12.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 165 |
+
"encoder.layers.12.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 166 |
+
"encoder.layers.12.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 167 |
+
"encoder.layers.12.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 168 |
+
"encoder.layers.12.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 169 |
+
"encoder.layers.12.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 170 |
+
"encoder.layers.12.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 171 |
+
"encoder.layers.12.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 172 |
+
"encoder.layers.12.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 173 |
+
"encoder.layers.12.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 174 |
+
"encoder.layers.13.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 175 |
+
"encoder.layers.13.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 176 |
+
"encoder.layers.13.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 177 |
+
"encoder.layers.13.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 178 |
+
"encoder.layers.13.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 179 |
+
"encoder.layers.13.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 180 |
+
"encoder.layers.13.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 181 |
+
"encoder.layers.13.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 182 |
+
"encoder.layers.13.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 183 |
+
"encoder.layers.13.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 184 |
+
"encoder.layers.13.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 185 |
+
"encoder.layers.13.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 186 |
+
"encoder.layers.13.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 187 |
+
"encoder.layers.13.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 188 |
+
"encoder.layers.13.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 189 |
+
"encoder.layers.13.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 190 |
+
"encoder.layers.13.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 191 |
+
"encoder.layers.13.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 192 |
+
"encoder.layers.13.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 193 |
+
"encoder.layers.13.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 194 |
+
"encoder.layers.13.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 195 |
+
"encoder.layers.13.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 196 |
+
"encoder.layers.13.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 197 |
+
"encoder.layers.13.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 198 |
+
"encoder.layers.13.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 199 |
+
"encoder.layers.13.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 200 |
+
"encoder.layers.13.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 201 |
+
"encoder.layers.13.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 202 |
+
"encoder.layers.13.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 203 |
+
"encoder.layers.13.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 204 |
+
"encoder.layers.13.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 205 |
+
"encoder.layers.13.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 206 |
+
"encoder.layers.13.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 207 |
+
"encoder.layers.14.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 208 |
+
"encoder.layers.14.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 209 |
+
"encoder.layers.14.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 210 |
+
"encoder.layers.14.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 211 |
+
"encoder.layers.14.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 212 |
+
"encoder.layers.14.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 213 |
+
"encoder.layers.14.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 214 |
+
"encoder.layers.14.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 215 |
+
"encoder.layers.14.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 216 |
+
"encoder.layers.14.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 217 |
+
"encoder.layers.14.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 218 |
+
"encoder.layers.14.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 219 |
+
"encoder.layers.14.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 220 |
+
"encoder.layers.14.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 221 |
+
"encoder.layers.14.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 222 |
+
"encoder.layers.14.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 223 |
+
"encoder.layers.14.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 224 |
+
"encoder.layers.14.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 225 |
+
"encoder.layers.14.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 226 |
+
"encoder.layers.14.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 227 |
+
"encoder.layers.14.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 228 |
+
"encoder.layers.14.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 229 |
+
"encoder.layers.14.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 230 |
+
"encoder.layers.14.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 231 |
+
"encoder.layers.14.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 232 |
+
"encoder.layers.14.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 233 |
+
"encoder.layers.14.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 234 |
+
"encoder.layers.14.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 235 |
+
"encoder.layers.14.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 236 |
+
"encoder.layers.14.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 237 |
+
"encoder.layers.14.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 238 |
+
"encoder.layers.14.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 239 |
+
"encoder.layers.14.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 240 |
+
"encoder.layers.15.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 241 |
+
"encoder.layers.15.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 242 |
+
"encoder.layers.15.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 243 |
+
"encoder.layers.15.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 244 |
+
"encoder.layers.15.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 245 |
+
"encoder.layers.15.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 246 |
+
"encoder.layers.15.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 247 |
+
"encoder.layers.15.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 248 |
+
"encoder.layers.15.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 249 |
+
"encoder.layers.15.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 250 |
+
"encoder.layers.15.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 251 |
+
"encoder.layers.15.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 252 |
+
"encoder.layers.15.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 253 |
+
"encoder.layers.15.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 254 |
+
"encoder.layers.15.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 255 |
+
"encoder.layers.15.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 256 |
+
"encoder.layers.15.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 257 |
+
"encoder.layers.15.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 258 |
+
"encoder.layers.15.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 259 |
+
"encoder.layers.15.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 260 |
+
"encoder.layers.15.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 261 |
+
"encoder.layers.15.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 262 |
+
"encoder.layers.15.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 263 |
+
"encoder.layers.15.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 264 |
+
"encoder.layers.15.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 265 |
+
"encoder.layers.15.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 266 |
+
"encoder.layers.15.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 267 |
+
"encoder.layers.15.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 268 |
+
"encoder.layers.15.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 269 |
+
"encoder.layers.15.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 270 |
+
"encoder.layers.15.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 271 |
+
"encoder.layers.15.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 272 |
+
"encoder.layers.15.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 273 |
+
"encoder.layers.2.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 274 |
+
"encoder.layers.2.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 275 |
+
"encoder.layers.2.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 276 |
+
"encoder.layers.2.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 277 |
+
"encoder.layers.2.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 278 |
+
"encoder.layers.2.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 279 |
+
"encoder.layers.2.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 280 |
+
"encoder.layers.2.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 281 |
+
"encoder.layers.2.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 282 |
+
"encoder.layers.2.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 283 |
+
"encoder.layers.2.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 284 |
+
"encoder.layers.2.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 285 |
+
"encoder.layers.2.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 286 |
+
"encoder.layers.2.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 287 |
+
"encoder.layers.2.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 288 |
+
"encoder.layers.2.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 289 |
+
"encoder.layers.2.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 290 |
+
"encoder.layers.2.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 291 |
+
"encoder.layers.2.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 292 |
+
"encoder.layers.2.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 293 |
+
"encoder.layers.2.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 294 |
+
"encoder.layers.2.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 295 |
+
"encoder.layers.2.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 296 |
+
"encoder.layers.2.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 297 |
+
"encoder.layers.2.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 298 |
+
"encoder.layers.2.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 299 |
+
"encoder.layers.2.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 300 |
+
"encoder.layers.2.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 301 |
+
"encoder.layers.2.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 302 |
+
"encoder.layers.2.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 303 |
+
"encoder.layers.2.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 304 |
+
"encoder.layers.2.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 305 |
+
"encoder.layers.2.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 306 |
+
"encoder.layers.3.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 307 |
+
"encoder.layers.3.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 308 |
+
"encoder.layers.3.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 309 |
+
"encoder.layers.3.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 310 |
+
"encoder.layers.3.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 311 |
+
"encoder.layers.3.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 312 |
+
"encoder.layers.3.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 313 |
+
"encoder.layers.3.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 314 |
+
"encoder.layers.3.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 315 |
+
"encoder.layers.3.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 316 |
+
"encoder.layers.3.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 317 |
+
"encoder.layers.3.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 318 |
+
"encoder.layers.3.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 319 |
+
"encoder.layers.3.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 320 |
+
"encoder.layers.3.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 321 |
+
"encoder.layers.3.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 322 |
+
"encoder.layers.3.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 323 |
+
"encoder.layers.3.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 324 |
+
"encoder.layers.3.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 325 |
+
"encoder.layers.3.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 326 |
+
"encoder.layers.3.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 327 |
+
"encoder.layers.3.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 328 |
+
"encoder.layers.3.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 329 |
+
"encoder.layers.3.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 330 |
+
"encoder.layers.3.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 331 |
+
"encoder.layers.3.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 332 |
+
"encoder.layers.3.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 333 |
+
"encoder.layers.3.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 334 |
+
"encoder.layers.3.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 335 |
+
"encoder.layers.3.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 336 |
+
"encoder.layers.3.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 337 |
+
"encoder.layers.3.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 338 |
+
"encoder.layers.3.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 339 |
+
"encoder.layers.4.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 340 |
+
"encoder.layers.4.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 341 |
+
"encoder.layers.4.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 342 |
+
"encoder.layers.4.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 343 |
+
"encoder.layers.4.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 344 |
+
"encoder.layers.4.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 345 |
+
"encoder.layers.4.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 346 |
+
"encoder.layers.4.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 347 |
+
"encoder.layers.4.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 348 |
+
"encoder.layers.4.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 349 |
+
"encoder.layers.4.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 350 |
+
"encoder.layers.4.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 351 |
+
"encoder.layers.4.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 352 |
+
"encoder.layers.4.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 353 |
+
"encoder.layers.4.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 354 |
+
"encoder.layers.4.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 355 |
+
"encoder.layers.4.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 356 |
+
"encoder.layers.4.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 357 |
+
"encoder.layers.4.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 358 |
+
"encoder.layers.4.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 359 |
+
"encoder.layers.4.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 360 |
+
"encoder.layers.4.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 361 |
+
"encoder.layers.4.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 362 |
+
"encoder.layers.4.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 363 |
+
"encoder.layers.4.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 364 |
+
"encoder.layers.4.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 365 |
+
"encoder.layers.4.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 366 |
+
"encoder.layers.4.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 367 |
+
"encoder.layers.4.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 368 |
+
"encoder.layers.4.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 369 |
+
"encoder.layers.4.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 370 |
+
"encoder.layers.4.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 371 |
+
"encoder.layers.4.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 372 |
+
"encoder.layers.5.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 373 |
+
"encoder.layers.5.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 374 |
+
"encoder.layers.5.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 375 |
+
"encoder.layers.5.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 376 |
+
"encoder.layers.5.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 377 |
+
"encoder.layers.5.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 378 |
+
"encoder.layers.5.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 379 |
+
"encoder.layers.5.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 380 |
+
"encoder.layers.5.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 381 |
+
"encoder.layers.5.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 382 |
+
"encoder.layers.5.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 383 |
+
"encoder.layers.5.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 384 |
+
"encoder.layers.5.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 385 |
+
"encoder.layers.5.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 386 |
+
"encoder.layers.5.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 387 |
+
"encoder.layers.5.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 388 |
+
"encoder.layers.5.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 389 |
+
"encoder.layers.5.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 390 |
+
"encoder.layers.5.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 391 |
+
"encoder.layers.5.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 392 |
+
"encoder.layers.5.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 393 |
+
"encoder.layers.5.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 394 |
+
"encoder.layers.5.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 395 |
+
"encoder.layers.5.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 396 |
+
"encoder.layers.5.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 397 |
+
"encoder.layers.5.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 398 |
+
"encoder.layers.5.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 399 |
+
"encoder.layers.5.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 400 |
+
"encoder.layers.5.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 401 |
+
"encoder.layers.5.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 402 |
+
"encoder.layers.5.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 403 |
+
"encoder.layers.5.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 404 |
+
"encoder.layers.5.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 405 |
+
"encoder.layers.6.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 406 |
+
"encoder.layers.6.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 407 |
+
"encoder.layers.6.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 408 |
+
"encoder.layers.6.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 409 |
+
"encoder.layers.6.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 410 |
+
"encoder.layers.6.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 411 |
+
"encoder.layers.6.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 412 |
+
"encoder.layers.6.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 413 |
+
"encoder.layers.6.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 414 |
+
"encoder.layers.6.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 415 |
+
"encoder.layers.6.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 416 |
+
"encoder.layers.6.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 417 |
+
"encoder.layers.6.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 418 |
+
"encoder.layers.6.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 419 |
+
"encoder.layers.6.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 420 |
+
"encoder.layers.6.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 421 |
+
"encoder.layers.6.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 422 |
+
"encoder.layers.6.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 423 |
+
"encoder.layers.6.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 424 |
+
"encoder.layers.6.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 425 |
+
"encoder.layers.6.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 426 |
+
"encoder.layers.6.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 427 |
+
"encoder.layers.6.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 428 |
+
"encoder.layers.6.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 429 |
+
"encoder.layers.6.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 430 |
+
"encoder.layers.6.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 431 |
+
"encoder.layers.6.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 432 |
+
"encoder.layers.6.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 433 |
+
"encoder.layers.6.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 434 |
+
"encoder.layers.6.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 435 |
+
"encoder.layers.6.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 436 |
+
"encoder.layers.6.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 437 |
+
"encoder.layers.6.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 438 |
+
"encoder.layers.7.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 439 |
+
"encoder.layers.7.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 440 |
+
"encoder.layers.7.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 441 |
+
"encoder.layers.7.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 442 |
+
"encoder.layers.7.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 443 |
+
"encoder.layers.7.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 444 |
+
"encoder.layers.7.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 445 |
+
"encoder.layers.7.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 446 |
+
"encoder.layers.7.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 447 |
+
"encoder.layers.7.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 448 |
+
"encoder.layers.7.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 449 |
+
"encoder.layers.7.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 450 |
+
"encoder.layers.7.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 451 |
+
"encoder.layers.7.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 452 |
+
"encoder.layers.7.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 453 |
+
"encoder.layers.7.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 454 |
+
"encoder.layers.7.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 455 |
+
"encoder.layers.7.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 456 |
+
"encoder.layers.7.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 457 |
+
"encoder.layers.7.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 458 |
+
"encoder.layers.7.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 459 |
+
"encoder.layers.7.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 460 |
+
"encoder.layers.7.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 461 |
+
"encoder.layers.7.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 462 |
+
"encoder.layers.7.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 463 |
+
"encoder.layers.7.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 464 |
+
"encoder.layers.7.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 465 |
+
"encoder.layers.7.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 466 |
+
"encoder.layers.7.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 467 |
+
"encoder.layers.7.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 468 |
+
"encoder.layers.7.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 469 |
+
"encoder.layers.7.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 470 |
+
"encoder.layers.7.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 471 |
+
"encoder.layers.8.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 472 |
+
"encoder.layers.8.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 473 |
+
"encoder.layers.8.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 474 |
+
"encoder.layers.8.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 475 |
+
"encoder.layers.8.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 476 |
+
"encoder.layers.8.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 477 |
+
"encoder.layers.8.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 478 |
+
"encoder.layers.8.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 479 |
+
"encoder.layers.8.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 480 |
+
"encoder.layers.8.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 481 |
+
"encoder.layers.8.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 482 |
+
"encoder.layers.8.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 483 |
+
"encoder.layers.8.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 484 |
+
"encoder.layers.8.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 485 |
+
"encoder.layers.8.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 486 |
+
"encoder.layers.8.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 487 |
+
"encoder.layers.8.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 488 |
+
"encoder.layers.8.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 489 |
+
"encoder.layers.8.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 490 |
+
"encoder.layers.8.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 491 |
+
"encoder.layers.8.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 492 |
+
"encoder.layers.8.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 493 |
+
"encoder.layers.8.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 494 |
+
"encoder.layers.8.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 495 |
+
"encoder.layers.8.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 496 |
+
"encoder.layers.8.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 497 |
+
"encoder.layers.8.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 498 |
+
"encoder.layers.8.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 499 |
+
"encoder.layers.8.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 500 |
+
"encoder.layers.8.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 501 |
+
"encoder.layers.8.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 502 |
+
"encoder.layers.8.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 503 |
+
"encoder.layers.8.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 504 |
+
"encoder.layers.9.attn.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 505 |
+
"encoder.layers.9.attn.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 506 |
+
"encoder.layers.9.attn.rel_pos_emb.weight": "model-00001-of-00002.safetensors",
|
| 507 |
+
"encoder.layers.9.attn.to_kv.weight": "model-00001-of-00002.safetensors",
|
| 508 |
+
"encoder.layers.9.attn.to_out.bias": "model-00001-of-00002.safetensors",
|
| 509 |
+
"encoder.layers.9.attn.to_out.weight": "model-00001-of-00002.safetensors",
|
| 510 |
+
"encoder.layers.9.attn.to_q.weight": "model-00001-of-00002.safetensors",
|
| 511 |
+
"encoder.layers.9.conv.batch_norm.bias": "model-00001-of-00002.safetensors",
|
| 512 |
+
"encoder.layers.9.conv.batch_norm.num_batches_tracked": "model-00001-of-00002.safetensors",
|
| 513 |
+
"encoder.layers.9.conv.batch_norm.running_mean": "model-00001-of-00002.safetensors",
|
| 514 |
+
"encoder.layers.9.conv.batch_norm.running_var": "model-00001-of-00002.safetensors",
|
| 515 |
+
"encoder.layers.9.conv.batch_norm.weight": "model-00001-of-00002.safetensors",
|
| 516 |
+
"encoder.layers.9.conv.depth_conv.conv.weight": "model-00001-of-00002.safetensors",
|
| 517 |
+
"encoder.layers.9.conv.down_conv.bias": "model-00001-of-00002.safetensors",
|
| 518 |
+
"encoder.layers.9.conv.down_conv.weight": "model-00001-of-00002.safetensors",
|
| 519 |
+
"encoder.layers.9.conv.norm.bias": "model-00001-of-00002.safetensors",
|
| 520 |
+
"encoder.layers.9.conv.norm.weight": "model-00001-of-00002.safetensors",
|
| 521 |
+
"encoder.layers.9.conv.up_conv.bias": "model-00001-of-00002.safetensors",
|
| 522 |
+
"encoder.layers.9.conv.up_conv.weight": "model-00001-of-00002.safetensors",
|
| 523 |
+
"encoder.layers.9.ff1.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 524 |
+
"encoder.layers.9.ff1.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 525 |
+
"encoder.layers.9.ff1.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 526 |
+
"encoder.layers.9.ff1.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 527 |
+
"encoder.layers.9.ff1.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 528 |
+
"encoder.layers.9.ff1.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 529 |
+
"encoder.layers.9.ff2.down_proj.bias": "model-00001-of-00002.safetensors",
|
| 530 |
+
"encoder.layers.9.ff2.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 531 |
+
"encoder.layers.9.ff2.pre_norm.bias": "model-00001-of-00002.safetensors",
|
| 532 |
+
"encoder.layers.9.ff2.pre_norm.weight": "model-00001-of-00002.safetensors",
|
| 533 |
+
"encoder.layers.9.ff2.up_proj.bias": "model-00001-of-00002.safetensors",
|
| 534 |
+
"encoder.layers.9.ff2.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 535 |
+
"encoder.layers.9.post_norm.bias": "model-00001-of-00002.safetensors",
|
| 536 |
+
"encoder.layers.9.post_norm.weight": "model-00001-of-00002.safetensors",
|
| 537 |
+
"encoder.out.bias": "model-00001-of-00002.safetensors",
|
| 538 |
+
"encoder.out.weight": "model-00001-of-00002.safetensors",
|
| 539 |
+
"encoder.out_mid.bias": "model-00001-of-00002.safetensors",
|
| 540 |
+
"encoder.out_mid.weight": "model-00001-of-00002.safetensors",
|
| 541 |
+
"llm.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
|
| 542 |
+
"llm.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 543 |
+
"llm.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 544 |
+
"llm.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 545 |
+
"llm.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 546 |
+
"llm.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 547 |
+
"llm.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 548 |
+
"llm.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 549 |
+
"llm.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 550 |
+
"llm.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 551 |
+
"llm.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 552 |
+
"llm.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 553 |
+
"llm.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 554 |
+
"llm.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 555 |
+
"llm.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 556 |
+
"llm.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 557 |
+
"llm.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 558 |
+
"llm.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 559 |
+
"llm.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 560 |
+
"llm.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 561 |
+
"llm.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 562 |
+
"llm.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 563 |
+
"llm.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 564 |
+
"llm.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 565 |
+
"llm.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 566 |
+
"llm.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 567 |
+
"llm.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 568 |
+
"llm.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 569 |
+
"llm.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 570 |
+
"llm.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 571 |
+
"llm.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 572 |
+
"llm.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 573 |
+
"llm.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 574 |
+
"llm.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 575 |
+
"llm.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 576 |
+
"llm.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 577 |
+
"llm.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 578 |
+
"llm.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 579 |
+
"llm.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 580 |
+
"llm.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 581 |
+
"llm.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 582 |
+
"llm.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 583 |
+
"llm.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 584 |
+
"llm.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 585 |
+
"llm.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 586 |
+
"llm.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 587 |
+
"llm.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 588 |
+
"llm.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 589 |
+
"llm.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 590 |
+
"llm.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 591 |
+
"llm.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 592 |
+
"llm.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 593 |
+
"llm.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 594 |
+
"llm.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 595 |
+
"llm.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 596 |
+
"llm.model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 597 |
+
"llm.model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 598 |
+
"llm.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 599 |
+
"llm.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 600 |
+
"llm.model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 601 |
+
"llm.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 602 |
+
"llm.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 603 |
+
"llm.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 604 |
+
"llm.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 605 |
+
"llm.model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 606 |
+
"llm.model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 607 |
+
"llm.model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 608 |
+
"llm.model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 609 |
+
"llm.model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 610 |
+
"llm.model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 611 |
+
"llm.model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 612 |
+
"llm.model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 613 |
+
"llm.model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 614 |
+
"llm.model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 615 |
+
"llm.model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 616 |
+
"llm.model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 617 |
+
"llm.model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 618 |
+
"llm.model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 619 |
+
"llm.model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 620 |
+
"llm.model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 621 |
+
"llm.model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 622 |
+
"llm.model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 623 |
+
"llm.model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 624 |
+
"llm.model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 625 |
+
"llm.model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 626 |
+
"llm.model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 627 |
+
"llm.model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 628 |
+
"llm.model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 629 |
+
"llm.model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 630 |
+
"llm.model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 631 |
+
"llm.model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
| 632 |
+
"llm.model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 633 |
+
"llm.model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
| 634 |
+
"llm.model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
| 635 |
+
"llm.model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
| 636 |
+
"llm.model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
| 637 |
+
"llm.model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
| 638 |
+
"llm.model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
| 639 |
+
"llm.model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
| 640 |
+
"llm.model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.27.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.28.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.29.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.30.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.31.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.32.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.33.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.34.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.35.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.36.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.37.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.38.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.39.input_layernorm.weight": "model-00002-of-00002.safetensors",
"llm.model.layers.39.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
"llm.model.layers.39.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.39.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
"llm.model.layers.39.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
"llm.model.layers.39.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.39.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.39.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.39.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
"llm.model.norm.weight": "model-00002-of-00002.safetensors",
"projector.layer_norms.0.bias": "model-00002-of-00002.safetensors",
"projector.layer_norms.0.weight": "model-00002-of-00002.safetensors",
"projector.layer_norms.1.bias": "model-00002-of-00002.safetensors",
"projector.layer_norms.1.weight": "model-00002-of-00002.safetensors",
"projector.layer_norms.2.bias": "model-00002-of-00002.safetensors",
"projector.layer_norms.2.weight": "model-00002-of-00002.safetensors",
"projector.layer_norms.3.bias": "model-00002-of-00002.safetensors",
"projector.layer_norms.3.weight": "model-00002-of-00002.safetensors",
"projector.layer_projector.bias": "model-00002-of-00002.safetensors",
"projector.layer_projector.weight": "model-00002-of-00002.safetensors",
"projector.out_linear.bias": "model-00002-of-00002.safetensors",
"projector.out_linear.weight": "model-00002-of-00002.safetensors",
"projector.out_norm.bias": "model-00002-of-00002.safetensors",
"projector.out_norm.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.attn_norm.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.attn_norm.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.k_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.k_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.o_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.o_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.q_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.q_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.v_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.cross_attention.v_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.mlp.fc1.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.mlp.fc1.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.mlp.fc2.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.mlp.fc2.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.mlp_norm.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.0.mlp_norm.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.attn_norm.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.attn_norm.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.k_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.k_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.o_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.o_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.q_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.q_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.v_proj.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.cross_attention.v_proj.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.mlp.fc1.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.mlp.fc1.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.mlp.fc2.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.mlp.fc2.weight": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.mlp_norm.bias": "model-00002-of-00002.safetensors",
"projector.qformer.layers.1.mlp_norm.weight": "model-00002-of-00002.safetensors",
"projector.query": "model-00002-of-00002.safetensors",
"projector.window_positions": "model-00002-of-00002.safetensors"
}
}
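The weight map above makes it possible to locate a tensor's shard and load only that file. A minimal sketch using the stock safetensors API, run from the repo root (the tensor name is taken from the map above):

import json
from safetensors.torch import load_file

# Look up which shard holds a tensor, then load just that shard.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

shard_file = index["weight_map"]["projector.query"]   # "model-00002-of-00002.safetensors"
shard = load_file(shard_file)                         # dict: tensor name -> torch.Tensor
print(shard["projector.query"].shape)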
modeling_conformer.py
ADDED
@@ -0,0 +1,159 @@
import math

import torch
import torch.nn.functional as F
from torch import nn

from .configuration_nle import NLEEncoderConfig


class NLEConformerFeedForward(nn.Module):
    """Feedforward module for conformer encoder blocks."""

    def __init__(self, config: NLEEncoderConfig):
        super().__init__()
        self.pre_norm = nn.LayerNorm(config.hidden_dim)
        self.up_proj = nn.Linear(config.hidden_dim, config.hidden_dim * config.feedforward_mult)
        self.silu = nn.SiLU()
        self.dropout = nn.Dropout(config.dropout)
        self.down_proj = nn.Linear(config.hidden_dim * config.feedforward_mult, config.hidden_dim)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.pre_norm(hidden_states)
        hidden_states = self.up_proj(hidden_states)
        hidden_states = self.dropout(self.silu(hidden_states))
        hidden_states = self.down_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class NLEConformerAttention(nn.Module):
    """Attention for conformer blocks using Shaw's relative positional embeddings.

    See the following [paper](https://arxiv.org/pdf/1803.02155) for more details.
    """

    def __init__(self, config: NLEEncoderConfig):
        super().__init__()
        self.config = config
        seq = torch.arange(config.context_size)
        relpos_dist = seq.view(-1, 1) - seq.view(1, -1)
        attention_dists = torch.clamp(relpos_dist, -config.context_size, config.context_size) + config.max_pos_emb
        self.register_buffer("attention_dists", attention_dists, persistent=False)
        inner_dim = config.dim_head * config.num_heads
        self.max_pos_emb = config.max_pos_emb
        self.context_size = config.context_size
        self.num_heads = config.num_heads
        self.dim_head = config.dim_head
        self.scale = self.dim_head**-0.5
        self.pre_norm = nn.LayerNorm(config.hidden_dim)
        self.to_q = nn.Linear(config.hidden_dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(config.hidden_dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, config.hidden_dim)
        self.rel_pos_emb = nn.Embedding(2 * self.max_pos_emb + 1, self.dim_head)
        self.dropout = nn.Dropout(config.dropout)

        if self.context_size <= 0 or self.context_size > self.max_pos_emb:
            raise ValueError("context_size must be positive and must not exceed max_pos_emb")

    def forward(self, hidden_states: torch.Tensor,
                attention_mask: torch.Tensor) -> torch.Tensor:

        hidden_states = self.pre_norm(hidden_states)
        bsz, num_features, _ = hidden_states.shape

        # Attention is computed over non-overlapping blocks of `context_size` frames.
        num_blocks = math.ceil(num_features / self.context_size)
        remainder = num_features % self.context_size
        if self.config.old_encoder_mask:
            attention_mask = torch.ones_like(attention_mask)
        if remainder > 0:
            # right padding to reach block size
            hidden_states = F.pad(hidden_states, (0, 0, 0, self.context_size - remainder))
            attention_mask = F.pad(attention_mask, (0, self.context_size - remainder))

        query_states = self.to_q(hidden_states)
        key_states, value_states = self.to_kv(hidden_states).chunk(2, dim=-1)

        query_states = query_states.reshape(bsz, num_blocks, self.context_size, self.num_heads, -1).transpose(2, 3)
        key_states = key_states.reshape(bsz, num_blocks, self.context_size, self.num_heads, -1).transpose(2, 3)
        value_states = value_states.reshape(bsz, num_blocks, self.context_size, self.num_heads, -1).transpose(2, 3)

        # Fold the relative-position scores and the padding mask into a single
        # additive attention mask for SDPA.
        dist = self.attention_dists.to(hidden_states.device)
        rel_pos_emb = self.rel_pos_emb(dist).to(query_states.dtype)
        pos_attn = torch.einsum('b m h c d, c r d -> b m h c r', query_states, rel_pos_emb) * self.scale
        mask_value = -torch.finfo(pos_attn.dtype).max
        expanded_attention_mask = attention_mask.reshape(bsz, num_blocks, 1, 1, -1)
        pos_attn.masked_fill_(~expanded_attention_mask, mask_value)

        with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH):
            out = F.scaled_dot_product_attention(
                query_states, key_states, value_states, attn_mask=pos_attn, scale=self.scale
            )
        out = out.transpose(2, 3).reshape(bsz, hidden_states.shape[1], -1)
        out = self.to_out(out[:, :num_features, :])
        return self.dropout(out)


class NLEConformerDepthWiseConv1d(nn.Module):
    """Wrapper for padded 1D depthwise convolution."""

    def __init__(self, chan_in: int, chan_out: int, kernel_size: int):
        super().__init__()
        # Padding for the 1D conv is symmetric or close (i.e., offset by one).
        pad = kernel_size // 2
        pad_offset = (kernel_size + 1) % 2
        self.padding = (pad, pad - pad_offset)

        self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = F.pad(hidden_states, self.padding)
        return self.conv(hidden_states)


class NLEConformerConvModule(nn.Module):
    """Conformer conv module consisting of several 1D/depthwise 1D convolutional layers."""

    def __init__(self, config: NLEEncoderConfig):
        super().__init__()
        inner_dim = config.hidden_dim * config.conv_expansion_factor

        self.norm = nn.LayerNorm(config.hidden_dim)
        self.up_conv = nn.Conv1d(config.hidden_dim, inner_dim * 2, 1)
        self.glu = nn.GLU(dim=1)
        self.depth_conv = NLEConformerDepthWiseConv1d(
            inner_dim,
            inner_dim,
            kernel_size=config.conv_kernel_size,
        )
        self.silu = nn.SiLU()
        self.batch_norm = nn.BatchNorm1d(inner_dim)
        self.down_conv = nn.Conv1d(inner_dim, config.hidden_dim, 1)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.norm(hidden_states)
        hidden_states = self.up_conv(hidden_states.permute(0, 2, 1))
        hidden_states = self.glu(hidden_states)
        hidden_states = self.depth_conv(hidden_states)
        hidden_states = self.silu(self.batch_norm(hidden_states))
        hidden_states = self.down_conv(hidden_states).permute(0, 2, 1)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class NLEConformerBlock(nn.Module):
    """Conformer block, consisting largely of linear layers, attention, and convolutional layers."""

    def __init__(self, config: NLEEncoderConfig):
        super().__init__()
        self.ff1 = NLEConformerFeedForward(config)
        self.attn = NLEConformerAttention(config)
        self.conv = NLEConformerConvModule(config)
        self.ff2 = NLEConformerFeedForward(config)
        self.post_norm = nn.LayerNorm(config.hidden_dim)

    def forward(self, hidden_states: torch.Tensor,
                attention_mask: torch.Tensor) -> torch.Tensor:
        # Macaron-style half-step feedforward residuals around attention and conv.
        hidden_states = 0.5 * self.ff1(hidden_states) + hidden_states
        hidden_states = self.attn(hidden_states,
                                  attention_mask=attention_mask) + hidden_states
        hidden_states = self.conv(hidden_states) + hidden_states
        hidden_states = 0.5 * self.ff2(hidden_states) + hidden_states
        hidden_states = self.post_norm(hidden_states)
        return hidden_states
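A quick shape-level check of the block above. The hyperparameter values here are illustrative placeholders (the shipped values live in config.json), and the sketch assumes the repo's Python files are importable and that NLEEncoderConfig accepts these fields as keyword arguments:

import torch
from configuration_nle import NLEEncoderConfig
from modeling_conformer import NLEConformerBlock

# Placeholder hyperparameters, not the released configuration.
cfg = NLEEncoderConfig(hidden_dim=256, feedforward_mult=4, num_heads=4, dim_head=64,
                       context_size=160, max_pos_emb=160, conv_expansion_factor=2,
                       conv_kernel_size=31, dropout=0.1, old_encoder_mask=False)
block = NLEConformerBlock(cfg).eval()

x = torch.randn(2, 320, cfg.hidden_dim)          # (batch, frames, hidden)
mask = torch.ones(2, 320, dtype=torch.bool)
with torch.no_grad():
    y = block(x, attention_mask=mask)
print(y.shape)                                   # torch.Size([2, 320, 256])
# The block is shape-preserving; 320 frames are attended in two 160-frame windows.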
modeling_ctc.py
ADDED
@@ -0,0 +1,92 @@
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers.modeling_outputs import ModelOutput
from transformers.modeling_utils import PreTrainedModel

from .configuration_nle import NLEEncoderConfig
from .modeling_conformer import NLEConformerBlock


@dataclass
class NLEEncoderOutput(ModelOutput):
    logits: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    all_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class NLECTCEncoder(PreTrainedModel):
    config_class = NLEEncoderConfig

    def __init__(self, config: NLEEncoderConfig):
        super().__init__(config)
        self.config = config
        self.input_linear = nn.Linear(config.input_dim, config.hidden_dim, bias=True)
        self.layers = nn.ModuleList([NLEConformerBlock(config) for _ in range(config.num_layers)])
        self.out = nn.Linear(config.hidden_dim, config.output_dim, bias=True)
        self.out_mid = nn.Linear(config.output_dim, config.hidden_dim, bias=True)
        self.dropout = nn.Dropout(config.pred_dropout)

        self.post_init()

    def _init_weights(self, module: nn.Module):
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
        elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    def forward(
        self,
        input_features: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,  # [B, T_enc] bool after stacking
        output_hidden_states: Optional[bool] = None,
    ) -> NLEEncoderOutput:

        inputs_embeds = input_features
        if attention_mask is None:
            mask_shape = inputs_embeds.shape[:-1]
            attention_mask = torch.ones(mask_shape, dtype=torch.bool, device=inputs_embeds.device)

        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        hidden_states = self.input_linear(inputs_embeds.to(self.dtype))
        all_hidden_states = (hidden_states,) if output_hidden_states else None

        for idx, layer in enumerate(self.layers, start=1):
            hidden_states = layer(hidden_states, attention_mask=attention_mask)

            # Intermediate CTC self-conditioning: project a mid-stack prediction
            # and feed its softmax back into the hidden states.
            if idx == self.config.self_conditioning_layer:
                logits_mid_plain = self.out(self.dropout(hidden_states))
                probs_mid = torch.softmax(logits_mid_plain, dim=-1)
                hidden_states = hidden_states + self.out_mid(probs_mid)

            if output_hidden_states:
                all_hidden_states += (hidden_states,)

        hidden_states = self.dropout(hidden_states)
        logits_plain = self.out(hidden_states)
        logits = torch.log_softmax(logits_plain, dim=-1)

        return NLEEncoderOutput(
            logits=logits,
            last_hidden_state=hidden_states,
            all_hidden_states=all_hidden_states,
        )

    @torch.inference_mode()
    def generate(self, input_features, attention_mask, method="greedy"):
        model_outputs = self(input_features=input_features, attention_mask=attention_mask)
        if method == "greedy":
            preds = model_outputs.logits.argmax(-1)
            preds = torch.where(attention_mask, preds, 0)
            return preds
        raise NotImplementedError(f"unknown decoding method: {method}")
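The greedy path returns frame-level argmax IDs with padded positions mapped to 0; turning those into text is the standard CTC collapse, which the CTC tokenizer's decode presumably performs. An illustrative sketch, assuming blank id 0:

import torch

frame_ids = torch.tensor([0, 7, 7, 0, 0, 4, 4, 4, 0, 7])  # made-up frame-level IDs
collapsed = torch.unique_consecutive(frame_ids)            # merge repeats -> [0, 7, 0, 4, 0, 7]
tokens = collapsed[collapsed != 0]                         # drop blanks   -> [7, 4, 7]
print(tokens.tolist())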
modeling_nle.py
ADDED
@@ -0,0 +1,228 @@
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional

import torch
from transformers import (
    PreTrainedModel,
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoConfig,
)
from transformers.utils import ModelOutput

from .modeling_ctc import NLECTCEncoder
from .modeling_projector import EncoderProjectorQFormer
from .configuration_nle import NLEConfig
from .tokenizer import Tokenizer
from .modeling_conformer import NLEConformerBlock


@dataclass
class NLENARDecoderOutput(ModelOutput):
    loss: Optional[torch.Tensor] = None
    text_preds: Optional[List[str]] = None
    text_ctc_preds: Optional[List[str]] = None
    editing_logits: Optional[torch.Tensor] = None
    editing_attn_mask: Optional[torch.Tensor] = None
    encoder_logits: Optional[torch.Tensor] = None


class NLENARDecoder(PreTrainedModel):
    config_class = NLEConfig

    def __init__(self, config: NLEConfig):
        super().__init__(config)

        self.encoder = NLECTCEncoder(config.encoder_config)

        if config.ctc_tokenizer_config is not None:
            self.ctc_tokenizer = Tokenizer(**config.ctc_tokenizer_config)
        else:
            self.ctc_tokenizer = Tokenizer()

        if config.llm_config is not None:
            llm_cfg = AutoConfig.for_model(**config.llm_config)
            if config.attn_implementation is not None:
                llm_cfg._attn_implementation = config.attn_implementation
            self.llm = AutoModelForCausalLM.from_config(llm_cfg)
        else:
            llm_kwargs = {"device_map": "cpu", "torch_dtype": torch.bfloat16}
            if config.attn_implementation is not None:
                llm_kwargs["attn_implementation"] = config.attn_implementation
            self.llm = AutoModelForCausalLM.from_pretrained(config.llm_name, **llm_kwargs)

        # Disable the causal mask: editing conditions on the full hypothesis,
        # so the LLM attends bidirectionally.
        for layer in self.llm.model.layers:
            layer.self_attn.is_causal = False

        self.llm_tokenizer = AutoTokenizer.from_pretrained(config.llm_name)
        self.projector = EncoderProjectorQFormer(config.projector_config)

        self.post_init()

    def save_pretrained(self, save_directory, **kwargs):
        save_directory = Path(save_directory)
        save_directory.mkdir(parents=True, exist_ok=True)

        self.config.llm_config = self.llm.config.to_dict()
        if self.config.ctc_tokenizer_config is None and self.ctc_tokenizer is not None:
            self.config.ctc_tokenizer_config = {"char2idx": self.ctc_tokenizer.char2idx}

        self.llm_tokenizer.save_pretrained(save_directory)
        super().save_pretrained(save_directory, **kwargs)

        # Ship the modeling code alongside the weights for remote-code loading.
        src_dir = Path(__file__).parent
        for py_file in src_dir.glob("*.py"):
            shutil.copy2(py_file, save_directory / py_file.name)

    def add_insertion_slots(self, x: torch.Tensor) -> torch.Tensor:
        """Interleave pad_id (EOS) insertion slots before each token and after the
        last one, padding the result to a minimum length of 8."""
        pad_id = self.llm.config.eos_token_id
        n = x.numel()
        total_len = max(2 * n + 1, 8)
        idx = torch.arange(n, device=x.device)
        out_idx = 2 * idx + 1
        out = torch.full((total_len,), fill_value=pad_id, dtype=x.dtype, device=x.device)
        out[out_idx] = x
        return out

    def _decode_encoder_greedy(
        self,
        encoder_logits: torch.Tensor,
        attention_mask: torch.Tensor
    ) -> List[str]:
        ctc_preds = torch.where(attention_mask, encoder_logits.argmax(dim=-1), 0).cpu().numpy()
        text_ctc_preds = [self.ctc_tokenizer.decode(pred).strip() for pred in ctc_preds]
        text_ctc_preds = [x if x != "" else " " for x in text_ctc_preds]
        return text_ctc_preds

    def _prepare_llm_inputs(
        self,
        text_ctc_preds: List[str],
        projected_lengths: List[int],
        device: torch.device,
    ):
        """Prepare LLM input IDs and embeddings from CTC predictions."""
        pred_text_llm_tokens = self.llm_tokenizer(text_ctc_preds)
        temp_pad_id = -3
        # -1 marks audio positions whose embeddings are injected later.
        audio_ids = [torch.full((s,), -1, dtype=torch.long) for s in projected_lengths]
        audio_ids = torch.nn.utils.rnn.pad_sequence(
            audio_ids, batch_first=True, padding_side="left", padding_value=temp_pad_id
        )

        text_ids_unpadded = [
            self.add_insertion_slots(torch.tensor(x))
            for x in pred_text_llm_tokens.input_ids
        ]
        text_ids = torch.nn.utils.rnn.pad_sequence(
            text_ids_unpadded, batch_first=True, padding_side="right", padding_value=temp_pad_id
        )

        llm_input_ids = torch.cat([audio_ids, text_ids], dim=1).to(device)
        llm_attn_mask = llm_input_ids != temp_pad_id
        llm_embeds = self.llm.model.embed_tokens(
            torch.where(llm_input_ids < 0, self.llm_tokenizer.eos_token_id, llm_input_ids)
        )

        return llm_input_ids, llm_attn_mask, llm_embeds, audio_ids, text_ids_unpadded

    def _project_and_inject_audio_embeds(
        self,
        encoder_embs: torch.Tensor,
        llm_embeds: torch.Tensor,
        llm_input_ids: torch.Tensor,
        projected_lengths: List[int],
    ) -> torch.Tensor:
        """Project encoder embeddings and inject them into LLM embeddings."""
        projected_encoder_embeds = self.projector(encoder_embs)

        if self.config.scale_projected_embeddings and hasattr(self.llm.config, "embedding_multiplier"):
            projected_encoder_embeds = projected_encoder_embeds / self.llm.config.embedding_multiplier

        projected_encoder_embeds = projected_encoder_embeds.to(llm_embeds.dtype)
        for i, s in enumerate(projected_lengths):
            llm_embeds[i, llm_input_ids[i] == -1] = projected_encoder_embeds[i, :s]

        return llm_embeds

    def forward(
        self,
        *,
        input_features: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> NLENARDecoderOutput:

        need_hidden_states = self.config.encoder_layer_indices != [-1]
        enc_out = self.encoder(
            input_features=input_features,
            attention_mask=attention_mask,
            output_hidden_states=need_hidden_states,
        )

        encoder_logits = enc_out.logits

        if enc_out.all_hidden_states is not None and len(self.config.encoder_layer_indices) > 0:
            selected_list = [enc_out.all_hidden_states[idx] for idx in self.config.encoder_layer_indices]
            encoder_embs = torch.cat(selected_list, dim=-1)
        else:
            encoder_embs = enc_out.last_hidden_state
        enc_out = None

        if attention_mask is None:
            attention_mask = torch.ones_like(encoder_logits[..., 0], dtype=torch.bool)

        x_sizes = attention_mask.sum(dim=1)
        projected_lengths = (x_sizes // self.config.projector_config.downsample_rate).cpu().tolist()

        text_ctc_preds = self._decode_encoder_greedy(encoder_logits, attention_mask)
        llm_input_ids, llm_attn_mask, llm_embeds, audio_ids, _ = self._prepare_llm_inputs(
            text_ctc_preds, projected_lengths, encoder_embs.device
        )

        llm_embeds = self._project_and_inject_audio_embeds(
            encoder_embs, llm_embeds, llm_input_ids, projected_lengths
        )
        encoder_embs = None

        # Pack all non-padding positions into one sequence for an unpadded LLM
        # forward pass; position_ids restore each sample's own positions.
        llm_position_ids = llm_attn_mask.int().cumsum(dim=1) - 1
        llm_outputs = self.llm(
            inputs_embeds=llm_embeds[llm_attn_mask].unsqueeze(0),
            position_ids=llm_position_ids[llm_attn_mask].unsqueeze(0),
            use_cache=False,
        )

        llm_logits_shape = list(llm_attn_mask.shape) + [llm_outputs.logits.shape[-1]]
        llm_logits = torch.zeros(llm_logits_shape, device=llm_outputs.logits.device, dtype=llm_outputs.logits.dtype)
        llm_logits[llm_attn_mask] = llm_outputs.logits.squeeze(0)

        editing_logits = llm_logits[:, audio_ids.shape[1]:]

        return NLENARDecoderOutput(
            editing_logits=editing_logits,
            editing_attn_mask=llm_attn_mask[:, audio_ids.shape[1]:],
            encoder_logits=encoder_logits,
            text_ctc_preds=text_ctc_preds,
        )

    @torch.inference_mode()
    def generate(self, input_features, attention_mask):
        """Single-pass inference: forward + argmax decoding."""
        output = self.forward(input_features=input_features, attention_mask=attention_mask)

        editing_preds = output.editing_logits.argmax(-1)
        editing_preds = torch.where(output.editing_attn_mask, editing_preds, self.llm.config.eos_token_id)

        text_llm_preds = []
        for i in range(editing_preds.shape[0]):
            cur_pred = torch.unique_consecutive(editing_preds[i])
            cur_pred = cur_pred[cur_pred != self.llm.config.eos_token_id]
            pred_text = self.llm_tokenizer.decode(cur_pred, skip_special_tokens=True)
            text_llm_preds.append(pred_text)

        return NLENARDecoderOutput(
            text_preds=text_llm_preds,
            text_ctc_preds=output.text_ctc_preds,
            editing_logits=output.editing_logits,
            encoder_logits=output.encoder_logits,
        )
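To make the editing interface concrete, here is what add_insertion_slots and the generate() post-processing do on a toy sequence. The token IDs are invented and eos stands in for llm.config.eos_token_id:

import torch

eos = 0                                     # placeholder for llm.config.eos_token_id
x = torch.tensor([11, 12, 13])              # LLM token IDs of one CTC hypothesis

# add_insertion_slots: an EOS slot before each token and after the last one,
# padded to a minimum length of 8 -> [eos, 11, eos, 12, eos, 13, eos, eos].
slots = torch.full((max(2 * x.numel() + 1, 8),), eos)
slots[2 * torch.arange(x.numel()) + 1] = x

# The LLM relabels every slot in one forward pass; generate() then merges
# consecutive repeats and strips EOS, exactly as in the loop above.
edited = torch.tensor([eos, 11, 11, 14, eos, 13, eos, eos])  # hypothetical predictions
final = torch.unique_consecutive(edited)
final = final[final != eos]
print(final.tolist())                       # [11, 14, 13]: one substitution applied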
modeling_projector.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .configuration_nle import NLEProjectorConfig
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class QFormerCrossAttention(nn.Module):
|
| 8 |
+
def __init__(self, config: NLEProjectorConfig):
|
| 9 |
+
super().__init__()
|
| 10 |
+
self.num_heads = config.num_heads
|
| 11 |
+
self.head_dim = config.hidden_size // config.num_heads
|
| 12 |
+
self.hidden_size = config.hidden_size
|
| 13 |
+
|
| 14 |
+
self.q_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attn_bias)
|
| 15 |
+
self.k_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attn_bias)
|
| 16 |
+
self.v_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attn_bias)
|
| 17 |
+
self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attn_bias)
|
| 18 |
+
|
| 19 |
+
def forward(self, hidden_states, encoder_hidden_states):
|
| 20 |
+
batch_size, query_len, _ = hidden_states.shape
|
| 21 |
+
encoder_len = encoder_hidden_states.shape[1]
|
| 22 |
+
|
| 23 |
+
query_states = self.q_proj(hidden_states).view(
|
| 24 |
+
            batch_size, query_len, self.num_heads, self.head_dim
        ).transpose(1, 2)
        key_states = self.k_proj(encoder_hidden_states).view(
            batch_size, encoder_len, self.num_heads, self.head_dim
        ).transpose(1, 2)
        value_states = self.v_proj(encoder_hidden_states).view(
            batch_size, encoder_len, self.num_heads, self.head_dim
        ).transpose(1, 2)

        # Full (non-causal) attention from the queries to all encoder frames.
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states, key_states, value_states, is_causal=False,
        )

        attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, query_len, self.hidden_size)
        return self.o_proj(attn_output)


class QFormerMLP(nn.Module):
    def __init__(self, config: NLEProjectorConfig):
        super().__init__()
        mlp_hidden_size = int(config.hidden_size * config.mlp_ratio)
        self.fc1 = nn.Linear(config.hidden_size, mlp_hidden_size, bias=config.mlp_bias)
        self.act = nn.SiLU()
        self.fc2 = nn.Linear(mlp_hidden_size, config.hidden_size, bias=config.mlp_bias)

    def forward(self, hidden_states):
        return self.fc2(self.act(self.fc1(hidden_states)))


class QFormerLayer(nn.Module):
    """Pre-norm residual block: cross-attention to encoder frames, then MLP."""

    def __init__(self, config: NLEProjectorConfig):
        super().__init__()
        self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.layernorm_eps)
        self.cross_attention = QFormerCrossAttention(config)
        self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.layernorm_eps)
        self.mlp = QFormerMLP(config)

    def forward(self, hidden_states, encoder_hidden_states):
        hidden_states = hidden_states + self.cross_attention(
            self.attn_norm(hidden_states), encoder_hidden_states
        )
        hidden_states = hidden_states + self.mlp(self.mlp_norm(hidden_states))
        return hidden_states


class SimplifiedQFormer(nn.Module):
    """Stack of cross-attention layers; no self-attention among the queries."""

    def __init__(self, config: NLEProjectorConfig):
        super().__init__()
        self.layers = nn.ModuleList([
            QFormerLayer(config) for _ in range(config.num_layers)
        ])

    def forward(self, query_embeds, encoder_hidden_states):
        hidden_states = query_embeds
        for layer in self.layers:
            hidden_states = layer(hidden_states, encoder_hidden_states)
        return hidden_states


class EncoderProjectorQFormer(nn.Module):
    """Block-wise Q-Former projector: compresses encoder frames into LLM embeddings."""

    def __init__(self, config: NLEProjectorConfig):
        super().__init__()
        self.config = config

        # One LayerNorm per encoder layer whose output is fused into the projector input.
        self.layer_norms = nn.ModuleList([
            nn.LayerNorm(config.encoder_dim, eps=config.layernorm_eps)
            for _ in range(config.num_encoder_layers)
        ])

        self.layer_projector = nn.Linear(
            config.encoder_dim * config.num_encoder_layers, config.hidden_size
        )
        self.dropout = nn.Dropout(config.dropout_prob)
        self.projector_act = nn.GELU()

        self.qformer = SimplifiedQFormer(config)

        # Learned queries (block_size / downsample_rate per block) and per-frame
        # window position embeddings, both initialized at 1/sqrt(hidden_size) scale.
        query_length = config.block_size // config.downsample_rate
        embed_std = config.hidden_size ** -0.5
        self.query = nn.Parameter(
            torch.randn(1, query_length, config.hidden_size) * embed_std
        )
        self.window_positions = nn.Parameter(
            torch.randn(1, config.block_size, config.hidden_size) * embed_std
        )
        self.out_norm = nn.LayerNorm(config.hidden_size, eps=config.layernorm_eps)
        self.out_linear = nn.Linear(config.hidden_size, config.llm_dim)

    def forward(self, x):
        # x: (batch, seq_len, num_encoder_layers * encoder_dim) -- concatenated encoder layers.
        batch_size, seq_len, dim = x.size()

        # Normalize each encoder layer separately, then fuse with a linear projection.
        x = x.view(batch_size, seq_len, self.config.num_encoder_layers, self.config.encoder_dim)
        normalized_layers = []
        for i, layer_norm in enumerate(self.layer_norms):
            normalized_layers.append(layer_norm(x[:, :, i]))
        x = torch.cat(normalized_layers, dim=-1)

        x = self.projector_act(self.layer_projector(x))

        # Split the sequence into fixed-size blocks, zero-padding the last one if needed.
        block_size = self.config.block_size
        nblocks = seq_len // block_size
        rest = seq_len % block_size
        if rest > 0:
            x = nn.functional.pad(x, (0, 0, 0, block_size - rest), 'constant', 0)
            nblocks += 1

        x = x.view(batch_size * nblocks, block_size, self.config.hidden_size)
        # Initialize each query from a mean pool over its downsample_rate frames.
        query_length = self.query.shape[1]
        mean_pool = x.view(
            batch_size * nblocks, query_length, self.config.downsample_rate, self.config.hidden_size
        ).mean(dim=-2)

        query_output = self.qformer(
            query_embeds=self.dropout(self.query + mean_pool),
            encoder_hidden_states=self.dropout(x + self.window_positions),
        )

        # (batch, nblocks * query_length, llm_dim): downsampled features for the LLM.
        query_output = query_output.view(batch_size, nblocks * query_length, -1)
        query_output = self.dropout(self.out_norm(query_output))
        return self.out_linear(query_output)
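For orientation: seq_len encoder frames are cut into block_size windows, and each window is compressed to block_size / downsample_rate query vectors. A minimal shape-flow sketch follows; the config values are illustrative assumptions (only the field names come from the code above; the shipped values live in config.json, and NLEProjectorConfig is assumed to default the remaining fields such as num_heads and mlp_ratio):

# Shape-flow sketch with assumed config values, not the shipped configuration.
import torch
from configuration_nle import NLEProjectorConfig  # assumed import path
from modeling_projector import EncoderProjectorQFormer

config = NLEProjectorConfig(
    encoder_dim=512, num_encoder_layers=4,  # assumed encoder width / fused layer count
    hidden_size=1024, num_layers=2,         # assumed projector width / depth
    block_size=16, downsample_rate=4,       # 16-frame windows -> 4 queries per window
    llm_dim=2048,                           # assumed LLM embedding width
)
proj = EncoderProjectorQFormer(config)

x = torch.randn(2, 100, config.num_encoder_layers * config.encoder_dim)
out = proj(x)
print(out.shape)  # torch.Size([2, 28, 2048]): ceil(100/16) = 7 blocks * 4 queries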
preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "feature_extractor_type": "NLEFeatureExtractor",
  "hop_length": 160,
  "n_fft": 512,
  "n_mels": 80,
  "sampling_rate": 16000,
  "win_length": 400
}
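These are standard 16 kHz log-mel settings: 400-sample (25 ms) windows with a 160-sample (10 ms) hop and 80 mel bins, i.e. roughly 100 frames per second into the encoder. A rough torchaudio equivalent, as a sketch (the shipped NLEFeatureExtractor in feature_extraction_nle.py may apply different log scaling and normalization):

# Approximate mel front end matching the config above; the epsilon and log
# are assumptions, not the extractor's exact post-processing.
import torch
import torchaudio

mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=16000,  # "sampling_rate"
    n_fft=512,          # "n_fft"
    win_length=400,     # 25 ms window at 16 kHz
    hop_length=160,     # 10 ms hop -> ~100 frames/s
    n_mels=80,          # "n_mels"
)
wav = torch.randn(1, 16000)          # 1 s of dummy audio
feats = torch.log(mel(wav) + 1e-6)   # log-mel features
print(feats.shape)                   # torch.Size([1, 80, 101])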
rtf_wer.png
ADDED
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end_of_text|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|pad|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|unk|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer.py
ADDED
@@ -0,0 +1,30 @@
"""Minimal CTC tokenizer for Granite Speech."""

import numpy as np


class Tokenizer:
    """
    CTC tokenizer with char2idx mapping. Index 0 is always blank.
    Default vocab: latin256_kana92 (348 tokens).
    """

    def __init__(self, char2idx=None, **kwargs):
        if char2idx is None:
            # Default: latin256_kana92
            char2idx = {chr(n): n for n in range(32, 256)}
            char2idx |= {chr(0x30A1 + n): 256 + n for n in range(92)}

        # char2idx values may be strings after JSON roundtrip
        self.char2idx = {k: int(v) for k, v in char2idx.items()}
        self.idx2char = {v: k for k, v in self.char2idx.items()}
        self.vocab_size = len(self.char2idx) + 1

    def encode(self, text: str) -> np.ndarray:
        return np.array([self.char2idx[c] for c in text if c in self.char2idx], dtype=np.int64)

    def decode(self, tokens: np.ndarray) -> str:
        """Decode CTC output: unique_consecutive + remove blanks."""
        pred = tokens[np.insert(tokens[1:] != tokens[:-1], 0, True)]
        pred = pred[pred != 0]
        return "".join([self.idx2char[idx] for idx in pred.tolist()])
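decode implements plain best-path CTC post-processing: collapse consecutive repeats, then drop blanks (index 0). A quick round-trip with the default vocab:

# Round-trip sketch using the Tokenizer class above (blank = 0).
import numpy as np
from tokenizer import Tokenizer

tok = Tokenizer()
print(tok.encode("hi"))  # [104 105] -- raw character codes

# Simulated per-frame CTC argmax output with repeats and blanks:
frames = np.array([104, 104, 0, 0, 105, 105, 0])
print(tok.decode(frames))  # "hi" -- repeats collapsed, blanks removed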
tokenizer_config.json
ADDED
@@ -0,0 +1,783 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "100256": {"content": "<|pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100257": {"content": "<|end_of_text|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100258": {"content": "<|fim_prefix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100259": {"content": "<|fim_middle|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100260": {"content": "<|fim_suffix|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100261": {"content": "<|fim_pad|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100262": {"content": "<|filename|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100263": {"content": "<|reponame|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100264": {"content": "<|start_of_role|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100265": {"content": "<|end_of_role|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100266": {"content": "<|unused_1|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100267": {"content": "<|start_of_plugin|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100268": {"content": "<|end_of_plugin|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100269": {"content": "<|unk|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100270": {"content": "<tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100271": {"content": "</tool_call>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100272": {"content": "<tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100273": {"content": "</tool_response>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100274": {"content": "<think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100275": {"content": "</think>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false},
    "100276": {"content": "<think_on>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100277": {"content": "<think_off>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100278": {"content": "<schema>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100279": {"content": "</schema>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100280": {"content": "<tools>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100281": {"content": "</tools>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100282": {"content": "<documents>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100283": {"content": "</documents>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100284": {"content": "<|unused_15|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100285": {"content": "<|unused_16|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100286": {"content": "<|unused_17|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100287": {"content": "<|unused_18|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100288": {"content": "<|unused_19|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100289": {"content": "<|unused_20|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100290": {"content": "<|unused_21|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100291": {"content": "<|unused_22|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100292": {"content": "<|unused_23|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100293": {"content": "<|unused_24|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100294": {"content": "<|unused_25|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100295": {"content": "<|unused_26|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100296": {"content": "<|unused_27|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100297": {"content": "<|unused_28|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100298": {"content": "<|unused_29|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100299": {"content": "<|unused_30|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100300": {"content": "<|unused_31|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100301": {"content": "<|unused_32|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100302": {"content": "<|unused_33|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100303": {"content": "<|unused_34|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100304": {"content": "<|unused_35|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100305": {"content": "<|unused_36|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100306": {"content": "<|unused_37|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100307": {"content": "<|unused_38|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100308": {"content": "<|unused_39|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100309": {"content": "<|unused_40|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100310": {"content": "<|unused_41|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100311": {"content": "<|unused_42|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100312": {"content": "<|unused_43|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100313": {"content": "<|unused_44|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100314": {"content": "<|unused_45|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100315": {"content": "<|unused_46|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100316": {"content": "<|unused_47|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100317": {"content": "<|unused_48|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100318": {"content": "<|unused_49|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100319": {"content": "<|unused_50|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100320": {"content": "<|unused_51|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100321": {"content": "<|unused_52|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100322": {"content": "<|unused_53|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100323": {"content": "<|unused_54|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100324": {"content": "<|unused_55|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100325": {"content": "<|unused_56|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100326": {"content": "<|unused_57|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100327": {"content": "<|unused_58|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100328": {"content": "<|unused_59|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100329": {"content": "<|unused_60|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100330": {"content": "<|unused_61|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100331": {"content": "<|unused_62|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100332": {"content": "<|unused_63|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100333": {"content": "<|unused_64|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100334": {"content": "<|unused_65|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100335": {"content": "<|unused_66|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100336": {"content": "<|unused_67|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100337": {"content": "<|unused_68|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100338": {"content": "<|unused_69|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100339": {"content": "<|unused_70|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100340": {"content": "<|unused_71|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100341": {"content": "<|unused_72|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100342": {"content": "<|unused_73|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100343": {"content": "<|unused_74|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100344": {"content": "<|unused_75|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100345": {"content": "<|unused_76|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100346": {"content": "<|unused_77|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100347": {"content": "<|unused_78|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100348": {"content": "<|unused_79|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100349": {"content": "<|unused_80|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100350": {"content": "<|unused_81|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100351": {"content": "<|unused_82|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "bos_token": "<|end_of_text|>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|end_of_text|>",
  "extra_special_tokens": {},
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<|pad|>",
  "padding_side": "left",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|unk|>"
}
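Downstream, this config is consumed through AutoTokenizer; note that bos and eos share <|end_of_text|> and padding is left-sided for batched inference. A loading sketch (the repo id is a placeholder):

# Loading sketch; the repo id is assumed, and the repo's custom model code
# may additionally require trust_remote_code=True.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ibm-granite/granite-4.0-1b-speech-nar")
print(tok.bos_token, tok.eos_token)     # both "<|end_of_text|>"
print(tok.pad_token, tok.padding_side)  # "<|pad|>", "left"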
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff