---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
base_model:
- swiss-ai/Apertus-70B-Instruct-2509
---

This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from [swiss-ai/Apertus-70B-Instruct-2509](https://huggingface.co/swiss-ai/Apertus-70B-Instruct-2509).

### Example usage:

- vLLM

```bash
vllm serve tiny-random/apertus
```
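
Once the server is up, it exposes an OpenAI-compatible API. Below is a minimal query sketch, assuming the default endpoint `http://localhost:8000/v1` and the `openai` Python client; the prompt and `max_tokens` are illustrative.

```python
from openai import OpenAI

# vLLM serves an OpenAI-compatible API; the api_key value is unused but required by the client.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="tiny-random/apertus",
    messages=[{"role": "user", "content": "How to make pasta?"}],
    max_tokens=32,
)
# The checkpoint is randomly initialized, so the reply is gibberish; this only checks the serving path.
print(response.choices[0].message.content)
```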

- Transformers

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiny-random/apertus"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)
messages = [
    {"role": "user", "content": "How to make pasta?"},
]
tokenized_chat = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)
outputs = model.generate(tokenized_chat.to(model.device), max_new_tokens=128)
output_text = tokenizer.decode(outputs[0])
print(output_text)
```
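
As an alternative to the explicit tokenizer/model calls above, here is a minimal sketch using the high-level `pipeline` API; it assumes a recent transformers version that accepts chat-style inputs in text-generation pipelines, and `max_new_tokens` is illustrative.

```python
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="tiny-random/apertus",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
messages = [{"role": "user", "content": "How to make pasta?"}]
# Chat-style inputs are templated automatically; the result echoes the conversation plus the reply.
print(pipe(messages, max_new_tokens=128)[0]["generated_text"])
```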

### Code to create this repo:

```python
import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)

source_model_id = "swiss-ai/Apertus-70B-Instruct-2509"
save_folder = "/tmp/tiny-random/apertus"

processor = AutoProcessor.from_pretrained(source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
config_json['hidden_size'] = 8
config_json['head_dim'] = 32  # vllm requirement
config_json['intermediate_size'] = 32
config_json['num_attention_heads'] = 8
config_json['num_hidden_layers'] = 2
config_json['num_key_value_heads'] = 4  # better support tensor parallel
config_json['tie_word_embeddings'] = False
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
    model.generation_config.do_sample = True
set_seed(42)
model = model.cpu()  # cpu is more stable for random initialization across machines
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
```
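
After saving, a quick sanity check is to reload the tiny checkpoint from `save_folder` and run a short generation. This is a minimal sketch, and the output is meaningless because the weights are random.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

save_folder = "/tmp/tiny-random/apertus"  # same path as in the creation script above
tokenizer = AutoTokenizer.from_pretrained(save_folder)
reloaded = AutoModelForCausalLM.from_pretrained(save_folder, torch_dtype=torch.bfloat16)

inputs = tokenizer("Hello!", return_tensors="pt")
# Random weights, so the generated tokens are gibberish; this only verifies that config,
# tokenizer, and weights load and run end to end.
print(tokenizer.decode(reloaded.generate(**inputs, max_new_tokens=8)[0]))
```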

### Printing the model:

```text
ApertusForCausalLM(
  (model): ApertusModel(
    (embed_tokens): Embedding(131072, 8, padding_idx=3)
    (layers): ModuleList(
      (0-1): 2 x ApertusDecoderLayer(
        (self_attn): ApertusAttention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
          (q_norm): ApertusRMSNorm((32,), eps=1e-05)
          (k_norm): ApertusRMSNorm((32,), eps=1e-05)
        )
        (mlp): ApertusMLP(
          (up_proj): Linear(in_features=8, out_features=32, bias=False)
          (down_proj): Linear(in_features=32, out_features=8, bias=False)
          (act_fn): XIELUActivation()
        )
        (attention_layernorm): ApertusRMSNorm((8,), eps=1e-05)
        (feedforward_layernorm): ApertusRMSNorm((8,), eps=1e-05)
      )
    )
    (norm): ApertusRMSNorm((8,), eps=1e-05)
    (rotary_emb): ApertusRotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=131072, bias=False)
)
```
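
For reference, the printout above and the total parameter count can be reproduced with a couple of lines; this is a minimal sketch that reloads the published checkpoint.

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("tiny-random/apertus")
print(model)  # reproduces the module structure shown above
print(f"total parameters: {sum(p.numel() for p in model.parameters()):,}")
```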