---
library_name: transformers
base_model:
- google/gemma-2-27b-it
---

This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from [google/gemma-2-27b-it](https://huggingface.co/google/gemma-2-27b-it).

### Example usage:

```python
from transformers import pipeline

model_id = "tiny-random/gemma-2"
pipe = pipeline('text-generation', model=model_id, device='cuda', dtype="bfloat16")
print(pipe('Hello World!'))
```
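
For stepping through the Gemma-2 code path in a debugger, it can also be convenient to skip the `pipeline` helper. Below is a minimal sketch using the standard `Auto*` classes; nothing in it is specific to this repo beyond the model id, and `max_new_tokens=8` is just an illustrative value:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiny-random/gemma-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Hello World!", return_tensors="pt")
# The weights are random, so the generated text is meaningless; the point is
# only to exercise tokenization, the forward pass, and generation quickly.
outputs = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```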

### Code to create this repo:

```python
import json

import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)

source_model_id = "google/gemma-2-27b-it"
save_folder = "/tmp/tiny-random/gemma-2"

# Reuse the original tokenizer/processor unchanged.
processor = AutoProcessor.from_pretrained(
    source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

# Shrink the architecture in the source config so the checkpoint stays tiny.
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
config_json['hidden_size'] = 8
config_json['intermediate_size'] = 64
config_json['num_attention_heads'] = 8
config_json['num_hidden_layers'] = 2
config_json['num_key_value_heads'] = 4
config_json['head_dim'] = 32
config_json['tie_word_embeddings'] = True
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)

# Build the model in bfloat16, then restore the default dtype.
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config)
torch.set_default_dtype(torch.float32)

# Copy the generation config from the source repo if it has one.
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )

# Re-initialize every parameter with a fixed seed so the repo is reproducible.
set_seed(42)
model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
print(model)
```
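
As a quick sanity check (not part of the script above), reloading the saved folder and running one forward pass confirms the checkpoint loads and is genuinely tiny:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

save_folder = "/tmp/tiny-random/gemma-2"

# Reload the freshly saved tiny checkpoint.
model = AutoModelForCausalLM.from_pretrained(save_folder)
tokenizer = AutoTokenizer.from_pretrained(save_folder)
print(f"total parameters: {sum(p.numel() for p in model.parameters()):,}")

# One short forward pass as a smoke test; the logits are meaningless because
# the weights are random.
inputs = tokenizer("Hello World!", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
print(out.logits.shape)
```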

### Printing the model:

```text
Gemma2ForCausalLM(
  (model): Gemma2Model(
    (embed_tokens): Embedding(256000, 8, padding_idx=0)
    (layers): ModuleList(
      (0-1): 2 x Gemma2DecoderLayer(
        (self_attn): Gemma2Attention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): Gemma2MLP(
          (gate_proj): Linear(in_features=8, out_features=64, bias=False)
          (up_proj): Linear(in_features=8, out_features=64, bias=False)
          (down_proj): Linear(in_features=64, out_features=8, bias=False)
          (act_fn): GELUTanh()
        )
        (input_layernorm): Gemma2RMSNorm((8,), eps=1e-06)
        (post_attention_layernorm): Gemma2RMSNorm((8,), eps=1e-06)
        (pre_feedforward_layernorm): Gemma2RMSNorm((8,), eps=1e-06)
        (post_feedforward_layernorm): Gemma2RMSNorm((8,), eps=1e-06)
      )
    )
    (norm): Gemma2RMSNorm((8,), eps=1e-06)
    (rotary_emb): Gemma2RotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=256000, bias=False)
)
```
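
The projection widths above follow from the overridden config values: `num_attention_heads (8) × head_dim (32) = 256` for `q_proj` (and the `o_proj` input), `num_key_value_heads (4) × head_dim (32) = 128` for `k_proj`/`v_proj`, and every projection reads from the 8-dimensional hidden state. A small illustrative check, with the numbers copied from the config edits above rather than read from the repo:

```python
# Illustrative check of the shapes printed above; values are hard-coded from
# the config overrides in the creation script.
hidden_size, num_heads, num_kv_heads, head_dim = 8, 8, 4, 32

assert num_heads * head_dim == 256     # q_proj out_features / o_proj in_features
assert num_kv_heads * head_dim == 128  # k_proj and v_proj out_features
assert hidden_size == 8                # in_features of q/k/v projections
```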