aduncan94 committed on
Commit
0f7f578
·
verified ·
1 Parent(s): 490eb5e

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +4 -5
  2. config.json +44 -43
  3. generation_config.json +8 -0
  4. model.safetensors +2 -2
README.md CHANGED
@@ -1,8 +1,7 @@
1
  ---
2
- license: mit
3
- pipeline_tag: text-generation
4
  ---
5
 
6
- # EnhancAR
7
-
8
- EnhancAR is an autoregressive generative model of enhancer homology families, trained on 233,158,475 enhancers extracted from 241 vertebrate genomes. By "unrolling" homology families (enhancer sequences are sorted into sets of homology sequences, and input data is sequences concatenated to each other with a separator token delimiting different sequences), enhancAR learns to generate new sequences that conserve the function of prompt sequences. We demonstrate that this can be used to design new enhancers "by example", which is particularly useful when the function of enhancers is not known a priori.
 
1
  ---
2
+ {}
 
3
  ---
4
 
5
+ # EnhancAR
6
+ EnhancAR is an autoregressive generative model of enhancer homology families, trained on 233,158,475 enhancers extracted from 241 vertebrate genomes. By "unrolling" homology families (enhancer sequences are sorted into sets of homology sequences, and input data is sequences concatenated to each other with a separator token delimiting different sequences), EnhancAR learns to generate new sequences that conserve the function of prompt sequences. We demonstrate that this can be used to design new enhancers "by example", which is particularly useful when the function of enhancers is not known a priori.
7
+
config.json CHANGED
@@ -1,47 +1,48 @@
1
  {
2
- "model_config": {
3
- "hidden_size": 256,
4
- "intermediate_size": 1024,
5
- "num_hidden_layers": 24,
6
- "num_attention_heads": 16,
7
- "num_key_value_heads": 8,
8
- "use_mamba_kernels": true,
9
- "mamba_d_state": 16,
10
- "mamba_d_conv": 4,
11
- "mamba_expand": 2,
12
- "mamba_dt_rank": "auto",
13
- "mamba_conv_bias": true,
14
- "mamba_proj_bias": false,
15
- "output_router_logits": true,
16
- "use_cache": false,
17
- "_attn_implementation": "flash_attention_2",
18
- "vocab_size": 16,
19
- "pad_token_id": 6,
20
- "bos_token_id": 9,
21
- "eos_token_id": 7
22
- },
23
- "alphabet": [
24
- "G",
25
- "A",
26
- "T",
27
- "C",
28
- "N",
29
- "-",
30
- "!",
31
- "*",
32
- "/",
33
- "@",
34
- "[",
35
- "]",
36
- "{",
37
- "}"
38
- ],
39
- "model_type": "enhancar",
40
  "architectures": [
41
- "EnhancARModel"
42
  ],
 
 
 
43
  "auto_map": {
44
- "AutoConfig": "modeling_enhancar.EnhancARConfig",
45
- "AutoModel": "modeling_enhancar.EnhancARModel"
46
- }
47
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  {
2
+ "_name_or_path": "ai21labs/Jamba-v0.1",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  "architectures": [
4
+ "JambaForCausalLM"
5
  ],
6
+ "attention_dropout": 0.0,
7
+ "attn_layer_offset": 4,
8
+ "attn_layer_period": 8,
9
  "auto_map": {
10
+ "AutoConfig": "ai21labs/Jamba-v0.1--configuration_jamba.JambaConfig",
11
+ "AutoModel": "ai21labs/Jamba-v0.1--modeling_jamba.JambaModel",
12
+ "AutoModelForCausalLM": "ai21labs/Jamba-v0.1--modeling_jamba.JambaForCausalLM",
13
+ "AutoModelForSequenceClassification": "ai21labs/Jamba-v0.1--model.JambaForSequenceClassification"
14
+ },
15
+ "bos_token_id": 9,
16
+ "eos_token_id": 7,
17
+ "expert_layer_offset": 1,
18
+ "expert_layer_period": 2,
19
+ "hidden_act": "silu",
20
+ "hidden_size": 256,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 1024,
23
+ "mamba_conv_bias": true,
24
+ "mamba_d_conv": 4,
25
+ "mamba_d_state": 16,
26
+ "mamba_dt_rank": 16,
27
+ "mamba_expand": 2,
28
+ "mamba_proj_bias": false,
29
+ "max_position_embeddings": 262144,
30
+ "model_type": "jamba",
31
+ "num_attention_heads": 16,
32
+ "num_experts": 16,
33
+ "num_experts_per_tok": 2,
34
+ "num_hidden_layers": 24,
35
+ "num_key_value_heads": 8,
36
+ "num_logits_to_keep": 1,
37
+ "output_router_logits": true,
38
+ "pad_token_id": 6,
39
+ "rms_norm_eps": 1e-06,
40
+ "router_aux_loss_coef": 0.001,
41
+ "sliding_window": null,
42
+ "tie_word_embeddings": false,
43
+ "torch_dtype": "float32",
44
+ "transformers_version": "4.48.2",
45
+ "use_cache": false,
46
+ "use_mamba_kernels": true,
47
+ "vocab_size": 16
48
+ }
generation_config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 9,
4
+ "eos_token_id": 7,
5
+ "pad_token_id": 6,
6
+ "transformers_version": "4.48.2",
7
+ "use_cache": false
8
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b763ec95048b53f9c2b5722bf377c51aecb3a419e08f709b89f81958c64b2488
3
- size 681263120
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42ade0d0c7a7e74e7e6657392d9644a5b8531e6edb4c5b03934e13b17921da43
3
+ size 681256576