mimba committed on
Commit
b6504f9
·
verified ·
1 Parent(s): a8b5fe9

Upload 3 files

Browse files
Files changed (3) hide show
  1. config.json +10 -3
  2. tokenizer.json +130 -0
  3. tokenizer_config.json +6 -0
config.json CHANGED
@@ -1,5 +1,12 @@
1
  {
2
- "model_name": "Supertonic",
3
- "model_type": "onnx",
4
- "description": "This is a stub config for Hugging Face download counting. The actual model is located at onnx/"
 
 
 
 
 
 
 
5
  }
 
1
  {
2
+ "base_chunk_size": 512,
3
+ "chunk_compress_factor": 6,
4
+ "latent_dim": 24,
5
+ "model_type": "supertonic",
6
+ "sampling_rate": 44100,
7
+ "style_dim": 128,
8
+ "transformers.js_config": {
9
+ "dtype": "fp32",
10
+ "use_external_data_format": true
11
+ }
12
  }
tokenizer.json ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "truncation": null,
4
+ "padding": null,
5
+ "added_tokens": [],
6
+ "normalizer": {
7
+ "type": "Sequence",
8
+ "normalizers": [
9
+ {
10
+ "type": "NFKD"
11
+ },
12
+ {
13
+ "type": "Replace",
14
+ "pattern": {
15
+ "Regex": "\\s+"
16
+ },
17
+ "content": " "
18
+ },
19
+ {
20
+ "type": "Replace",
21
+ "pattern": {
22
+ "Regex": "[\u2013\u2014]"
23
+ },
24
+ "content": "-"
25
+ },
26
+ {
27
+ "type": "Replace",
28
+ "pattern": {
29
+ "Regex": "[^ -\"$-.0-;?A-Za-z£́]"
30
+ },
31
+ "content": ""
32
+ }
33
+ ]
34
+ },
35
+ "pre_tokenizer": {
36
+ "type": "FixedLength",
37
+ "length": 1
38
+ },
39
+ "post_processor": null,
40
+ "decoder": {
41
+ "type": "Fuse"
42
+ },
43
+ "model": {
44
+ "type": "WordLevel",
45
+ "vocab": {
46
+ " ": 0,
47
+ "!": 1,
48
+ "\"": 2,
49
+ "$": 3,
50
+ "%": 4,
51
+ "&": 5,
52
+ "'": 6,
53
+ "(": 7,
54
+ ")": 8,
55
+ "*": 9,
56
+ "+": 10,
57
+ ",": 11,
58
+ "-": 12,
59
+ ".": 13,
60
+ "0": 14,
61
+ "1": 15,
62
+ "2": 16,
63
+ "3": 17,
64
+ "4": 18,
65
+ "5": 19,
66
+ "6": 20,
67
+ "7": 21,
68
+ "8": 22,
69
+ "9": 23,
70
+ ":": 24,
71
+ ";": 25,
72
+ "?": 26,
73
+ "A": 27,
74
+ "B": 28,
75
+ "C": 29,
76
+ "D": 30,
77
+ "E": 31,
78
+ "F": 32,
79
+ "G": 33,
80
+ "H": 34,
81
+ "I": 35,
82
+ "J": 36,
83
+ "K": 37,
84
+ "L": 38,
85
+ "M": 39,
86
+ "N": 40,
87
+ "O": 41,
88
+ "P": 42,
89
+ "Q": 43,
90
+ "R": 44,
91
+ "S": 45,
92
+ "T": 46,
93
+ "U": 47,
94
+ "V": 48,
95
+ "W": 49,
96
+ "X": 50,
97
+ "Y": 51,
98
+ "Z": 52,
99
+ "a": 53,
100
+ "b": 54,
101
+ "c": 55,
102
+ "d": 56,
103
+ "e": 57,
104
+ "f": 58,
105
+ "g": 59,
106
+ "h": 60,
107
+ "i": 61,
108
+ "j": 62,
109
+ "k": 63,
110
+ "l": 64,
111
+ "m": 65,
112
+ "n": 66,
113
+ "o": 67,
114
+ "p": 68,
115
+ "q": 69,
116
+ "r": 70,
117
+ "s": 71,
118
+ "t": 72,
119
+ "u": 73,
120
+ "v": 74,
121
+ "w": 75,
122
+ "x": 76,
123
+ "y": 77,
124
+ "z": 78,
125
+ "£": 79,
126
+ "\u0301": 80
127
+ },
128
+ "unk_token": "\u0301"
129
+ }
130
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "tokenizer_class": "PreTrainedTokenizerFast",
3
+ "model_max_length": 1000,
4
+ "pad_token": " ",
5
+ "pad_token_id": 0
6
+ }