Update README.md
README.md
CHANGED
@@ -14,16 +14,16 @@ and to use it with text generation as a base model :3 (not recommended 3: needs
 from transformers import AutoTokenizer
 import torch
 
-# Load tokenizer
+# Load tokenizer >:D
 tokenizer = AutoTokenizer.from_pretrained("moelanoby/Kok-GPT")
 
-# Load
+# Load mi model :3 (ensure trust_remote_code=True)
 model = BucketMemoryModel.from_pretrained(
     "moelanoby/Kok-GPT",
     trust_remote_code=True
 )
 
-# Generate text
+# Generate text with this function :D
 def generate_text(prompt, max_length=50):
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(
@@ -32,10 +32,10 @@ def generate_text(prompt, max_length=50):
     )
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-#
+# change hello to anything you like :D
 prompt = "Hello"
 generated = generate_text(prompt)
-print(f"Generated: {generated}")
+print(f"Generated text >:3: {generated}")
 ```
 either way it was trained on 10K rows on the fineweb dataset which is considered insufficient I did end up with an average loss of 2.3468 so yeah you can still finetune the model but the time I get stronger GPUs I'll just target 7B parameters or 14B and etc...
 
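Two notes for anyone copying the snippet as committed: `BucketMemoryModel` is never imported, and the real `model.generate(...)` arguments are collapsed out of the diff (file lines 30-31). The sketch below is a runnable equivalent, not the commit's actual code: it loads through `AutoModelForCausalLM` with `trust_remote_code=True`, the standard Transformers mechanism for repos that ship a custom architecture, and fills the hidden `generate()` arguments with assumed defaults.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("moelanoby/Kok-GPT")

# The committed snippet calls BucketMemoryModel directly but never imports it;
# AutoModelForCausalLM + trust_remote_code=True resolves the repo's custom
# class without that import (assumed equivalent, not the commit's code).
model = AutoModelForCausalLM.from_pretrained(
    "moelanoby/Kok-GPT",
    trust_remote_code=True,
)

def generate_text(prompt, max_length=50):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        # The diff collapses the real arguments (file lines 30-31);
        # these are assumed, commonly used defaults.
        max_length=max_length,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(f"Generated: {generate_text('Hello')}")
```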
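The note under the code block says the model was trained on only 10K FineWeb rows (average loss 2.3468) and can still be fine-tuned. A minimal continued-training sketch under stated assumptions: the dataset id and config (`HuggingFaceFW/fineweb`, `sample-10BT`), the 512-token sequence length, and every hyperparameter are illustrative choices, not values from the original training run.

```python
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("moelanoby/Kok-GPT")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # the collator needs a pad token

model = AutoModelForCausalLM.from_pretrained(
    "moelanoby/Kok-GPT", trust_remote_code=True
)

# Assumed dataset id/config; the README only says "the fineweb dataset".
raw = load_dataset("HuggingFaceFW/fineweb", name="sample-10BT", split="train[:10000]")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=512)

tokenized = raw.map(tokenize, batched=True, remove_columns=raw.column_names)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="kok-gpt-finetuned",
        per_device_train_batch_size=4,
        num_train_epochs=1,
        logging_steps=50,
    ),
    train_dataset=tokenized,
    # Causal LM objective: labels are the input ids, shifted inside the model.
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```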