Update README.md
Browse files
README.md
CHANGED
|
@@ -9,7 +9,7 @@ tags:
|
|
| 9 |
- experimental
|
| 10 |
library_name: transformers
|
| 11 |
---
|
| 12 |
-
#
|
| 13 |
A small experimental language model based on the LLaMA architecture, trained on a custom, high-quality English dataset of around 200M tokens.
|
| 14 |
This model is just an experiment; it is not designed for coherent text generation or logical reasoning, and it may produce repetitive or nonsensical outputs.
|
| 15 |
|
|
@@ -27,8 +27,8 @@ Built by [PingVortex Labs](https://github.com/PingVortexLabs).
|
|
| 27 |
```python
|
| 28 |
from transformers import LlamaForCausalLM, PreTrainedTokenizerFast
|
| 29 |
|
| 30 |
-
model = LlamaForCausalLM.from_pretrained("pvlabs/
|
| 31 |
-
tokenizer = PreTrainedTokenizerFast.from_pretrained("pvlabs/
|
| 32 |
|
| 33 |
# don't expect a coherent response
|
| 34 |
prompt = "The capital of France is"
|
|
|
|
| 9 |
- experimental
|
| 10 |
library_name: transformers
|
| 11 |
---
|
| 12 |
+
# PingVortexLM1-20M-Base
|
| 13 |
A small experimental language model based on the LLaMA architecture, trained on a custom, high-quality English dataset of around 200M tokens.
|
| 14 |
This model is just an experiment; it is not designed for coherent text generation or logical reasoning, and it may produce repetitive or nonsensical outputs.
|
| 15 |
|
|
|
|
| 27 |
```python
|
| 28 |
from transformers import LlamaForCausalLM, PreTrainedTokenizerFast
|
| 29 |
|
| 30 |
+
model = LlamaForCausalLM.from_pretrained("pvlabs/PingVortexLM1-20M-Base")
|
| 31 |
+
tokenizer = PreTrainedTokenizerFast.from_pretrained("pvlabs/PingVortexLM1-20M-Base")
|
| 32 |
|
| 33 |
# don't expect a coherent response
|
| 34 |
prompt = "The capital of France is"
|