Upload modeling_seqcond.py with huggingface_hub
Browse files
modeling_seqcond.py +2 -2
modeling_seqcond.py
CHANGED
|
@@ -884,7 +884,7 @@ class SeqCondForCausalLM(SeqCondPreTrainedModel):
|
|
| 884 |
def generate(
|
| 885 |
self,
|
| 886 |
input_ids: torch.LongTensor,
|
| 887 |
-
max_new_tokens: int =
|
| 888 |
temperature: float = 0.15,
|
| 889 |
top_p: float = 0.9,
|
| 890 |
top_k: int = 50,
|
|
@@ -1029,7 +1029,7 @@ class SeqCondForCausalLM(SeqCondPreTrainedModel):
|
|
| 1029 |
def generate_batch(
|
| 1030 |
self,
|
| 1031 |
input_ids_list: List[torch.LongTensor],
|
| 1032 |
-
max_new_tokens: int =
|
| 1033 |
temperature: float = 0.7,
|
| 1034 |
eos_token_id: Optional[int] = None,
|
| 1035 |
use_triton: bool = False,
|
|
|
|
| 884 |
def generate(
|
| 885 |
self,
|
| 886 |
input_ids: torch.LongTensor,
|
| 887 |
+
max_new_tokens: int = 1024,
|
| 888 |
temperature: float = 0.15,
|
| 889 |
top_p: float = 0.9,
|
| 890 |
top_k: int = 50,
|
|
|
|
| 1029 |
def generate_batch(
|
| 1030 |
self,
|
| 1031 |
input_ids_list: List[torch.LongTensor],
|
| 1032 |
+
max_new_tokens: int = 1024,
|
| 1033 |
temperature: float = 0.7,
|
| 1034 |
eos_token_id: Optional[int] = None,
|
| 1035 |
use_triton: bool = False,
|