[Admin maintenance] Remove the HF token requirement to fix the expired-token issue
Browse files
app.py
CHANGED
|
@@ -11,8 +11,8 @@ import torch
|
|
| 11 |
from huggingface_hub import snapshot_download, login
|
| 12 |
from qwen_tts import Qwen3TTSModel
|
| 13 |
|
| 14 |
-
HF_TOKEN = os.environ.get('HF_TOKEN')
|
| 15 |
-
login(token=HF_TOKEN)
|
| 16 |
|
| 17 |
# Model size options
|
| 18 |
MODEL_SIZES = ["0.6B", "1.7B"]
|
|
@@ -40,7 +40,7 @@ voice_design_model = Qwen3TTSModel.from_pretrained(
|
|
| 40 |
get_model_path("VoiceDesign", "1.7B"),
|
| 41 |
device_map="cuda",
|
| 42 |
dtype=torch.bfloat16,
|
| 43 |
-
token=HF_TOKEN,
|
| 44 |
attn_implementation="kernels-community/flash-attn3",
|
| 45 |
)
|
| 46 |
|
|
@@ -50,7 +50,7 @@ base_model_0_6b = Qwen3TTSModel.from_pretrained(
|
|
| 50 |
get_model_path("Base", "0.6B"),
|
| 51 |
device_map="cuda",
|
| 52 |
dtype=torch.bfloat16,
|
| 53 |
-
token=HF_TOKEN,
|
| 54 |
attn_implementation="kernels-community/flash-attn3",
|
| 55 |
)
|
| 56 |
|
|
@@ -59,7 +59,7 @@ base_model_1_7b = Qwen3TTSModel.from_pretrained(
|
|
| 59 |
get_model_path("Base", "1.7B"),
|
| 60 |
device_map="cuda",
|
| 61 |
dtype=torch.bfloat16,
|
| 62 |
-
token=HF_TOKEN,
|
| 63 |
attn_implementation="kernels-community/flash-attn3",
|
| 64 |
)
|
| 65 |
|
|
@@ -69,7 +69,7 @@ custom_voice_model_0_6b = Qwen3TTSModel.from_pretrained(
|
|
| 69 |
get_model_path("CustomVoice", "0.6B"),
|
| 70 |
device_map="cuda",
|
| 71 |
dtype=torch.bfloat16,
|
| 72 |
-
token=HF_TOKEN,
|
| 73 |
attn_implementation="kernels-community/flash-attn3",
|
| 74 |
)
|
| 75 |
|
|
@@ -78,7 +78,7 @@ custom_voice_model_1_7b = Qwen3TTSModel.from_pretrained(
|
|
| 78 |
get_model_path("CustomVoice", "1.7B"),
|
| 79 |
device_map="cuda",
|
| 80 |
dtype=torch.bfloat16,
|
| 81 |
-
token=HF_TOKEN,
|
| 82 |
attn_implementation="kernels-community/flash-attn3",
|
| 83 |
)
|
| 84 |
|
|
|
|
| 11 |
from huggingface_hub import snapshot_download, login
|
| 12 |
from qwen_tts import Qwen3TTSModel
|
| 13 |
|
| 14 |
+
# HF_TOKEN = os.environ.get('HF_TOKEN')
|
| 15 |
+
# login(token=HF_TOKEN)
|
| 16 |
|
| 17 |
# Model size options
|
| 18 |
MODEL_SIZES = ["0.6B", "1.7B"]
|
|
|
|
| 40 |
get_model_path("VoiceDesign", "1.7B"),
|
| 41 |
device_map="cuda",
|
| 42 |
dtype=torch.bfloat16,
|
| 43 |
+
# token=HF_TOKEN,
|
| 44 |
attn_implementation="kernels-community/flash-attn3",
|
| 45 |
)
|
| 46 |
|
|
|
|
| 50 |
get_model_path("Base", "0.6B"),
|
| 51 |
device_map="cuda",
|
| 52 |
dtype=torch.bfloat16,
|
| 53 |
+
# token=HF_TOKEN,
|
| 54 |
attn_implementation="kernels-community/flash-attn3",
|
| 55 |
)
|
| 56 |
|
|
|
|
| 59 |
get_model_path("Base", "1.7B"),
|
| 60 |
device_map="cuda",
|
| 61 |
dtype=torch.bfloat16,
|
| 62 |
+
# token=HF_TOKEN,
|
| 63 |
attn_implementation="kernels-community/flash-attn3",
|
| 64 |
)
|
| 65 |
|
|
|
|
| 69 |
get_model_path("CustomVoice", "0.6B"),
|
| 70 |
device_map="cuda",
|
| 71 |
dtype=torch.bfloat16,
|
| 72 |
+
# token=HF_TOKEN,
|
| 73 |
attn_implementation="kernels-community/flash-attn3",
|
| 74 |
)
|
| 75 |
|
|
|
|
| 78 |
get_model_path("CustomVoice", "1.7B"),
|
| 79 |
device_map="cuda",
|
| 80 |
dtype=torch.bfloat16,
|
| 81 |
+
# token=HF_TOKEN,
|
| 82 |
attn_implementation="kernels-community/flash-attn3",
|
| 83 |
)
|
| 84 |
|