Sync from GitHub (preserve manual model files)
Browse files
- StreamlitApp/utils/predict.py +7 -1
- requirements.txt +1 -0
StreamlitApp/utils/predict.py
CHANGED
|
@@ -90,7 +90,13 @@ def load_model():
|
|
| 90 |
classifier.eval()
|
| 91 |
|
| 92 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 93 |
-
tokenizer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
encoder = AutoModel.from_pretrained(PROTBERT_MODEL_NAME).to(device)
|
| 95 |
encoder.eval()
|
| 96 |
|
|
|
|
| 90 |
classifier.eval()
|
| 91 |
|
| 92 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 93 |
+
# ProtBERT tokenizer may only be available as a slow tokenizer, depending on whether the sentencepiece/tokenizers
|
| 94 |
+
# backends are available. Fall back to `use_fast=False` so Spaces can start.
|
| 95 |
+
try:
|
| 96 |
+
tokenizer = AutoTokenizer.from_pretrained(PROTBERT_MODEL_NAME, use_fast=True)
|
| 97 |
+
except Exception:
|
| 98 |
+
tokenizer = AutoTokenizer.from_pretrained(PROTBERT_MODEL_NAME, use_fast=False)
|
| 99 |
+
|
| 100 |
encoder = AutoModel.from_pretrained(PROTBERT_MODEL_NAME).to(device)
|
| 101 |
encoder.eval()
|
| 102 |
|
requirements.txt
CHANGED
|
@@ -8,3 +8,4 @@ plotly>=5.14.0
|
|
| 8 |
requests>=2.28.0
|
| 9 |
py3dmol>=2.0.0
|
| 10 |
transformers>=4.40.0
|
|
|
|
|
|
| 8 |
requests>=2.28.0
|
| 9 |
py3dmol>=2.0.0
|
| 10 |
transformers>=4.40.0
|
| 11 |
+
sentencepiece>=0.1.99
|