omniverse1 committed on
Commit
649d480
·
verified ·
1 Parent(s): 170adfa

update app

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -30,18 +30,19 @@ from config import IDX_STOCKS, TECHNICAL_INDICATORS, PREDICTION_CONFIG
30
@spaces.GPU(duration=120)
def load_model():
    """Load the Amazon Chronos-Bolt model and its tokenizer for time series forecasting.

    Returns:
        tuple: ``(model, tokenizer)`` loaded from the ``amazon/chronos-bolt-base``
        checkpoint on the Hugging Face Hub.
    """
    checkpoint = "amazon/chronos-bolt-base"

    # Chronos-Bolt is T5-based; trust_remote_code pulls in the repo's custom
    # model class instead of forcing a plain T5 interpretation.
    model = AutoModelForSeq2SeqLM.from_pretrained(
        checkpoint,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
    )

    # Rely solely on trust_remote_code so the checkpoint's own tokenizer class
    # is loaded, bypassing the conflicting T5 conversion logic.
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint,
        trust_remote_code=True,
    )
    return model, tokenizer
47
 
 
30
@spaces.GPU(duration=120)
def load_model():
    """Load the Amazon Chronos-Bolt model and its tokenizer for time series forecasting.

    Returns:
        tuple: ``(model, tokenizer)`` loaded from the ``amazon/chronos-bolt-base``
        checkpoint on the Hugging Face Hub.
    """
    checkpoint = "amazon/chronos-bolt-base"

    # Chronos-Bolt is T5-based; trust_remote_code pulls in the repo's custom
    # model class instead of forcing a plain T5 interpretation.
    # NOTE(review): Chronos-Bolt is a time-series model; the upstream-recommended
    # loading path is chronos.BaseChronosPipeline.from_pretrained — confirm whether
    # callers actually need a (model, tokenizer) pair before switching.
    model = AutoModelForSeq2SeqLM.from_pretrained(
        checkpoint,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
    )

    # use_fast=False plus trust_remote_code loads the checkpoint's own (slow)
    # tokenizer class, bypassing the problematic fast-tokenizer conversion logic.
    # FIX: dropped force_download=True — it re-downloaded the tokenizer files on
    # every invocation of this GPU-decorated function, defeating the local HF
    # cache and adding latency/bandwidth cost per call. force_download is a
    # one-off cache-repair flag, not a steady-state option; a normal cached
    # load is the correct default here.
    tokenizer = AutoTokenizer.from_pretrained(
        checkpoint,
        trust_remote_code=True,
        use_fast=False,
    )
    return model, tokenizer
48