Some files missing

#5 opened by KongJohnny

I am trying to load your model with the following code:

from transformers import LongformerTokenizerFast
from Longformer_zh import LongformerZhForMaksedLM

tokenizer = LongformerTokenizerFast.from_pretrained('ValkyriaLenneth/longformer_zh')

But I get this error:

[INFO|tokenization_utils_base.py:1800] 2025-08-25 02:34:37,128 >> loading file vocab.json
[INFO|tokenization_utils_base.py:1800] 2025-08-25 02:34:37,128 >> loading file merges.txt
[INFO|tokenization_utils_base.py:1800] 2025-08-25 02:34:37,128 >> loading file tokenizer.json
[INFO|tokenization_utils_base.py:1800] 2025-08-25 02:34:37,128 >> loading file added_tokens.json
[INFO|tokenization_utils_base.py:1800] 2025-08-25 02:34:37,128 >> loading file special_tokens_map.json
[INFO|tokenization_utils_base.py:1800] 2025-08-25 02:34:37,128 >> loading file tokenizer_config.json
[INFO|configuration_utils.py:658] 2025-08-25 02:34:37,128 >> loading configuration file ./pretrained_models/longformer_zh/config.json
[INFO|configuration_utils.py:712] 2025-08-25 02:34:37,129 >> Model config LongformerConfig {
  "_name_or_path": "./pretrained_models/longformer_zh",
  "architectures": [
    "LongformerModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "attention_window": [
    512,
    512,
    512,
    512,
    512,
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "bos_token_id": 0,
  "classifier_dropout": null,
  "directionality": "bidi",
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 4096,
  "model_type": "longformer",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "onnx_export": false,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "sep_token_id": 2,
  "transformers_version": "4.26.0",
  "type_vocab_size": 2,
  "vocab_size": 21128
}

Traceback (most recent call last):
  File "./src/main_text.py", line 839, in <module>
    main()
  File "./src/main_text.py", line 221, in main
    tokenizer = LongformerTokenizerFast.from_pretrained(
  File "/opt/conda/envs/torch1.12.1/lib/python3.8/site-packages/transformers/tokenization_utils_base.py", line 1804, in from_pretrained
    return cls._from_pretrained(
  File "/opt/conda/envs/torch1.12.1/lib/python3.8/site-packages/transformers/tokenization_utils_base.py", line 1834, in _from_pretrained
    slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
  File "/opt/conda/envs/torch1.12.1/lib/python3.8/site-packages/transformers/tokenization_utils_base.py", line 1959, in _from_pretrained
    tokenizer = cls(*init_inputs, **init_kwargs)
  File "/opt/conda/envs/torch1.12.1/lib/python3.8/site-packages/transformers/models/longformer/tokenization_longformer.py", line 241, in __init__
    with open(merges_file, encoding="utf-8") as merges_handle:
TypeError: expected str, bytes or os.PathLike object, not NoneType

Because the file "vocab.json" is missing, I cannot load your model. I also could not find the other tokenizer files in your repo: "merges.txt", "tokenizer.json", ...
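
If the repository ships a BERT-style vocab.txt (the config's vocab_size of 21128 matches the Chinese BERT vocabulary), one possible workaround is to load the tokenizer with BertTokenizerFast, which does not need the RoBERTa-style vocab.json / merges.txt pair that LongformerTokenizerFast looks for. This is only a sketch under that assumption, not something I have verified against your repo:

from transformers import BertTokenizerFast, LongformerModel

# Assumption: a vocab.txt compatible with the Chinese BERT vocabulary is present in the repo
tokenizer = BertTokenizerFast.from_pretrained('ValkyriaLenneth/longformer_zh')
model = LongformerModel.from_pretrained('ValkyriaLenneth/longformer_zh')

# Quick check that tokenization and a forward pass work end to end
inputs = tokenizer("这是一个测试句子。", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)

If that also fails, the missing tokenizer files would need to be added to the repo itself.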
