# model_tools/pytorch_to_safetensors.py
# Converts a local pickle-based PyTorch checkpoint to SafeTensors format.
# Origin: Hugging Face upload by Naphula (commit 2e55c41).
import os
import torch
# --- SECURITY WARNING: this bypass disables transformers' guard against
# torch.load() on pickle-based checkpoints.  Pickle deserialization can run
# arbitrary code, so ONLY use this script on checkpoints you already trust
# (here: a model downloaded locally by hand).  Do not reuse on untrusted files.
# The check is referenced from (at least) these two modules, so it must be
# patched in both; the hasattr guard keeps the script working on transformers
# versions where the symbol has moved or no longer exists.
import transformers.modeling_utils
import transformers.utils.import_utils

for _mod in (transformers.modeling_utils, transformers.utils.import_utils):
    if hasattr(_mod, "check_torch_load_is_safe"):
        _mod.check_torch_load_is_safe = lambda: None
# ---------------------------------------------
from transformers import AutoModelForCausalLM, AutoTokenizer
# 1. Path to your local PyTorch (pickle .bin) model
input_path = r"B:\7B\!models--Gryphe--Tiamat-7b"
# 2. Path where you want the SafeTensors version
output_path = r"B:\7B\!models--Gryphe--Tiamat-7b\safe"

print(f"Loading model from {input_path}...")
# Load the model entirely on CPU in bfloat16; low_cpu_mem_usage streams the
# checkpoint shards instead of materializing the full state dict twice.
model = AutoModelForCausalLM.from_pretrained(
    input_path,
    torch_dtype=torch.bfloat16,
    device_map="cpu",
    low_cpu_mem_usage=True,
)
# Load the tokenizer from the same directory so vocab/config travel together.
tokenizer = AutoTokenizer.from_pretrained(input_path)

print(f"Saving to {output_path}...")
# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(output_path, exist_ok=True)

# 3. safe_serialization=True writes .safetensors shards, each at most 5 GB.
model.save_pretrained(
    output_path,
    safe_serialization=True,
    max_shard_size="5GB",
)
tokenizer.save_pretrained(output_path)
print("Conversion complete.")