cuda support: mteb_tr_cli_cuda.py

#2
by NovaYear - opened

https://github.com/selmanbaysan/mteb_tr
^^ after cloning the repo above, install the CUDA build of torch before running the script:

pip uninstall torch
pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cu130

::SCRIPT::

#!/usr/bin/env python3

import argparse
import mteb
from mteb import MTEB
from sentence_transformers import SentenceTransformer
import os
import sys
import gc
import torch # Bellek temizliği için gerekli

def clear_memory():
    """Release Python garbage and, if a GPU is available, CUDA caches."""
    gc.collect()
    if not torch.cuda.is_available():
        return
    # Return cached device memory to the driver and clean up IPC handles.
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

def evaluate_model(model_name, output_folder):
    """Run every task of the MTEB(Turkish) benchmark for one model.

    Tasks are executed one at a time (rather than handing the whole
    benchmark to a single ``MTEB.run`` call) so that GPU/RAM can be
    reclaimed between tasks via ``clear_memory``.

    Args:
        model_name: Hugging Face model name or path to a local model,
            passed straight to ``SentenceTransformer``.
        output_folder: Directory where evaluation results are written.

    Returns:
        True when the task loop ran to completion (individual task
        failures are logged and skipped); False when setup failed.
    """
    try:
        # Fetch the benchmark as an iterable of individual tasks.
        benchmark = mteb.get_benchmark("MTEB(Turkish)")

        print(f"Loading model: {model_name}")
        model = SentenceTransformer(model_name, trust_remote_code=True)

        for current_task in benchmark:
            task_name = current_task.metadata.name
            print(f"\n>>> Running task: {task_name}")

            try:
                # Build a fresh single-task MTEB object and run it.
                MTEB(tasks=[current_task]).run(model, output_folder=output_folder)
                print(f"--- Task {task_name} completed ---")
            except Exception as task_error:
                # A failing task should not abort the remaining ones.
                print(f"Error in task {task_name}: {str(task_error)}", file=sys.stderr)
            finally:
                # Reclaim memory after every task, success or failure.
                clear_memory()
                print(f"Memory cleared after {task_name}.")

        print("\nEvaluation completed successfully!")
        return True

    except Exception as e:
        print(f"Error during initialization: {str(e)}", file=sys.stderr)
        return False

def main():
    """Parse command-line arguments and launch the benchmark run."""
    parser = argparse.ArgumentParser(
        description="Run MTEB-TR benchmark evaluation for a given model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "model_name",
        help="Name or path of the model to evaluate (e.g., 'sentence-transformers/LaBSE' or path to local model)",
    )
    parser.add_argument(
        "--output-folder",
        "-o",
        default="results",
        help="Path to save the evaluation results",
    )

    args = parser.parse_args()

    # Exit status mirrors the boolean result: 0 on success, 1 on failure.
    ok = evaluate_model(args.model_name, args.output_folder)
    sys.exit(0 if ok else 1)


if __name__ == "__main__":
    main()

Sign up or log in to comment