{
  "model_name": "finetuned-ibmgraniteGGUF",
  "quantization_type": "q8_0",
  "source_model": "/tmp/inputs/merged_model_dir/data",
  "gguf_file": "finetuned-ibmgraniteGGUF.gguf",
  "file_size_mb": 8281.94,
  "conversion_tool": "llama.cpp"
}