{
  "model_name": "finetuned-ibmgraniteGGUF",
  "quantization_type": "q8_0",
  "source_model": "/tmp/inputs/merged_model_dir/data",
  "gguf_file": "finetuned-ibmgraniteGGUF.gguf",
  "file_size_mb": 8281.94,
  "conversion_tool": "llama.cpp"
}