{
  "model_name": "newfinetuned-distilgptGGUF",
  "quantization_type": "q8_0",
  "source_model": "/tmp/inputs/merged_model_dir/data",
  "gguf_file": "newfinetuned-distilgptGGUF.gguf",
  "file_size_mb": 87.06,
  "conversion_tool": "llama.cpp"
}