#!/bin/bash
#
# Prepare the Ollama model "llama3-dementia-care" for upload to Hugging Face:
# export its Modelfile and metadata, locate its weight blob in Ollama's local
# store, and write a README describing the remaining conversion steps.
#
# Requires: the `ollama` CLI on PATH and the model pulled locally.

# Strict mode: exit on error (-e), error on unset variables (-u),
# and fail a pipeline if any stage fails (pipefail).
set -euo pipefail

# Configuration constants.
readonly MODEL_NAME="llama3-dementia-care:latest"
readonly EXPORT_DIR="./model_export"
# Remember where we started so we can return after working in EXPORT_DIR.
CURRENT_DIR=$(pwd)
readonly CURRENT_DIR

echo "🚀 Preparing Llama 3 Dementia Care model for Hugging Face upload..."
echo "=================================================="
|
|
| |
# Verify the ollama CLI is installed before doing anything else.
if ! command -v ollama &> /dev/null; then
    echo "❌ Error: Ollama is not installed or not in PATH" >&2
    echo "Please install Ollama first: https://ollama.com" >&2
    exit 1
fi

# Verify the target model exists locally.
# -F matches the model name as a literal string (names contain '.' and ':'
# which would otherwise be interpreted as regex metacharacters);
# -- protects against a name starting with '-'.
if ! ollama list | grep -qF -- "$MODEL_NAME"; then
    echo "❌ Error: Model $MODEL_NAME not found" >&2
    echo "Available models:" >&2
    ollama list >&2
    exit 1
fi
|
|
echo "✅ Found model: $MODEL_NAME"

# Create a dedicated export directory and work inside it for the rest
# of the export steps.
mkdir -p "$EXPORT_DIR"
cd "$EXPORT_DIR" || { echo "❌ Error: cannot cd into $EXPORT_DIR" >&2; exit 1; }

echo "📁 Created export directory: $EXPORT_DIR"
|
|
| |
# Dump the model's metadata and its Modelfile; the Modelfile is parsed
# later to locate the weight blob.
echo "📋 Exporting model information..."
ollama show "$MODEL_NAME" > model_details.txt
ollama show "$MODEL_NAME" --modelfile > exported_modelfile.txt

echo "📄 Model details saved to:"
echo "  - model_details.txt"
echo "  - exported_modelfile.txt"
|
|
| |
# Write a README into the export directory describing the remaining manual
# conversion steps. The quoted 'EOF' delimiter keeps the here-doc literal
# (no variable or command expansion inside).
cat > export_README.md << 'EOF'
# Model Export

This directory contains the exported files from your Ollama model that need to be converted for Hugging Face.

## Files

- `model_details.txt` - Detailed model information from Ollama
- `exported_modelfile.txt` - The Modelfile configuration
- `export_README.md` - This file

## Conversion Options

### Option 1: Manual extraction

1. You'll need to manually extract the model weights from Ollama's blob storage
2. Convert them to PyTorch/Safetensors format
3. Create proper tokenizer files

### Option 2: Use conversion tools

1. Install ollama-python: `pip install ollama`
2. Use conversion scripts like:
   - https://github.com/ollama/ollama/blob/main/docs/modelfile.md
   - Community conversion tools

### Option 3: Re-train on Hugging Face

1. Start with the base Llama 3 8B model from Hugging Face
2. Fine-tune it with your dementia care dataset
3. Upload the fine-tuned model

## Important Notes

- Ollama stores models in a specific format that may require conversion
- The model weights are typically in `/Users/[username]/.ollama/models/blobs/`
- You may need to use specialized tools to extract and convert the weights

For more information, visit: https://ollama.com/blog/modelfile
EOF

echo "📝 Created export_README.md with next steps"
|
|
| |
# Try to locate the raw weight blob inside Ollama's local store so the user
# knows which file needs converting.
echo "🔍 Locating model blob files..."
OLLAMA_MODELS_DIR="$HOME/.ollama/models"
if [ -d "$OLLAMA_MODELS_DIR" ]; then
    echo "📂 Ollama models directory: $OLLAMA_MODELS_DIR"

    # The FROM line of the exported Modelfile references the blob by SHA256
    # (e.g. "FROM .../blobs/sha256-<hex>").
    # '|| BLOB_SHA=""' keeps a no-match grep (exit 1) from aborting the
    # whole script under `set -e`.
    BLOB_SHA=$(grep "^FROM" exported_modelfile.txt | grep "sha256" | awk -F'sha256-' '{print $2}') || BLOB_SHA=""
    if [ -n "$BLOB_SHA" ]; then
        echo "🔑 Model blob SHA: $BLOB_SHA"
        BLOB_PATH="$OLLAMA_MODELS_DIR/blobs/sha256-$BLOB_SHA"
        if [ -f "$BLOB_PATH" ]; then
            echo "✅ Found model blob: $BLOB_PATH"
            # du -h is more robust than parsing `ls -lh` output for the size.
            BLOB_SIZE=$(du -h "$BLOB_PATH" | cut -f1)
            echo "💾 Blob size: $BLOB_SIZE"

            # Record the blob details for the later conversion step.
            {
                echo "Model Blob Information:"
                echo "SHA256: $BLOB_SHA"
                echo "Path: $BLOB_PATH"
                echo "Size: $BLOB_SIZE"
                # ls parsing is fragile but informational only; kept for
                # BSD/GNU portability (stat/date -r flags differ).
                echo "Modified: $(ls -l "$BLOB_PATH" | awk '{print $6, $7, $8}')"
            } > blob_info.txt
        else
            echo "❌ Model blob not found at expected location" >&2
        fi
    else
        echo "❌ Could not extract blob SHA from Modelfile" >&2
    fi
else
    echo "❌ Ollama models directory not found" >&2
fi
|
|
# Return to the original directory before printing the summary.
cd "$CURRENT_DIR" || { echo "❌ Error: cannot cd back to $CURRENT_DIR" >&2; exit 1; }

echo ""
echo "🎉 Export preparation complete!"
echo "=================================================="
echo "📁 Files exported to: $EXPORT_DIR"
echo ""
echo "⚠️  IMPORTANT: Converting Ollama models to Hugging Face format requires additional steps:"
echo ""
echo "🔄 Conversion Options:"
echo "1. Use ollama-python and conversion tools"
echo "2. Extract and convert model weights manually"
echo "3. Re-train using the base Llama 3 model on Hugging Face"
echo ""
echo "📚 Resources:"
echo "- Ollama documentation: https://ollama.com/blog/modelfile"
echo "- Hugging Face model upload: https://huggingface.co/docs/transformers/model_sharing"
echo ""
echo "✅ Your repository structure is ready for Hugging Face!"
echo "📋 Repository files created:"
# grep exits 1 when nothing matches; '|| true' keeps `set -e` from
# aborting the script before the final message.
ls -la "$CURRENT_DIR" | grep -E '\.(md|json|txt|py)$|Modelfile|NOTICE' || true

echo ""
echo "🎯 Next: Upload your repository to Hugging Face and add the converted model weights."
|
|