Hugging Face Spaces — status: Running
"""Query a hosted LLM through the Hugging Face Inference API.

Sends a single text-generation request to the serverless Inference API
and prints the model's completion. Requires the HF_TOKEN environment
variable to hold a valid Hugging Face access token.
"""
import os

from huggingface_hub import InferenceClient

# Read the access token from the environment rather than hard-coding it.
# Fail fast with an actionable message instead of a bare KeyError traceback.
token = os.environ.get("HF_TOKEN")
if token is None:
    raise SystemExit(
        "HF_TOKEN is not set. Export your Hugging Face access token, "
        "e.g.: export HF_TOKEN=hf_xxx"
    )

# Initialize the client pinned to a specific instruct model.
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    token=token,
)

# Example: text generation.
output = client.text_generation("What is the capital of France?")
print(output)