# -*- coding: utf-8 -*-
"""L3.1-Dark-Reasoning-LewdPlay-evo-Hermes-R1-Uncensored-8B.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/DavidAU/L3.1-Dark-Reasoning-LewdPlay-evo-Hermes-R1-Uncensored-8B.ipynb

# Local Inference on GPU
Model page: https://huggingface.co/DavidAU/L3.1-Dark-Reasoning-LewdPlay-evo-Hermes-R1-Uncensored-8B
"""

# Install transformers library
# This command should be run in a Colab cell, not directly in a Python script.
# !pip install -U transformers

from transformers import pipeline

# Load the model once at module level.
# NOTE: renamed from `pipeline` to `text_generator` so the imported
# `pipeline` factory function is not shadowed by its own return value.
text_generator = pipeline(
    "text-generation",
    model="DavidAU/L3.1-Dark-Reasoning-LewdPlay-evo-Hermes-R1-Uncensored-8B",
)


def chat_with_model(prompt):
    """Send a single user prompt to the loaded model and return its reply.

    Args:
        prompt: The user's message as a plain string.

    Returns:
        The assistant's reply text, or a fallback apology string if the
        pipeline output does not have the expected chat structure.
    """
    messages = [
        {"role": "user", "content": prompt},
    ]
    response = text_generator(messages)

    # Expected shape (chat-style text-generation pipelines):
    #   [{"generated_text": [<user msg dict>, ..., <assistant msg dict>]}]
    # The structure can vary between transformers versions, so extract
    # the last message's "content" defensively.
    if response and isinstance(response, list) and len(response) > 0:
        generated_text = response[0].get("generated_text")
        if generated_text and isinstance(generated_text, list) and len(generated_text) > 0:
            last_message = generated_text[-1]
            if isinstance(last_message, dict) and "content" in last_message:
                return last_message["content"]
    return "Sorry, I couldn't generate a response."


# Example usage in a Colab cell:
# print(chat_with_model("Hello, how are you?"))
# print(chat_with_model("Tell me a story."))

# To make it interactive in a Colab environment, you would typically use
# input() within a loop in a separate cell, or use Colab's form features.

# Guarded so that importing this module does not trigger two model
# inferences as a side effect — demos run only when executed as a script.
if __name__ == "__main__":
    user_query_1 = "Hello, how are you?"
    bot_response_1 = chat_with_model(user_query_1)
    print(f"User: {user_query_1}")
    print(f"Bot: {bot_response_1}")

    user_query_2 = "What can you do?"
    bot_response_2 = chat_with_model(user_query_2)
    print(f"User: {user_query_2}")
    print(f"Bot: {bot_response_2}")