import json
import os
import sys
from datetime import datetime, timezone

from peft import PeftModel, PeftConfig
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
|
| |
def load_model():
    """Load the TinyLlama chat base model and attach the CLI LoRA adapter.

    Returns:
        A ``(tokenizer, model)`` pair ready for text generation.
    """
    base_repo = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    adapter_repo = "Harish2002/cli-lora-tinyllama"

    tok = AutoTokenizer.from_pretrained(base_repo)
    base = AutoModelForCausalLM.from_pretrained(base_repo)
    # Wrap the base model with the fine-tuned LoRA weights.
    adapted = PeftModel.from_pretrained(base, adapter_repo)
    return tok, adapted
|
|
| |
def generate_plan(prompt, tokenizer, model):
    """Generate a plan for *prompt* and return the stripped generated text."""
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=256,
    )
    results = generator(prompt)
    text = results[0]['generated_text']
    return text.strip()
|
|
| |
def is_shell_command(line):
    """Heuristically decide whether *line* looks like a runnable shell command.

    A line qualifies when it invokes a local script ("./...") or when its
    first whitespace-delimited token is a known command name. Matching whole
    tokens instead of raw string prefixes avoids false positives such as
    "github ..." matching "git" or "lsof ..." matching "ls".

    Args:
        line: A single line of generated text (may be empty).

    Returns:
        True if the line appears to be a shell command, else False.
    """
    # Local script invocation, e.g. "./deploy.sh".
    if line.startswith("./"):
        return True
    tokens = line.split(None, 1)
    if not tokens:
        # Empty or whitespace-only line: nothing to execute.
        return False
    known = {"git", "bash", "tar", "gzip", "grep", "python", "python3", "cd", "ls"}
    return tokens[0] in known
|
|
| |
def log_trace(prompt, response):
    """Append one prompt/response exchange to ``logs/trace.jsonl``.

    Each line of the file is a self-contained JSON object (JSONL format),
    so the log can be appended to safely and streamed line-by-line.

    Args:
        prompt: The raw user instruction.
        response: The model's generated plan.
    """
    os.makedirs("logs", exist_ok=True)
    trace = {
        # Timezone-aware UTC timestamp; datetime.utcnow() returns a naive
        # datetime and is deprecated since Python 3.12.
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "input": prompt,
        "response": response,
    }
    # Explicit encoding so the log is UTF-8 regardless of platform default.
    with open("logs/trace.jsonl", "a", encoding="utf-8") as f:
        f.write(json.dumps(trace) + "\n")
|
|
| |
if __name__ == "__main__":
    # CLI entry point: take the instruction as the first argument.
    if len(sys.argv) < 2:
        print("Usage: python agent.py \"Your instruction here\"")
        sys.exit(1)

    user_input = sys.argv[1]
    tokenizer, model = load_model()
    result = generate_plan(user_input, tokenizer, model)

    print("\nGenerated Plan:\n")
    print(result)

    # Guard against an empty generation: result.splitlines()[0] would
    # raise IndexError when the model returns an empty string.
    lines = result.splitlines()
    if lines and is_shell_command(lines[0]):
        # Dry-run only: echo the command rather than executing it.
        print("\nDry-run:")
        print(f"echo {lines[0]}")

    log_trace(user_input, result)
|
|