Sgridda committed on
Commit
5f40b94
·
1 Parent(s): 2588618
Files changed (1) hide show
  1. main.py +32 -17
main.py CHANGED
@@ -80,17 +80,15 @@ def run_ai_inference(diff: str) -> str:
80
  if not model or not tokenizer:
81
  raise RuntimeError("Model is not loaded.")
82
 
83
- # Improved prompt for codegen-350M-mono
84
- prompt = (
85
- "Below is a Python function. Please provide a code review comment with suggestions for improvement, in natural language. "
86
- "Do not repeat the code.\n"
87
- f"{diff[:800]}\n"
88
- "Review comment:"
89
- )
90
  encoded = tokenizer(
91
  prompt,
92
  return_tensors="pt",
93
- max_length=1024,
94
  truncation=True,
95
  padding="max_length"
96
  )
@@ -100,21 +98,38 @@ def run_ai_inference(diff: str) -> str:
100
  outputs = model.generate(
101
  input_ids=input_ids,
102
  attention_mask=attention_mask,
103
- max_new_tokens=128,
104
  do_sample=True,
105
- temperature=0.7,
106
- top_p=0.95,
107
  num_return_sequences=1,
108
  pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
109
  eos_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
110
  use_cache=True
111
  )
112
  response_text = tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
113
- # Post-process: filter out code-like lines and fallback if needed
114
- review_lines = [line.strip() for line in response_text.strip().split('\n') if line.strip()]
115
- # Filter out lines that look like code
116
- comment_lines = [l for l in review_lines if not l.startswith("def ") and not l.startswith("class ") and not l.endswith(":") and not l.startswith("#")]
117
- review = comment_lines[0] if comment_lines else "Consider adding a docstring and input validation."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  return review
119
 
120
  def parse_ai_response(response_text: str) -> list[ReviewComment]:
@@ -190,7 +205,7 @@ def root_html():
190
  <ul>
191
  <li>Mode: AI-Powered</li>
192
  <li>AI Model: Salesforce/codegen-350M-mono</li>
193
- <li>Response Time: ~30-35 seconds</li>
194
  </ul>
195
  </body>
196
  </html>
 
80
  if not model or not tokenizer:
81
  raise RuntimeError("Model is not loaded.")
82
 
83
+ # Simple, direct prompt for codegen-350M-mono
84
+ prompt = f"""Code:
85
+ {diff[:500]}
86
+
87
+ Review: This code could be improved by adding"""
 
 
88
  encoded = tokenizer(
89
  prompt,
90
  return_tensors="pt",
91
+ max_length=512, # Reduced from 1024 for faster processing
92
  truncation=True,
93
  padding="max_length"
94
  )
 
98
  outputs = model.generate(
99
  input_ids=input_ids,
100
  attention_mask=attention_mask,
101
+ max_new_tokens=32, # Further reduced for speed
102
  do_sample=True,
103
+ temperature=0.9,
104
+ top_p=0.85,
105
  num_return_sequences=1,
106
  pad_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
107
  eos_token_id=tokenizer.eos_token_id if tokenizer.eos_token_id is not None else tokenizer.pad_token_id,
108
  use_cache=True
109
  )
110
  response_text = tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
111
+
112
+ # Clean up the response
113
+ response_text = response_text.strip()
114
+
115
+ # Remove artifacts and clean up
116
+ if response_text.startswith("adding"):
117
+ response_text = "Adding " + response_text[6:]
118
+
119
+ # Take only the first sentence or meaningful phrase
120
+ sentences = response_text.split('.')
121
+ if sentences and len(sentences[0].strip()) > 10:
122
+ review = sentences[0].strip() + "."
123
+ else:
124
+ # Fallback to first meaningful line
125
+ lines = [line.strip() for line in response_text.split('\n') if line.strip()]
126
+ if lines and len(lines[0]) > 5:
127
+ review = lines[0]
128
+ if not review.endswith('.'):
129
+ review += "."
130
+ else:
131
+ review = "Consider adding proper documentation and error handling."
132
+
133
  return review
134
 
135
  def parse_ai_response(response_text: str) -> list[ReviewComment]:
 
205
  <ul>
206
  <li>Mode: AI-Powered</li>
207
  <li>AI Model: Salesforce/codegen-350M-mono</li>
208
+ <li>Response Time: ~15-25 seconds</li>
209
  </ul>
210
  </body>
211
  </html>