neuralgeekroot commited on
Commit
b119084
·
1 Parent(s): 512b5ce

Updated the code for automated Code peer review

Browse files
Files changed (5) hide show
  1. .github/workflows/main.yml +26 -0
  2. .gitignore +3 -0
  3. README.md +0 -2
  4. app.py +141 -0
  5. requirements.txt +7 -0
.github/workflows/main.yml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
name: Sync to Hugging Face Space
on:
  push:
    branches: [main]

  # to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  sync-to-hub:
    runs-on: ubuntu-latest
    steps:
      # Full history is needed so the force-push mirrors the whole repo;
      # LFS objects are skipped (the Space rejects large files anyway).
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: false

      - name: Ignore large files
        # Strip the oversized "Automated Code" path from every commit before
        # pushing. NOTE(review): `git filter-branch` is deprecated upstream —
        # `git filter-repo` is the recommended replacement. Fixed the
        # non-idiomatic `run :` key spacing.
        run: git filter-branch --index-filter 'git rm -rf --cached --ignore-unmatch "Automated Code"' HEAD

      - name: Push to hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN_BLOG_AI }}
        run: git push --force https://neuralgeekroot:$HF_TOKEN@huggingface.co/spaces/neuralgeekroot/AutomatedCodeReview main
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ automatedCodeReview/
2
+ .env
3
+ CodingPeerReview.zip
README.md CHANGED
@@ -18,5 +18,3 @@ This Application automates code using an open-source LLM
18
  1. Clone repo
19
  2. Install dependencies: `pip install -r requirements.txt`
20
  3. Create env keys
21
- # Automated-Code-Peer-Review
22
- # Automated-Code-Peer-Review
 
18
  1. Clone repo
19
  2. Install dependencies: `pip install -r requirements.txt`
20
  3. Create env keys
 
 
app.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ from langgraph.graph import StateGraph, START, END
4
+ from typing_extensions import TypedDict
5
+ from enum import Enum
6
+ from langchain_groq import ChatGroq
7
+ from langchain_core.prompts import PromptTemplate
8
+ import streamlit as st
9
+ import langsmith
10
+
11
# Load environment variables from a local .env file (if present) so the
# Groq/LangChain clients can pick them up.
load_dotenv()

# Re-export keys defensively: os.getenv returns None for a missing key, and
# `os.environ[k] = None` raises TypeError at import time. Skipping absent keys
# lets the app start and surface a clearer error when the client is used.
for _key in ('GROQ_API_KEY', 'LANGCHAIN_API_KEY'):
    _val = os.getenv(_key)
    if _val is not None:
        os.environ[_key] = _val

# LangSmith tracing: the documented switch is LANGCHAIN_TRACING_V2. The
# original LANGSMITH_TRACING_V2 name is kept too for backward compatibility.
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
os.environ['LANGSMITH_TRACING_V2'] = 'true'

# LangSmith reads the project name from LANGCHAIN_PROJECT; mirror the custom
# LANGCHAIN_PROJECT_NAME variable into it when it is set.
_project = os.getenv('LANGCHAIN_PROJECT_NAME')
if _project is not None:
    os.environ['LANGCHAIN_PROJECT_NAME'] = _project
    os.environ['LANGCHAIN_PROJECT'] = _project
17
+
18
class Step(Enum):
    """Workflow stage markers stored in CodingState['step'].

    The string values double as the routing keywords the peer-review
    prompt asks the LLM to return.
    """

    INPUT = "input"                   # initial state supplied by the Streamlit UI
    REVIEW = "review"                 # code generated, awaiting peer review
    IMPROVISATION = "improvisation"   # peer requested corrections -> back to coder
    APPROVAL = "approval"             # peer suggested changes -> manager node
    APPROVED = "approved"             # terminal state, workflow ends
24
+
25
# Define the state structure
class CodingState(TypedDict):
    """Shared state threaded through the LangGraph nodes."""

    code: str   # the user's request, then the generated/reviewed code text
    step: Step  # current workflow stage (see the Step enum)
29
+
30
# Define prompts in a configuration dictionary.
# Each template is formatted with the current state's 'code' value as {code}.
PROMPTS = {
    # coder: turn the user's request (or review feedback) into code
    "coder": "Create a code as per the {code} provided",
    # peer: instructed to answer with exactly one Step keyword, which the
    # peer node parses to drive the conditional routing
    "peer": "Review the code provided by the coder: {code}. "
            "If correction is needed, return 'improvisation'. "
            "If suggestions are needed, return 'approval'. "
            "If the code is correct, return 'approved'.",
    # manager: final docstring pass before approval
    "manager": "Add necessary docstrings to the following code and approve it: {code}"
}

# Initialize the LLM.
# NOTE(review): model availability on Groq changes over time — confirm
# "qwen-2.5-32b" is still served before deploying.
llm = ChatGroq(model="qwen-2.5-32b")
42
+
43
# Coder node: first stop after START; always hands off to the peer node.
def coder(state):
    """Generate code from the request currently held in state['code'].

    Returns a partial state update: the generated code and Step.REVIEW on
    success, or the unchanged code and Step.IMPROVISATION on failure.
    """
    print("Coder Node: Generating code...")
    try:
        pipeline = PromptTemplate.from_template(PROMPTS["coder"]) | llm
        response = pipeline.invoke({'code': state['code']})
        print(f"Coder Node: Generated code: {response.content}")
        return {'code': response.content, 'step': Step.REVIEW}
    except Exception as e:
        print(f"Coder Node: Error generating code - {e}")
        return {'code': state['code'], 'step': Step.IMPROVISATION}
56
+
57
# Define the peer node
def peer(state):
    """Review the code in state['code'] and decide the next workflow step.

    Returns a partial state update whose 'step' is one of
    Step.IMPROVISATION, Step.APPROVAL or Step.APPROVED. Any LLM or parsing
    failure falls back to Step.IMPROVISATION so the coder retries.
    """
    print("Peer Node: Reviewing code...")
    try:
        prompt = PromptTemplate.from_template(PROMPTS["peer"])
        chain = prompt | llm
        result = chain.invoke({'code': state['code']})

        # Extract the decision keyword from the model reply.
        decision = result.content.strip().lower()

        # Validate decision
        valid_decisions = [Step.IMPROVISATION.value, Step.APPROVAL.value, Step.APPROVED.value]
        if decision not in valid_decisions:
            # LLMs routinely wrap the keyword in quotes or a sentence; accept
            # a reply that contains exactly one valid keyword before giving up.
            matches = [d for d in valid_decisions if d in decision]
            if len(matches) == 1:
                decision = matches[0]
            else:
                print(f"Peer Node: Invalid decision '{decision}'. Defaulting to 'approval'.")
                decision = Step.APPROVAL.value  # Default fallback

        return {"code": state["code"], "step": Step(decision)}
    except Exception as e:
        print(f"Peer Node: Error reviewing code - {e}")
        return {"code": state["code"], "step": Step.IMPROVISATION}
79
+
80
def manager(state):
    """Add docstrings to the code and approve it.

    On failure the step stays at Step.APPROVAL; the graph's fixed
    manager -> END edge still terminates the run either way.
    """
    print("Manager Node: Adding docstrings and approving code...")
    try:
        pipeline = PromptTemplate.from_template(PROMPTS["manager"]) | llm
        reviewed = pipeline.invoke({'code': state['code']})
        print(f"Manager Node: Approved code: {reviewed.content}")
        return {'code': reviewed.content, 'step': Step.APPROVED}
    except Exception as e:
        print(f"Manager Node: Error approving code - {e}")
        return {'code': state['code'], 'step': Step.APPROVAL}
92
+
93
# Define the code validity function
def code_validity(state):
    """Route the workflow after the peer node based on state['step'].

    Returns the name of the next node ('coder' or 'manager') or END once
    the code is approved.

    Raises:
        ValueError: for any other step. The original fell through and
            implicitly returned None, which LangGraph would reject with a
            confusing KeyError in the conditional-edge mapping.
    """
    print(f"Code Validity: Current step: {state['step'].value}")
    if state['step'] == Step.IMPROVISATION:
        return "coder"
    elif state['step'] == Step.APPROVAL:
        return "manager"
    elif state['step'] == Step.APPROVED:
        return END
    raise ValueError(f"Unexpected workflow step: {state['step']!r}")
103
+
104
# Build the workflow
builder = StateGraph(CodingState)

# Add nodes
builder.add_node("coder", coder)
builder.add_node("peer", peer)
builder.add_node("manager", manager)

# Add edges: START -> coder -> peer, then a conditional hop driven by
# code_validity (back to coder for fixes, on to manager for docstrings,
# or straight to END once approved). manager always terminates the run.
builder.add_edge(START, "coder")
builder.add_edge("coder", "peer")
builder.add_conditional_edges("peer", code_validity, {"coder": "coder", "manager": "manager", END: END})
builder.add_edge("manager", END)

# Compile the workflow
workflow = builder.compile()
120
+
121
# Streamlit frontend
st.title("Automated Code Peer Review")
st.write("Submit your code for an automated peer review using an open-source LLM.")

# Text area for code input
code = st.text_area("Paste your code here:", height=300)

if st.button("Generate Code"):
    # Reject empty/whitespace-only submissions before invoking the graph.
    if code.strip() == "":
        st.error("Please paste some code to review.")
    else:
        with st.spinner("Generating review..."):
            try:
                # Invoke the workflow. Step.INPUT marks the initial state;
                # the START edge sends it straight to the coder node, and the
                # graph runs until a node routes to END.
                result = workflow.invoke({"code": code, 'step': Step.INPUT})
                st.success("Review Generated!")
                st.write("### Code Review Feedback")
                st.write(result['code'])

            except Exception as e:
                st.error(f"An error occurred: {e}")
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ langchain
2
+ langgraph
3
+ langsmith
4
+ streamlit
5
+ langchain_groq
6
+ python-dotenv
7
+ langchain_community