Spaces:
Sleeping
Sleeping
Update utils/mistral.py
Browse files- utils/mistral.py +34 -12
utils/mistral.py
CHANGED
|
@@ -44,20 +44,34 @@ def call_llm(messages, max_tokens=2048, temperature=0.3):
|
|
| 44 |
|
| 45 |
|
| 46 |
# Function to clean model output
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
def Data_Cleaner(text):
|
| 48 |
-
pattern = r".*?format:"
|
| 49 |
-
result = re.split(pattern, text, maxsplit=1)
|
| 50 |
-
if len(result) > 1:
|
| 51 |
-
text_after_format = result[1].strip().strip('`').strip('json')
|
| 52 |
-
else:
|
| 53 |
-
text_after_format = text.strip().strip('`').strip('json')
|
| 54 |
-
|
| 55 |
try:
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
|
| 62 |
|
| 63 |
# Function to call LLM and process output
|
|
@@ -95,6 +109,10 @@ def Model_ProfessionalDetails_Output(resume, client=None):
|
|
| 95 |
try:
|
| 96 |
response = call_llm([system_role, user_prompt], max_tokens=3000, temperature=0.35)
|
| 97 |
clean_response = Data_Cleaner(response)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 98 |
parsed_response = json.loads(clean_response)
|
| 99 |
except Exception as e:
|
| 100 |
logging.error(f"LLM Error: {e}")
|
|
@@ -129,6 +147,10 @@ def Model_PersonalDetails_Output(resume, client=None):
|
|
| 129 |
try:
|
| 130 |
response = call_llm([system_role, user_prompt], max_tokens=2000, temperature=0.35)
|
| 131 |
clean_response = Data_Cleaner(response)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
parsed_response = json.loads(clean_response)
|
| 133 |
except Exception as e:
|
| 134 |
print("JSON Decode Error:", e)
|
|
|
|
| 44 |
|
| 45 |
|
| 46 |
# Function to clean model output
|
| 47 |
+
# def Data_Cleaner(text):
|
| 48 |
+
# pattern = r".*?format:"
|
| 49 |
+
# result = re.split(pattern, text, maxsplit=1)
|
| 50 |
+
# if len(result) > 1:
|
| 51 |
+
# text_after_format = result[1].strip().strip('`').strip('json')
|
| 52 |
+
# else:
|
| 53 |
+
# text_after_format = text.strip().strip('`').strip('json')
|
| 54 |
+
|
| 55 |
+
# try:
|
| 56 |
+
# json.loads(text_after_format)
|
| 57 |
+
# return text_after_format
|
| 58 |
+
# except json.JSONDecodeError:
|
| 59 |
+
# logging.error("Data cleaning led to invalid JSON")
|
| 60 |
+
# return text
|
| 61 |
def Data_Cleaner(text):
    """Extract the first brace-delimited JSON block from an LLM response.

    Args:
        text: Raw model output, typically JSON wrapped in prose and/or
            markdown code fences.

    Returns:
        The JSON substring (from the first '{' to the last '}') on
        success, or None when no JSON block is present or *text* is not
        a string. The caller is still expected to run json.loads on the
        result.
    """
    try:
        # Greedy match with DOTALL spans first '{' to last '}', so nested
        # objects and newlines inside the payload are kept intact.
        # Only re.search can raise here (TypeError when text is not str,
        # e.g. None from a failed LLM call) — keep the handler narrow.
        json_match = re.search(r'\{.*\}', text, re.DOTALL)
    except TypeError as e:
        logging.error(f"JSON extraction failed: {e}")
        return None

    if json_match:
        return json_match.group(0)

    # No control-flow exception needed: log and return the failure value.
    logging.error("JSON extraction failed: No JSON found in response")
    return None
|
| 74 |
+
|
| 75 |
|
| 76 |
|
| 77 |
# Function to call LLM and process output
|
|
|
|
| 109 |
try:
|
| 110 |
response = call_llm([system_role, user_prompt], max_tokens=3000, temperature=0.35)
|
| 111 |
clean_response = Data_Cleaner(response)
|
| 112 |
+
|
| 113 |
+
if not clean_response:
|
| 114 |
+
raise ValueError("Empty or invalid LLM response")
|
| 115 |
+
|
| 116 |
parsed_response = json.loads(clean_response)
|
| 117 |
except Exception as e:
|
| 118 |
logging.error(f"LLM Error: {e}")
|
|
|
|
| 147 |
try:
|
| 148 |
response = call_llm([system_role, user_prompt], max_tokens=2000, temperature=0.35)
|
| 149 |
clean_response = Data_Cleaner(response)
|
| 150 |
+
|
| 151 |
+
if not clean_response:
|
| 152 |
+
raise ValueError("Empty or invalid LLM response")
|
| 153 |
+
|
| 154 |
parsed_response = json.loads(clean_response)
|
| 155 |
except Exception as e:
|
| 156 |
print("JSON Decode Error:", e)
|