Atishay Jain committed on
Commit
610b7e5
·
1 Parent(s): 1e6d041

feat: implement negotiation environment wrapper and LLM-based inference script

__pycache__/env_wrapper.cpython-312.pyc ADDED
Binary file (5.06 kB)
 
env_wrapper.py ADDED
@@ -0,0 +1,100 @@
import random

class Opponent:
    """Scripted negotiation opponent with a tunable concession strategy."""

    def __init__(self, type_str, value, role):
        self.type = type_str
        self.opponent_value = value
        self.opponent_role = role
        # Strategy parameters: concession rate r, smoothing weight alpha,
        # patience (rounds before conceding faster), and price noise epsilon.
        if type_str == "greedy":
            self.r, self.alpha, self.patience, self.epsilon = 0.05, 0.7, 10, 5
        elif type_str == "fair":
            self.r, self.alpha, self.patience, self.epsilon = 0.15, 0.4, 7, 10
        elif type_str == "impatient":
            self.r, self.alpha, self.patience, self.epsilon = 0.25, 0.2, 3, 15
        else:  # unknown type falls back to the "fair" profile
            self.r, self.alpha, self.patience, self.epsilon = 0.15, 0.4, 7, 10
        self.concession_rate = self.r

    def get_response(self, round_num, current_offer, agent_offer, agent_action_type):
        if agent_action_type != "OFFER":
            return "REJECT", 0

        # Accept immediately if the agent's offer already clears our valuation.
        if self.opponent_role == "seller" and agent_offer >= self.opponent_value:
            return "ACCEPT", agent_offer
        if self.opponent_role == "buyer" and agent_offer <= self.opponent_value:
            return "ACCEPT", agent_offer

        # Concede faster once patience runs out.
        if round_num > self.patience:
            self.concession_rate = min(0.4, self.concession_rate + 0.05)

        # Move toward our own valuation, smooth toward the standing offer,
        # then add noise and clamp to the allowed price range.
        target = self.opponent_value
        delta = target - current_offer
        next_offer = current_offer + self.concession_rate * delta
        next_offer = (1.0 - self.alpha) * next_offer + self.alpha * current_offer
        next_offer += random.randint(-self.epsilon, self.epsilon)
        next_offer = max(100, min(1000, int(next_offer)))
        return "OFFER", next_offer


class EnvWrapper:
    """Single-issue price negotiation between the agent and a scripted opponent."""

    def __init__(self, opp_type="fair", a_val=800, o_val=500, agent_role="buyer"):
        self.agent_value = a_val
        self.opponent_value = o_val
        self.role = agent_role
        self.opp_role = "seller" if agent_role == "buyer" else "buyer"
        self.opp = Opponent(opp_type, o_val, self.opp_role)
        self.max_rounds = 20
        self.reset()

    def reset(self):
        self.round = 0
        # The opponent opens 200 away from the agent's valuation.
        if self.role == "buyer":
            self.current_offer = self.agent_value + 200
        else:
            self.current_offer = max(100, self.agent_value - 200)
        self.last_opp_action = "START"
        self.last_opp_offer = self.current_offer

    def step(self, action_str, action_price=0):
        self.round += 1
        aggressive = False
        done = False
        reward = 0.0

        if action_str == "ACCEPT":
            deal_price = self.last_opp_offer
            done = True
            profit = deal_price - self.agent_value if self.role == "seller" else self.agent_value - deal_price
            t_factor = 1.0 - (self.round / self.max_rounds)  # earlier deals score higher
            reward = profit * t_factor
            if profit < 0:
                reward -= 20  # extra penalty for accepting a losing deal

        elif action_str == "REJECT":
            reward = -50.0  # walking away ends the episode with a flat penalty
            done = True

        elif action_str.startswith("OFFER"):
            aggressive = abs(action_price - self.opponent_value) > 150
            opp_action, opp_price = self.opp.get_response(self.round, self.current_offer, action_price, "OFFER")
            if opp_action == "ACCEPT":
                deal_price = action_price
                done = True
                self.last_opp_action = "ACCEPT"
                self.last_opp_offer = deal_price

                profit = deal_price - self.agent_value if self.role == "seller" else self.agent_value - deal_price
                t_factor = 1.0 - (self.round / self.max_rounds)
                reward = profit * t_factor
                if profit < 0:
                    reward -= 20
                if aggressive:
                    reward -= 2  # small penalty for offers far from the opponent's value
            else:
                self.current_offer = opp_price
                self.last_opp_action = "OFFER"
                self.last_opp_offer = opp_price
                if self.round >= self.max_rounds:
                    reward = -50.0  # timeout: no deal reached
                    done = True

        if not done:
            reward = 0.0  # intermediate rounds carry no reward

        return reward, done
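
For a quick sanity check of the wrapper on its own, a minimal sketch; the fixed-step buyer policy below is a hypothetical stand-in for the LLM agent in inference.py and uses the wrapper's default valuations:

from env_wrapper import EnvWrapper

# Hypothetical scripted buyer: open low, concede 50 per round.
env = EnvWrapper(opp_type="fair", a_val=800, o_val=500, agent_role="buyer")
env.reset()

offer, done = 300, False
while not done:
    reward, done = env.step(f"OFFER {offer}", offer)
    print(f"round={env.round} agent_offer={offer} "
          f"opp={env.last_opp_action} {env.last_opp_offer} reward={reward:.2f}")
    offer += 50  # concede upward toward the seller's price

Against the "fair" seller (value 500), the buyer's offer crosses the seller's valuation at 500 in round 5, so the episode ends with an ACCEPT and a positive reward well before max_rounds.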
inference.py ADDED
@@ -0,0 +1,111 @@
import os
import re
import sys
from openai import OpenAI
from env_wrapper import EnvWrapper

def main():
    api_base_url = os.getenv("API_BASE_URL", "https://router.huggingface.co/v1")
    model_name = os.getenv("MODEL_NAME", "meta-llama/Meta-Llama-3-8B-Instruct")
    hf_token = os.getenv("HF_TOKEN")

    if not hf_token:
        print("ERROR: HF_TOKEN environment variable is not set.")
        sys.exit(1)

    env = EnvWrapper(opp_type="fair", a_val=300, o_val=700, agent_role="buyer")
    env.max_rounds = 4
    env.reset()

    print(f"[START] task=negotiation env=custom model={model_name}")

    client = OpenAI(base_url=api_base_url, api_key=hf_token)

    done = False
    step_n = 0
    rewards = []

    while not done and step_n < env.max_rounds:
        step_n += 1

        prompt = f"""You are negotiating as a {env.role}.
State:
* Current offer: {env.current_offer}
* Round: {env.round}
* Max rounds: {env.max_rounds}

Choose ONE:
* OFFER <price> (Preferred: counter-offer if you do not like the price!)
* ACCEPT
* REJECT"""

        # Defaults in case the model output cannot be parsed.
        action_str = "REJECT"
        action_price = 0
        error_msg = "null"

        try:
            response = client.chat.completions.create(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=20,
                temperature=0.3,
            )
            llm_text = response.choices[0].message.content.strip()

            match = re.search(r'(OFFER\s+\d+|ACCEPT|REJECT)', llm_text, re.IGNORECASE)
            if match:
                action_str = match.group(1).upper()
            else:
                # One strict retry if the first reply contained no valid action.
                error_msg = "parsing failed, retrying"
                response = client.chat.completions.create(
                    model=model_name,
                    messages=[
                        {"role": "user", "content": prompt},
                        {"role": "assistant", "content": llm_text},
                        {"role": "user", "content": "Output strictly ONLY ONE of: 'OFFER <price>', 'ACCEPT', or 'REJECT'."},
                    ],
                    max_tokens=15,
                    temperature=0.1,
                )
                llm_text2 = response.choices[0].message.content.strip()
                match2 = re.search(r'(OFFER\s+\d+|ACCEPT|REJECT)', llm_text2, re.IGNORECASE)
                if match2:
                    action_str = match2.group(1).upper()
                    error_msg = "null"
                else:
                    action_str = "REJECT"
                    error_msg = "parse error on retry, defaulting to REJECT"
        except Exception:
            error_msg = "API_Error"
            action_str = "REJECT"

        if action_str.startswith("OFFER"):
            try:
                action_price = int(action_str.split()[1])
            except (IndexError, ValueError):
                action_str = "REJECT"
                action_price = 0
                error_msg = "invalid price format"
            else:
                # Normalize whitespace, e.g. "OFFER  350" -> "OFFER 350".
                action_str = f"OFFER {action_price}"

        reward, done = env.step(action_str, action_price)
        rewards.append(reward)

        print(f"[STEP] step={step_n} action={action_str} reward={reward:.2f} done={str(done).lower()} error={error_msg}")

    # SCORING: normalize total reward by the value gap and clamp to [0, 1].
    max_possible_reward = float(abs(env.agent_value - env.opponent_value))
    score = sum(rewards) / max_possible_reward if max_possible_reward > 0 else 0.0
    score = max(0.0, min(1.0, score))
    success = score > 0.3

    rewards_str = ",".join(f"{r:.2f}" for r in rewards)
    print(f"[END] success={str(success).lower()} steps={step_n} score={score:.4f} rewards={rewards_str}")

if __name__ == "__main__":
    main()
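
The script reads API_BASE_URL, MODEL_NAME, and HF_TOKEN from the environment (e.g. HF_TOKEN=... python inference.py) and logs one [STEP] line per round plus a final [END] summary. For a sense of the scoring scale, a worked example of the formulas above using the wrapper's default valuations rather than the a_val=300/o_val=700 config in main(); these are hypothetical numbers, not output from this script:

# Buyer with value 800 closes a deal at 600 in round 5 of 20:
profit = 800 - 600                                    # 200
reward = profit * (1.0 - 5 / 20)                      # 150.0
score = max(0.0, min(1.0, reward / abs(800 - 500)))   # 0.5
success = score > 0.3                                 # True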