RayMelius Claude Opus 4.6 committed on
Commit
04e24fd
·
1 Parent(s): 263ace8

Fix MDF order book depth and CH AI reference price fallback

Browse files

MDF now places 3 levels of resting bids and asks per symbol each cycle,
ensuring order books always have liquidity. CH AI trader falls back to
securities.txt reference prices when books are empty. CH daily obligation
raised to 20.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

clearing_house/ch_ai_trader.py CHANGED
@@ -213,10 +213,29 @@ def _run_simulation_cycle():
213
  time.sleep(0.5) # stagger submissions
214
 
215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  def _fetch_bbos() -> dict:
217
- """Get BBO for all symbols from Matcher API."""
218
  try:
219
- # Load securities list to know symbols
220
  secs_file = os.getenv("SECURITIES_FILE", "/app/data/securities.txt")
221
  symbols = []
222
  try:
@@ -248,6 +267,22 @@ def _fetch_bbos() -> dict:
248
  bbos[sym] = {"best_bid": best_bid, "best_ask": best_ask}
249
  except Exception:
250
  pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  return bbos
252
  except Exception as e:
253
  print(f"[CH-AI] BBO fetch error: {e}")
 
213
  time.sleep(0.5) # stagger submissions
214
 
215
 
216
+ def _load_reference_prices() -> dict:
217
+ """Load reference prices from securities.txt as fallback when books are empty."""
218
+ ref = {}
219
+ secs_file = os.getenv("SECURITIES_FILE", "/app/data/securities.txt")
220
+ try:
221
+ with open(secs_file) as f:
222
+ for line in f:
223
+ line = line.strip()
224
+ if line and not line.startswith("#"):
225
+ parts = line.split()
226
+ if len(parts) >= 3:
227
+ sym, _, current = parts[0], float(parts[1]), float(parts[2])
228
+ ref[sym] = current
229
+ elif len(parts) >= 2:
230
+ ref[parts[0]] = float(parts[1])
231
+ except Exception:
232
+ pass
233
+ return ref
234
+
235
+
236
  def _fetch_bbos() -> dict:
237
+ """Get BBO for all symbols from Matcher API, falling back to reference prices."""
238
  try:
 
239
  secs_file = os.getenv("SECURITIES_FILE", "/app/data/securities.txt")
240
  symbols = []
241
  try:
 
267
  bbos[sym] = {"best_bid": best_bid, "best_ask": best_ask}
268
  except Exception:
269
  pass
270
+
271
+ # Fall back to reference prices for symbols with no live BBO
272
+ if len(bbos) < len(symbols):
273
+ ref_prices = _load_reference_prices()
274
+ spread = 0.10
275
+ for sym in symbols:
276
+ if sym not in bbos and sym in ref_prices:
277
+ mid = ref_prices[sym]
278
+ bbos[sym] = {
279
+ "best_bid": round(mid - spread, 2),
280
+ "best_ask": round(mid + spread, 2),
281
+ }
282
+ if ref_prices:
283
+ print(f"[CH-AI] Using reference prices for {len(bbos)} symbols "
284
+ f"({len(bbos) - len([s for s in bbos if bbos[s].get('best_bid')])} from file)")
285
+
286
  return bbos
287
  except Exception as e:
288
  print(f"[CH-AI] BBO fetch error: {e}")
clearing_house/ch_database.py CHANGED
@@ -13,7 +13,7 @@ import os
13
  CH_DB_PATH = os.getenv("CH_DB_PATH", "/app/data/clearing_house.db")
14
  CH_MEMBERS = [f"USR{i:02d}" for i in range(1, 11)]
15
  CH_STARTING_CAPITAL = 100_000.0
16
- CH_DAILY_OBLIGATION = 10 # minimum securities (qty sum) per trading day
17
 
18
  _local = threading.local()
19
 
 
13
  CH_DB_PATH = os.getenv("CH_DB_PATH", "/app/data/clearing_house.db")
14
  CH_MEMBERS = [f"USR{i:02d}" for i in range(1, 11)]
15
  CH_STARTING_CAPITAL = 100_000.0
16
+ CH_DAILY_OBLIGATION = 20 # minimum securities (qty sum) per trading day
17
 
18
  _local = threading.local()
19
 
md_feeder/mdf_simulator.py CHANGED
@@ -131,47 +131,48 @@ if __name__ == "__main__":
131
 
132
  mid = vals["current"]
133
  half_spread = 0.10
134
-
135
- side = random.choice(["BUY", "SELL"])
136
- rand = random.random()
137
-
138
- if rand < 0.90:
139
- # Passive: place orders away from mid to rest on book
 
 
 
 
 
 
 
 
 
 
 
 
 
140
  if side == "BUY":
141
- base = mid - half_spread
142
- offset = random.randint(1, 50) * Config.TICK_SIZE
143
- price = round(base - offset, 2)
144
  else:
145
- base = mid + half_spread
146
- offset = random.randint(1, 50) * Config.TICK_SIZE
147
- price = round(base + offset, 2)
148
- else:
149
- # Aggressive: cross the spread to create trades
150
- if side == "BUY":
151
- price = round(mid + half_spread + random.randint(1, 5) * Config.TICK_SIZE, 2)
152
- else:
153
- price = round(mid - half_spread - random.randint(1, 5) * Config.TICK_SIZE, 2)
154
-
155
- qty = random.choice([50, 100, 150, 200, 250])
156
- order = make_order(sym, side, price, qty)
157
- producer.send(Config.ORDERS_TOPIC, order)
158
- print(f"[MDF] Order: {order}")
159
 
160
  # Simulate small price drift (10% chance, max 2 ticks)
161
  if random.random() < 0.10:
162
- drift = random.choice([-2, -1, 1, 2]) * Config.TICK_SIZE
163
  new_price = vals["current"] + drift
164
  if new_price >= 1.00:
165
  vals["current"] = round(new_price, 2)
166
  save_securities(_securities)
167
 
168
- best_bid = mid - half_spread
169
- best_ask = mid + half_spread
170
- bid_size = random.choice([50, 100, 200])
171
- ask_size = random.choice([50, 100, 200])
172
  snap = make_snapshot(sym, best_bid, best_ask, bid_size, ask_size)
173
  producer.send(Config.SNAPSHOTS_TOPIC, snap)
174
- print(f"[MDF] Snapshot: {snap}")
175
 
176
  time.sleep(ORDER_INTERVAL)
177
 
 
131
 
132
  mid = vals["current"]
133
  half_spread = 0.10
134
+ tick = Config.TICK_SIZE
135
+
136
+ # Always place a resting BID and ASK to maintain book depth
137
+ for depth_level in range(3):
138
+ offset = random.randint(1 + depth_level * 3, 3 + depth_level * 5) * tick
139
+ bid_price = round(mid - half_spread - offset, 2)
140
+ ask_price = round(mid + half_spread + offset, 2)
141
+ bid_qty = random.choice([50, 100, 150, 200])
142
+ ask_qty = random.choice([50, 100, 150, 200])
143
+
144
+ bid_order = make_order(sym, "BUY", bid_price, bid_qty)
145
+ ask_order = make_order(sym, "SELL", ask_price, ask_qty)
146
+ producer.send(Config.ORDERS_TOPIC, bid_order)
147
+ producer.send(Config.ORDERS_TOPIC, ask_order)
148
+ print(f"[MDF] Depth: {sym} BID {bid_qty}@{bid_price:.2f} ASK {ask_qty}@{ask_price:.2f}")
149
+
150
+ # Occasionally add an aggressive order to generate trades (20%)
151
+ if random.random() < 0.20:
152
+ side = random.choice(["BUY", "SELL"])
153
  if side == "BUY":
154
+ price = round(mid + half_spread + random.randint(1, 3) * tick, 2)
 
 
155
  else:
156
+ price = round(mid - half_spread - random.randint(1, 3) * tick, 2)
157
+ qty = random.choice([50, 100, 150])
158
+ aggr_order = make_order(sym, side, price, qty)
159
+ producer.send(Config.ORDERS_TOPIC, aggr_order)
160
+ print(f"[MDF] Aggr: {sym} {side} {qty}@{price:.2f}")
 
 
 
 
 
 
 
 
 
161
 
162
  # Simulate small price drift (10% chance, max 2 ticks)
163
  if random.random() < 0.10:
164
+ drift = random.choice([-2, -1, 1, 2]) * tick
165
  new_price = vals["current"] + drift
166
  if new_price >= 1.00:
167
  vals["current"] = round(new_price, 2)
168
  save_securities(_securities)
169
 
170
+ best_bid = round(mid - half_spread, 2)
171
+ best_ask = round(mid + half_spread, 2)
172
+ bid_size = random.choice([100, 200, 300])
173
+ ask_size = random.choice([100, 200, 300])
174
  snap = make_snapshot(sym, best_bid, best_ask, bid_size, ask_size)
175
  producer.send(Config.SNAPSHOTS_TOPIC, snap)
 
176
 
177
  time.sleep(ORDER_INTERVAL)
178
 
notebooks/stockex-clearing-house-llm-fine-tuning.ipynb CHANGED
@@ -1251,7 +1251,7 @@
1251
  },
1252
  {
1253
  "cell_type": "code",
1254
- "source": "import gc\nfrom peft import PeftModel as PeftModelMerge\n\n# ── Free GPU memory ────────────────────────────────────────────────\nprint(\"Freeing GPU memory for CPU merge...\")\ntry:\n del trainer\nexcept NameError:\n pass\ntry:\n del model\nexcept NameError:\n pass\ngc.collect()\ntorch.cuda.empty_cache()\n\n# ── Load base model on CPU in float16 (~14GB RAM for 7B) ──────────\nprint(f\"Loading base model on CPU: {BASE_MODEL}\")\nbase_model_cpu = AutoModelForCausalLM.from_pretrained(\n BASE_MODEL,\n torch_dtype=torch.float16,\n device_map=\"cpu\",\n trust_remote_code=True,\n low_cpu_mem_usage=True,\n)\n\n# ── Apply LoRA adapter and merge ──────────────────────────────────\nprint(f\"Applying adapter from {OUTPUT_DIR}...\")\nmerged_model = PeftModelMerge.from_pretrained(\n base_model_cpu,\n OUTPUT_DIR,\n torch_dtype=torch.float16,\n device_map=\"cpu\",\n)\n\nprint(\"Merging adapter into base model...\")\nmerged_model = merged_model.merge_and_unload()\nprint(\"Merge complete.\")\n\n# ── Push full model + tokenizer to HF Hub ─────────────────────────\nprint(f\"Pushing full merged model to {OUTPUT_REPO}...\")\nmerged_model.push_to_hub(\n OUTPUT_REPO,\n commit_message=f\"Full merged model: QLoRA fine-tuned {BASE_MODEL}\",\n token=HF_TOKEN,\n max_shard_size=\"2GB\",\n)\ntokenizer.push_to_hub(\n OUTPUT_REPO,\n commit_message=f\"Tokenizer for {BASE_MODEL}\",\n token=HF_TOKEN,\n)\nprint(f\"Full model pushed to https://huggingface.co/{OUTPUT_REPO}\")\n\n# ── Cleanup CPU model ─────────────────────────────────────────────\ndel base_model_cpu, merged_model\ngc.collect()\nprint(\"Done β€” model is now usable via HF Inference Router API.\")",
1255
  "metadata": {
1256
  "id": "save-model",
1257
  "trusted": true
 
1251
  },
1252
  {
1253
  "cell_type": "code",
1254
+ "source": "import gc\nfrom peft import PeftModel as PeftModelMerge\n\nprint(\"Merging adapter into base model on GPU...\")\n\nbase_model_gpu = AutoModelForCausalLM.from_pretrained(\n BASE_MODEL,\n torch_dtype=torch.float16,\n device_map=\"auto\",\n trust_remote_code=True,\n low_cpu_mem_usage=True,\n)\n\nmerged_model = PeftModelMerge.from_pretrained(\n base_model_gpu, OUTPUT_REPO,\n torch_dtype=torch.float16,\n)\n\nmerged_model = merged_model.merge_and_unload()\nprint(\"Merge complete.\")\n\n# Save locally first, then push (avoids keeping model in memory during upload)\nMERGED_DIR = \"/kaggle/working/merged\"\nprint(f\"Saving merged model to {MERGED_DIR}...\")\nmerged_model.save_pretrained(MERGED_DIR, max_shard_size=\"2GB\")\n\n# Load and save tokenizer (may not be in memory if training cells were skipped)\ntokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)\ntokenizer.save_pretrained(MERGED_DIR)\n\ndel base_model_gpu, merged_model\ngc.collect()\ntorch.cuda.empty_cache()\n\n# Push from disk\nfrom huggingface_hub import HfApi\napi = HfApi(token=HF_TOKEN)\napi.upload_folder(\n folder_path=MERGED_DIR,\n repo_id=OUTPUT_REPO,\n commit_message=f\"Full merged model: QLoRA fine-tuned {BASE_MODEL}\",\n)\nprint(f\"Full model pushed to https://huggingface.co/{OUTPUT_REPO}\")",
1255
  "metadata": {
1256
  "id": "save-model",
1257
  "trusted": true
shared/config.py CHANGED
@@ -40,5 +40,5 @@ class Config:
40
  CH_DB_PATH: str = os.getenv("CH_DB_PATH", "/app/data/clearing_house.db")
41
  CH_MEMBERS: list = [f"USR{i:02d}" for i in range(1, 11)]
42
  CH_STARTING_CAPITAL: float = 100_000.0
43
- CH_DAILY_OBLIGATION: int = 10
44
  CH_SERVICE_URL: str = os.getenv("CH_SERVICE_URL", "http://localhost:5004")
 
40
  CH_DB_PATH: str = os.getenv("CH_DB_PATH", "/app/data/clearing_house.db")
41
  CH_MEMBERS: list = [f"USR{i:02d}" for i in range(1, 11)]
42
  CH_STARTING_CAPITAL: float = 100_000.0
43
+ CH_DAILY_OBLIGATION: int = 20
44
  CH_SERVICE_URL: str = os.getenv("CH_SERVICE_URL", "http://localhost:5004")