gaurv007 committed on
Commit
a61dcf1
·
verified ·
1 Parent(s): 79c33ca

v4.2: Update app.py

Browse files
Files changed (1) hide show
  1. app.py +99 -64
app.py CHANGED
@@ -1,6 +1,15 @@
1
  """
2
- ClauseGuard — World's Best Legal Contract Analysis Tool (v4.1)
3
  ═══════════════════════════════════════════════════════════════
 
 
 
 
 
 
 
 
 
4
  Fixes in v4.1:
5
  • FIX: Bounded LRU caches (chunk_cache, prediction_cache) — no more memory leaks
6
  • FIX: NLI input format — pass (text_a, text_b) tuple, not [SEP]-concatenated string
@@ -44,6 +53,7 @@ import io
44
  import uuid
45
  import tempfile
46
  import hashlib
 
47
  from collections import defaultdict, OrderedDict
48
  from datetime import datetime
49
  from functools import lru_cache
@@ -80,6 +90,14 @@ try:
80
  except Exception:
81
  pass
82
 
 
 
 
 
 
 
 
 
83
  # ── Import submodules ───────────────────────────────────────────────
84
  from compare import compare_contracts, render_comparison_html
85
  from obligations import extract_obligations, render_obligations_html
@@ -142,7 +160,12 @@ _UNFAIR_LABELS = [
142
  "Jurisdiction", "Arbitration"
143
  ]
144
 
145
- _ALL_LABELS = CUAD_LABELS + _UNFAIR_LABELS
 
 
 
 
 
146
 
147
  RISK_MAP = {
148
  # Critical
@@ -198,6 +221,11 @@ RISK_MAP = {
198
  "Other": "LOW",
199
  "ROFR/ROFO/ROFN": "LOW",
200
  "Contract by using": "LOW",
 
 
 
 
 
201
  }
202
 
203
  DESC_MAP = {label: label.replace("_", " ") for label in _ALL_LABELS}
@@ -238,6 +266,11 @@ DESC_MAP.update({
238
  "Irrevocable or Perpetual License": "License that cannot be revoked or lasts indefinitely.",
239
  "Unlimited/All-You-Can-Eat License": "License with no usage limits.",
240
  "Notice Period to Terminate Renewal": "Required notice period before automatic renewal.",
 
 
 
 
 
241
  })
242
 
243
  RISK_WEIGHTS = {"CRITICAL": 40, "HIGH": 20, "MEDIUM": 10, "LOW": 3}
@@ -267,31 +300,39 @@ for _i in range(41):
267
  # ═══════════════════════════════════════════════════════════════════════
268
 
269
  class BoundedCache:
270
- """Thread-safe bounded LRU cache using OrderedDict."""
 
 
 
271
  def __init__(self, maxsize=1000):
272
  self._cache = OrderedDict()
273
  self._maxsize = maxsize
 
274
 
275
  def get(self, key, default=None):
276
- if key in self._cache:
277
- self._cache.move_to_end(key)
278
- return self._cache[key]
279
- return default
 
280
 
281
  def put(self, key, value):
282
- if key in self._cache:
283
- self._cache.move_to_end(key)
284
- self._cache[key] = value
285
- else:
286
- if len(self._cache) >= self._maxsize:
287
- self._cache.popitem(last=False)
288
- self._cache[key] = value
 
289
 
290
  def __contains__(self, key):
291
- return key in self._cache
 
292
 
293
  def __len__(self):
294
- return len(self._cache)
 
295
 
296
 
297
  # ═══════════════════════════════════════════════════════════════════════
@@ -301,7 +342,7 @@ class BoundedCache:
301
  cuad_tokenizer = None
302
  cuad_model = None
303
  ner_pipeline = None
304
- nli_pipeline = None
305
  _model_status = {"cuad": "not_loaded", "ner": "not_loaded", "nli": "not_loaded"}
306
 
307
  def _load_cuad_model():
@@ -349,20 +390,16 @@ def _load_ner_model():
349
  _model_status["ner"] = f"failed: {e}"
350
 
351
  def _load_nli_model():
352
- global nli_pipeline, _model_status, _HAS_NLI_MODEL
353
- if not _HAS_TORCH:
354
- _model_status["nli"] = "unavailable"
355
  return
356
  try:
357
- print("[ClauseGuard] Loading NLI model: cross-encoder/nli-deberta-v3-base")
358
- nli_pipeline = pipeline(
359
- "text-classification",
360
- model="cross-encoder/nli-deberta-v3-base",
361
- device=-1,
362
- )
363
  _HAS_NLI_MODEL = True
364
  _model_status["nli"] = "loaded"
365
- print("[ClauseGuard] NLI model loaded successfully")
366
  except Exception as e:
367
  print(f"[ClauseGuard] NLI model load failed (using heuristic fallback): {e}")
368
  _model_status["nli"] = f"failed: {e}"
@@ -430,6 +467,18 @@ def parse_document(file_path):
430
 
431
  _chunk_cache = BoundedCache(maxsize=500)
432
 
 
 
 
 
 
 
 
 
 
 
 
 
433
  def split_clauses(text):
434
  """Deterministic, structure-aware clause splitting.
435
  Same input ALWAYS produces same output. Normalized text is hashed
@@ -443,18 +492,7 @@ def split_clauses(text):
443
  text = re.sub(r'\n{3,}', '\n\n', text.strip())
444
 
445
  # First try to detect numbered sections (1., 2., 3.1, (a), etc.)
446
- section_pattern = re.compile(
447
- r'(?:^|\n\n)'
448
- r'(?='
449
- r'\d+(?:\.\d+)*[.)]\s' # 1. 2. 3.1. 3.1)
450
- r'|[A-Z]{2,}[A-Z\s]*\n' # ALL CAPS HEADERS
451
- r'|\([a-z]\)\s' # (a) (b) (c)
452
- r'|(?:Section|Article|Clause)\s+\d+' # Section 1, Article 2
453
- r')',
454
- re.MULTILINE
455
- )
456
-
457
- positions = [m.start() for m in section_pattern.finditer(text)]
458
 
459
  if len(positions) >= 3:
460
  clauses = []
@@ -688,14 +726,19 @@ _REGEX_PATTERNS = {
688
  "Price Restriction": [r"price.*(?:restriction|limitation|ceiling|cap|floor)", r"(?:shall|may).*not.*(?:increase|raise|exceed).*price"],
689
  }
690
 
 
 
 
 
 
691
  def _classify_regex(text):
692
  """Regex fallback β€” returns pattern match, NOT fake confidence."""
693
  text_lower = text.lower()
694
  results = []
695
  seen = set()
696
- for label, patterns in _REGEX_PATTERNS.items():
697
  for pat in patterns:
698
- if re.search(pat, text_lower):
699
  if label not in seen:
700
  risk = RISK_MAP.get(label, "MEDIUM")
701
  results.append({
@@ -816,29 +859,21 @@ def _extract_entities_regex(text):
816
  # ═══════════════════════════════════════════════════════════════════════
817
 
818
  def _run_nli(text_a, text_b):
819
- """Run NLI pipeline with correct input format for cross-encoder.
820
- FIX v4.1: cross-encoder expects {'text': a, 'text_pair': b} or a dict,
821
- but the HF pipeline for text-classification with cross-encoder accepts
822
- a dict input: {"text": text_a, "text_pair": text_b}.
823
- The simplest correct way is to pass them as a list of dicts."""
824
  try:
825
- # The cross-encoder/nli-deberta-v3-base pipeline expects two texts.
826
- # Passing as a dict with text and text_pair is the correct format.
827
- result = nli_pipeline(
828
- {"text": text_a[:256], "text_pair": text_b[:256]},
829
- truncation=True,
830
- )
831
- return result
832
- except Exception:
833
- # Some pipeline versions accept positional (text, text_pair) as tuple
834
- try:
835
- return nli_pipeline(
836
- text_a[:256],
837
- text_pair=text_b[:256],
838
- truncation=True,
839
- )
840
- except Exception:
841
- return None
842
 
843
 
844
  def detect_contradictions(clause_results, raw_text=""):
@@ -857,7 +892,7 @@ def detect_contradictions(clause_results, raw_text=""):
857
  clause_texts_by_label[cr["label"]].append(cr.get("text", ""))
858
 
859
  # ── 1. Semantic NLI (if model available) ──
860
- if _HAS_NLI_MODEL and nli_pipeline is not None:
861
  conflict_pairs = [
862
  ("Uncapped Liability", "Cap on Liability",
863
  "Liability cannot be both uncapped and capped simultaneously."),
 
1
  """
2
+ ClauseGuard — World's Best Legal Contract Analysis Tool (v4.2)
3
  ═══════════════════════════════════════════════════════════════
4
+ Fixes in v4.2:
5
+ • FIX: NLI now uses CrossEncoder.predict() — contradictions actually work
6
+ • FIX: BoundedCache uses threading.RLock — no more race conditions
7
+ • FIX: Pre-compiled ALL regex patterns at module level (perf)
8
+ • FIX: Added missing regex labels to RISK_MAP/DESC_MAP
9
+ • FIX: Extension risk formula matches backend
10
+ • FIX: Extension API_BASE URL corrected
11
+ • FIX: API CORS localhost requires explicit opt-in
12
+
13
  Fixes in v4.1:
14
  • FIX: Bounded LRU caches (chunk_cache, prediction_cache) — no more memory leaks
15
  • FIX: NLI input format — pass (text_a, text_b) tuple, not [SEP]-concatenated string
 
53
  import uuid
54
  import tempfile
55
  import hashlib
56
+ import threading
57
  from collections import defaultdict, OrderedDict
58
  from datetime import datetime
59
  from functools import lru_cache
 
90
  except Exception:
91
  pass
92
 
93
+ # ── CrossEncoder for NLI (soft-fail) ──────────────────────────────────
94
+ _HAS_CROSS_ENCODER = False
95
+ try:
96
+ from sentence_transformers import CrossEncoder as _CrossEncoder
97
+ _HAS_CROSS_ENCODER = True
98
+ except ImportError:
99
+ pass
100
+
101
  # ── Import submodules ───────────────────────────────────────────────
102
  from compare import compare_contracts, render_comparison_html
103
  from obligations import extract_obligations, render_obligations_html
 
160
  "Jurisdiction", "Arbitration"
161
  ]
162
 
163
+ # FIX v4.2: Include regex-only labels that aren't in CUAD or Unfair lists
164
+ _EXTRA_REGEX_LABELS = [
165
+ "Indemnification", "Confidentiality", "Force Majeure", "Penalties"
166
+ ]
167
+
168
+ _ALL_LABELS = CUAD_LABELS + _UNFAIR_LABELS + _EXTRA_REGEX_LABELS
169
 
170
  RISK_MAP = {
171
  # Critical
 
221
  "Other": "LOW",
222
  "ROFR/ROFO/ROFN": "LOW",
223
  "Contract by using": "LOW",
224
+ # FIX v4.2: Added regex-only labels that were missing from RISK_MAP
225
+ "Indemnification": "HIGH",
226
+ "Confidentiality": "MEDIUM",
227
+ "Force Majeure": "LOW",
228
+ "Penalties": "HIGH",
229
  }
230
 
231
  DESC_MAP = {label: label.replace("_", " ") for label in _ALL_LABELS}
 
266
  "Irrevocable or Perpetual License": "License that cannot be revoked or lasts indefinitely.",
267
  "Unlimited/All-You-Can-Eat License": "License with no usage limits.",
268
  "Notice Period to Terminate Renewal": "Required notice period before automatic renewal.",
269
+ # FIX v4.2: Added descriptions for regex-only labels
270
+ "Indemnification": "Obligation to compensate the other party for losses or damages.",
271
+ "Confidentiality": "Restrictions on sharing proprietary or sensitive information.",
272
+ "Force Majeure": "Excuses performance due to extraordinary events beyond control.",
273
+ "Penalties": "Financial penalties for breach or late performance.",
274
  })
275
 
276
  RISK_WEIGHTS = {"CRITICAL": 40, "HIGH": 20, "MEDIUM": 10, "LOW": 3}
 
300
  # ═══════════════════════════════════════════════════════════════════════
301
 
302
  class BoundedCache:
303
+ """Thread-safe bounded LRU cache using OrderedDict + RLock.
304
+ FIX v4.2: Added threading.RLock to prevent race conditions under
305
+ Gradio's concurrent request handling. OrderedDict compound operations
306
+ (contains + setitem + move_to_end + popitem) are NOT atomic even with GIL."""
307
  def __init__(self, maxsize=1000):
308
  self._cache = OrderedDict()
309
  self._maxsize = maxsize
310
+ self._lock = threading.RLock()
311
 
312
  def get(self, key, default=None):
313
+ with self._lock:
314
+ if key in self._cache:
315
+ self._cache.move_to_end(key)
316
+ return self._cache[key]
317
+ return default
318
 
319
  def put(self, key, value):
320
+ with self._lock:
321
+ if key in self._cache:
322
+ self._cache.move_to_end(key)
323
+ self._cache[key] = value
324
+ else:
325
+ if len(self._cache) >= self._maxsize:
326
+ self._cache.popitem(last=False)
327
+ self._cache[key] = value
328
 
329
  def __contains__(self, key):
330
+ with self._lock:
331
+ return key in self._cache
332
 
333
  def __len__(self):
334
+ with self._lock:
335
+ return len(self._cache)
336
 
337
 
338
  # ═══════════════════════════════════════════════════════════════════════
 
342
  cuad_tokenizer = None
343
  cuad_model = None
344
  ner_pipeline = None
345
+ nli_model = None # FIX v4.2: CrossEncoder instead of pipeline
346
  _model_status = {"cuad": "not_loaded", "ner": "not_loaded", "nli": "not_loaded"}
347
 
348
  def _load_cuad_model():
 
390
  _model_status["ner"] = f"failed: {e}"
391
 
392
  def _load_nli_model():
393
+ global nli_model, _model_status, _HAS_NLI_MODEL
394
+ if not _HAS_CROSS_ENCODER:
395
+ _model_status["nli"] = "unavailable (sentence-transformers not installed)"
396
  return
397
  try:
398
+ print("[ClauseGuard] Loading NLI model: cross-encoder/nli-deberta-v3-base (CrossEncoder)")
399
+ nli_model = _CrossEncoder("cross-encoder/nli-deberta-v3-base")
 
 
 
 
400
  _HAS_NLI_MODEL = True
401
  _model_status["nli"] = "loaded"
402
+ print("[ClauseGuard] NLI CrossEncoder loaded successfully")
403
  except Exception as e:
404
  print(f"[ClauseGuard] NLI model load failed (using heuristic fallback): {e}")
405
  _model_status["nli"] = f"failed: {e}"
 
467
 
468
  _chunk_cache = BoundedCache(maxsize=500)
469
 
470
+ # FIX v4.2: Pre-compile section pattern at module level (was recompiling per call)
471
+ _SECTION_PATTERN = re.compile(
472
+ r'(?:^|\n\n)'
473
+ r'(?='
474
+ r'\d+(?:\.\d+)*[.)]\s' # 1. 2. 3.1. 3.1)
475
+ r'|[A-Z]{2,}[A-Z\s]*\n' # ALL CAPS HEADERS
476
+ r'|\([a-z]\)\s' # (a) (b) (c)
477
+ r'|(?:Section|Article|Clause)\s+\d+' # Section 1, Article 2
478
+ r')',
479
+ re.MULTILINE
480
+ )
481
+
482
  def split_clauses(text):
483
  """Deterministic, structure-aware clause splitting.
484
  Same input ALWAYS produces same output. Normalized text is hashed
 
492
  text = re.sub(r'\n{3,}', '\n\n', text.strip())
493
 
494
  # First try to detect numbered sections (1., 2., 3.1, (a), etc.)
495
+ positions = [m.start() for m in _SECTION_PATTERN.finditer(text)]
 
 
 
 
 
 
 
 
 
 
 
496
 
497
  if len(positions) >= 3:
498
  clauses = []
 
726
  "Price Restriction": [r"price.*(?:restriction|limitation|ceiling|cap|floor)", r"(?:shall|may).*not.*(?:increase|raise|exceed).*price"],
727
  }
728
 
729
+ # FIX v4.2: Pre-compile regex patterns at module level (was recompiling per call)
730
+ _REGEX_PATTERNS_COMPILED = {}
731
+ for _label, _pats in _REGEX_PATTERNS.items():
732
+ _REGEX_PATTERNS_COMPILED[_label] = [re.compile(p, re.IGNORECASE) for p in _pats]
733
+
734
  def _classify_regex(text):
735
  """Regex fallback β€” returns pattern match, NOT fake confidence."""
736
  text_lower = text.lower()
737
  results = []
738
  seen = set()
739
+ for label, patterns in _REGEX_PATTERNS_COMPILED.items():
740
  for pat in patterns:
741
+ if pat.search(text_lower):
742
  if label not in seen:
743
  risk = RISK_MAP.get(label, "MEDIUM")
744
  results.append({
 
859
  # ═══════════════════════════════════════════════════════════════════════
860
 
861
  def _run_nli(text_a, text_b):
862
+ """Run NLI using CrossEncoder with correct input format.
863
+ FIX v4.2: Use sentence_transformers.CrossEncoder.predict() which accepts
864
+ a list of (text_a, text_b) tuples. Returns scores for [contradiction, entailment, neutral].
865
+ The old code used pipeline("text-classification") with dict input, which was broken."""
 
866
  try:
867
+ # CrossEncoder.predict returns numpy array of shape (n_pairs, 3)
868
+ # Columns: [contradiction, entailment, neutral]
869
+ scores = nli_model.predict([(text_a[:256], text_b[:256])])
870
+ label_mapping = ["contradiction", "entailment", "neutral"]
871
+ top_idx = int(scores[0].argmax())
872
+ top_score = float(scores[0][top_idx])
873
+ return [{"label": label_mapping[top_idx], "score": top_score}]
874
+ except Exception as e:
875
+ print(f"[ClauseGuard] NLI inference error: {e}")
876
+ return None
 
 
 
 
 
 
 
877
 
878
 
879
  def detect_contradictions(clause_results, raw_text=""):
 
892
  clause_texts_by_label[cr["label"]].append(cr.get("text", ""))
893
 
894
  # ── 1. Semantic NLI (if model available) ──
895
+ if _HAS_NLI_MODEL and nli_model is not None:
896
  conflict_pairs = [
897
  ("Uncapped Liability", "Cap on Liability",
898
  "Liability cannot be both uncapped and capped simultaneously."),