Sahil al farib committed on
Commit
80ec89b
·
1 Parent(s): d9528d9

Fix gradio 5 compatibility

Browse files
facteval/__init__.py CHANGED
@@ -7,6 +7,17 @@ import warnings as _warnings
7
  import logging as _logging
8
  import contextlib as _contextlib
9
  import io as _io
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  # Suppress safetensors / accelerate noise
12
  _os.environ.setdefault("SAFETENSORS_LOG_LEVEL", "error")
@@ -14,16 +25,22 @@ _os.environ.setdefault("ACCELERATE_LOG_LEVEL", "error")
14
  _logging.getLogger("safetensors").setLevel(_logging.ERROR)
15
  _logging.getLogger("accelerate").setLevel(_logging.ERROR)
16
 
17
- # Suppress HF Hub unauthenticated request warnings
 
 
18
  _logging.getLogger("huggingface_hub.utils._http").setLevel(_logging.ERROR)
19
  _logging.getLogger("huggingface_hub").setLevel(_logging.ERROR)
20
 
21
- # Suppress transformers info-level noise
 
 
 
22
  _logging.getLogger("transformers.modeling_utils").setLevel(_logging.ERROR)
23
  _logging.getLogger("transformers.generation.configuration_utils").setLevel(_logging.ERROR)
24
 
25
  # Suppress FutureWarning about clean_up_tokenization_spaces
26
  _warnings.filterwarnings("ignore", category=FutureWarning, module="transformers")
 
27
 
28
 
29
  @_contextlib.contextmanager
 
7
  import logging as _logging
8
  import contextlib as _contextlib
9
  import io as _io
10
+ import atexit as _atexit
11
+
12
+ # Suppress multiprocess ResourceTracker.__del__ error on Windows (Python 3.12+)
13
+ # This is a known bug in the multiprocess package, not FactEval.
14
+ def _suppress_multiprocess_error():
15
+ try:
16
+ import multiprocess.resource_tracker as _rt
17
+ _rt.ResourceTracker.__del__ = lambda self: None
18
+ except Exception:
19
+ pass
20
+ _suppress_multiprocess_error()
21
 
22
  # Suppress safetensors / accelerate noise
23
  _os.environ.setdefault("SAFETENSORS_LOG_LEVEL", "error")
 
25
  _logging.getLogger("safetensors").setLevel(_logging.ERROR)
26
  _logging.getLogger("accelerate").setLevel(_logging.ERROR)
27
 
28
+ # Suppress HF Hub download noise (symlink warnings, progress bars)
29
+ _os.environ.setdefault("HF_HUB_DISABLE_SYMLINKS_WARNING", "1")
30
+ _os.environ.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "1")
31
  _logging.getLogger("huggingface_hub.utils._http").setLevel(_logging.ERROR)
32
  _logging.getLogger("huggingface_hub").setLevel(_logging.ERROR)
33
 
34
+ # Suppress transformers sharding + generation config noise
35
+ _os.environ.setdefault("TRANSFORMERS_VERBOSITY", "error")
36
+ _os.environ.setdefault("TRANSFORMERS_NO_ADVISORY_WARNINGS", "1")
37
+ _logging.getLogger("transformers").setLevel(_logging.ERROR)
38
  _logging.getLogger("transformers.modeling_utils").setLevel(_logging.ERROR)
39
  _logging.getLogger("transformers.generation.configuration_utils").setLevel(_logging.ERROR)
40
 
41
  # Suppress FutureWarning about clean_up_tokenization_spaces
42
  _warnings.filterwarnings("ignore", category=FutureWarning, module="transformers")
43
+ _warnings.filterwarnings("ignore", category=UserWarning, module="huggingface_hub")
44
 
45
 
46
  @_contextlib.contextmanager
facteval/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (3.92 kB). View file
 
facteval/__pycache__/calibrator.cpython-312.pyc ADDED
Binary file (4.36 kB). View file
 
facteval/__pycache__/claim_extractor.cpython-312.pyc ADDED
Binary file (6.23 kB). View file
 
facteval/__pycache__/cli.cpython-312.pyc ADDED
Binary file (5.34 kB). View file
 
facteval/__pycache__/config.cpython-312.pyc ADDED
Binary file (861 Bytes). View file
 
facteval/__pycache__/core.cpython-312.pyc ADDED
Binary file (12.2 kB). View file
 
facteval/__pycache__/models.cpython-312.pyc ADDED
Binary file (2.53 kB). View file
 
facteval/__pycache__/retriever.cpython-312.pyc ADDED
Binary file (6.87 kB). View file
 
facteval/__pycache__/verifier.cpython-312.pyc ADDED
Binary file (10.7 kB). View file
 
facteval/cli.py CHANGED
@@ -107,7 +107,7 @@ def _parse_input(args) -> tuple[str | None, list[str]]:
107
  """Parse answer and contexts from file or CLI flags."""
108
  # Option 1: JSON file
109
  if args.input_file:
110
- with open(args.input_file, "r", encoding="utf-8") as f:
111
  data = json.load(f)
112
  return data.get("answer"), data.get("contexts", [])
113
 
 
107
  """Parse answer and contexts from file or CLI flags."""
108
  # Option 1: JSON file
109
  if args.input_file:
110
+ with open(args.input_file, "r", encoding="utf-8-sig") as f:
111
  data = json.load(f)
112
  return data.get("answer"), data.get("contexts", [])
113
 
facteval/core.py CHANGED
@@ -37,21 +37,27 @@ _calibrator_path: str | None = None
37
  def _get_extractor() -> ClaimExtractor:
38
  global _extractor
39
  if _extractor is None:
 
40
  _extractor = ClaimExtractor()
 
41
  return _extractor
42
 
43
 
44
  def _get_retriever() -> EvidenceRetriever:
45
  global _retriever
46
  if _retriever is None:
 
47
  _retriever = EvidenceRetriever()
 
48
  return _retriever
49
 
50
 
51
  def _get_verifier() -> Verifier:
52
  global _verifier
53
  if _verifier is None:
 
54
  _verifier = Verifier()
 
55
  return _verifier
56
 
57
 
 
37
  def _get_extractor() -> ClaimExtractor:
38
  global _extractor
39
  if _extractor is None:
40
+ print("⏳ Loading claim extractor (Qwen 1.5B)...", flush=True)
41
  _extractor = ClaimExtractor()
42
+ print("✅ Claim extractor ready.", flush=True)
43
  return _extractor
44
 
45
 
46
  def _get_retriever() -> EvidenceRetriever:
47
  global _retriever
48
  if _retriever is None:
49
+ print("⏳ Loading retriever (MiniLM + FAISS)...", flush=True)
50
  _retriever = EvidenceRetriever()
51
+ print("✅ Retriever ready.", flush=True)
52
  return _retriever
53
 
54
 
55
  def _get_verifier() -> Verifier:
56
  global _verifier
57
  if _verifier is None:
58
+ print("⏳ Loading verifier (DeBERTa NLI)...", flush=True)
59
  _verifier = Verifier()
60
+ print("✅ Verifier ready.", flush=True)
61
  return _verifier
62
 
63
 
facteval/verifier.py CHANGED
@@ -228,8 +228,8 @@ class Verifier:
228
  """Generate a human-readable reason for the verdict."""
229
  ev_short = evidence[:80] + "..." if len(evidence) > 80 else evidence
230
  if label == FactLabel.SUPPORTED:
231
- return f"Supported by evidence: \"{ev_short}\""
232
  elif label == FactLabel.CONTRADICTED:
233
- return f"Contradicts evidence: \"{ev_short}\""
234
  else:
235
- return f"Evidence is neutral—neither confirms nor denies: \"{ev_short}\""
 
228
  """Generate a human-readable reason for the verdict."""
229
  ev_short = evidence[:80] + "..." if len(evidence) > 80 else evidence
230
  if label == FactLabel.SUPPORTED:
231
+ return f"Matched evidence: \"{ev_short}\""
232
  elif label == FactLabel.CONTRADICTED:
233
+ return f"Contradicted by: \"{ev_short}\""
234
  else:
235
+ return f"No strong match—evidence is neutral: \"{ev_short}\""