# tests/test_db.py
"""
PhD Research OS — Unit Tests (Phase 0 requirement: 20+ tests)
"""
import os
import sys
import json
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from phd_research_os.db import (
init_db, get_db, create_claim, get_claim, update_claim, search_claims,
create_source, get_source, create_goal, get_goals_by_priority,
create_conflict, create_decision, create_override,
log_api_usage, get_cost_summary, to_fixed, from_fixed
)
TEST_DB = "test_research_os.db"
@pytest.fixture(autouse=True)
def setup_teardown():
    """Create a fresh database for each test and remove it afterwards.

    Stale files from a previous crashed run are deleted up front so
    init_db() always starts from a clean slate.  The SQLite WAL/SHM
    sidecar files are removed together with the main database file.
    """
    _remove_db_files()  # defensive: an earlier aborted run may have left files behind
    init_db(TEST_DB)
    yield
    _remove_db_files()

def _remove_db_files():
    """Delete the test database and its -wal/-shm sidecars if present."""
    for suffix in ("", "-wal", "-shm"):
        path = TEST_DB + suffix
        if os.path.exists(path):
            os.remove(path)
def get_test_conn():
    """Open and return a connection to the per-test database file."""
    return get_db(TEST_DB)
# ============================================================
# Fixed-Point Math Tests
# ============================================================
def test_fixed_point_conversion():
    """to_fixed/from_fixed map between floats and integer milli-units."""
    scaled = to_fixed(0.85)
    assert scaled == 850
    assert from_fixed(850) == 0.85
def test_fixed_point_precision():
    """Spot-check to_fixed at several points across [0.0, 1.0]."""
    expected_by_input = {0.123: 123, 0.999: 999, 0.0: 0, 1.0: 1000}
    for raw, expected in expected_by_input.items():
        assert to_fixed(raw) == expected
def test_fixed_point_roundtrip():
    """A to_fixed/from_fixed round trip stays within one milli-unit."""
    samples = (0.0, 0.001, 0.5, 0.85, 0.999, 1.0)
    for sample in samples:
        recovered = from_fixed(to_fixed(sample))
        assert abs(recovered - sample) < 0.001
# ============================================================
# Claim CRUD Tests
# ============================================================
def test_create_claim_complete():
    """A claim with all evidence fields populated is stored as Complete."""
    db = get_test_conn()
    claim_id = create_claim(
        db, "Graphene shows mobility > 10000 cm²/Vs", "Fact", 0.92,
        evidence_strength=0.95, study_quality_weight=1.0,
        journal_tier_weight=1.0, completeness_penalty=1.0,
    )
    stored = get_claim(db, claim_id)
    assert stored is not None
    assert stored['status'] == 'Complete'
    assert stored['epistemic_tag'] == 'Fact'
    assert abs(stored['confidence'] - 0.92) < 0.01
    db.close()
def test_create_claim_incomplete():
    """A claim created with missing_fields is marked Incomplete."""
    db = get_test_conn()
    claim_id = create_claim(
        db, "Temperature may affect binding", "Hypothesis", 0.35,
        missing_fields=["temperature", "pH", "ionic_strength"],
    )
    stored = get_claim(db, claim_id)
    assert stored['status'] == 'Incomplete'
    assert len(stored['missing_fields']) == 3
    db.close()
def test_claim_id_format():
    """Generated claim identifiers carry the CLM_ prefix."""
    db = get_test_conn()
    new_id = create_claim(db, "Test", "Fact", 0.5)
    assert new_id.startswith("CLM_")
    db.close()
def test_claim_schema_version():
    """Stored claims record schema version 1.0."""
    db = get_test_conn()
    stored = get_claim(db, create_claim(db, "Test", "Fact", 0.5))
    assert stored['schema_version'] == '1.0'
    db.close()
def test_get_nonexistent_claim():
    """Looking up an unknown claim id yields None."""
    db = get_test_conn()
    missing = get_claim(db, "CLM_NONEXIST")
    assert missing is None
    db.close()
def test_update_claim():
    """update_claim rewrites a claim's text and confidence in place."""
    db = get_test_conn()
    claim_id = create_claim(db, "Old text", "Fact", 0.5)
    update_claim(db, claim_id, text="Updated text", confidence=0.9)
    stored = get_claim(db, claim_id)
    assert stored['text'] == "Updated text"
    assert abs(stored['confidence'] - 0.9) < 0.01
    db.close()
def test_search_claims_by_text():
    """Text search matches case-insensitively on claim text."""
    db = get_test_conn()
    create_claim(db, "Graphene biosensor detection", "Fact", 0.8)
    create_claim(db, "Lithium battery cycling", "Fact", 0.7)
    hits = search_claims(db, query="graphene")
    assert len(hits) == 1
    assert "graphene" in hits[0]['text'].lower()
    db.close()
def test_search_claims_by_epistemic_tag():
    """Filtering by epistemic tag returns only matching claims."""
    db = get_test_conn()
    create_claim(db, "Measured value", "Fact", 0.9)
    create_claim(db, "We hypothesize", "Hypothesis", 0.3)
    hits = search_claims(db, epistemic_tag="Hypothesis")
    assert len(hits) == 1
    db.close()
def test_search_claims_by_min_confidence():
    """min_confidence excludes claims below the threshold."""
    db = get_test_conn()
    create_claim(db, "High confidence", "Fact", 0.95)
    create_claim(db, "Low confidence", "Hypothesis", 0.2)
    hits = search_claims(db, min_confidence=0.5)
    assert len(hits) == 1
    assert hits[0]['confidence'] >= 0.5
    db.close()
def test_create_50_claims():
    """Phase 0 acceptance: Can create 50 Claim Objects programmatically."""
    db = get_test_conn()
    tags = ["Fact", "Interpretation", "Hypothesis"]
    ids = [
        create_claim(db, f"Claim number {i}", tags[i % 3],
                     round(0.1 + (i * 0.018), 3))
        for i in range(50)
    ]
    assert len(ids) == 50
    assert len(set(ids)) == 50  # every generated id must be unique
    db.close()
def test_claim_parameters():
    """Structured parameters survive a store/load round trip."""
    db = get_test_conn()
    conditions = {"temperature_C": 25.0, "pH": 7.4, "ionic_strength_mM": 10.0}
    claim_id = create_claim(db, "Test", "Fact", 0.5, parameters=conditions)
    stored = get_claim(db, claim_id)
    assert stored['parameters'] == conditions
    db.close()
# ============================================================
# Source Tests
# ============================================================
def test_create_source():
    """A stored source is retrievable by DOI with its metadata intact."""
    db = get_test_conn()
    doi = create_source(db, "10.1234/test", "Test Paper",
                        ["Author A", "Author B"], 2024, "Nature", 1)
    stored = get_source(db, doi)
    assert stored is not None
    assert stored['title'] == "Test Paper"
    assert len(stored['authors']) == 2
    db.close()
def test_source_not_found():
    """Looking up an unknown DOI yields None."""
    db = get_test_conn()
    missing = get_source(db, "10.9999/nonexist")
    assert missing is None
    db.close()
# ============================================================
# Goal Tests
# ============================================================
def test_create_goal():
    """A new goal starts Active, keeps its priority, and is retrievable.

    Also asserts the returned goal id, which the original version bound
    to an unused local and never checked.
    """
    db = get_test_conn()
    gid = create_goal(db, "Achieve sub-fM LOD", "high")
    assert gid is not None  # create_goal must hand back an identifier
    goals = get_goals_by_priority(db)
    assert len(goals) == 1
    assert goals[0]['priority'] == 'high'
    assert goals[0]['status'] == 'Active'
    db.close()
def test_goals_sorted_by_priority():
    """get_goals_by_priority orders goals high, then medium, then low."""
    db = get_test_conn()
    create_goal(db, "Low priority", "low")
    create_goal(db, "High priority", "high")
    create_goal(db, "Medium priority", "medium")
    ranked = get_goals_by_priority(db)
    assert ranked[0]['priority'] == 'high'
    assert ranked[1]['priority'] == 'medium'
    assert ranked[2]['priority'] == 'low'
    db.close()
# ============================================================
# Conflict Tests
# ============================================================
def test_create_conflict():
    """A fresh conflict is Unresolved and its hypothesis confidence is low."""
    db = get_test_conn()
    first = create_claim(db, "Sensitivity increases", "Fact", 0.8)
    second = create_claim(db, "Sensitivity decreases", "Fact", 0.7)
    conflict_id = create_conflict(
        db, first, second, "value_mismatch",
        "Different surface chemistry",
        ["surface treatment", "buffer composition"],
    )
    row = db.execute(
        "SELECT * FROM conflicts WHERE conflict_id = ?", (conflict_id,)
    ).fetchone()
    assert row is not None
    record = dict(row)
    assert record['hypothesis_confidence'] == 'low'  # ALWAYS low
    assert record['resolution_status'] == 'Unresolved'
    db.close()
# ============================================================
# Override Tests
# ============================================================
def test_expert_override():
    """An expert override replaces the claim's confidence and is recorded."""
    db = get_test_conn()
    claim_id = create_claim(db, "Override test", "Fact", 0.5)
    create_override(db, claim_id, "Dr. Smith",
                    "Direct experimental evidence", 0.95)
    stored = get_claim(db, claim_id)
    assert abs(stored['confidence'] - 0.95) < 0.01
    override = stored['expert_override']
    assert override is not None
    assert override['who'] == 'Dr. Smith'
    db.close()
# ============================================================
# Decision Tests
# ============================================================
def test_create_decision():
    """A freshly created decision starts in the Proposed state."""
    db = get_test_conn()
    goal_id = create_goal(db, "Test goal", "high")
    decision_id = create_decision(
        db, "experiment", "Run control experiment",
        0.72, goal_id, priority="high",
        estimated_effort="2 weeks",
    )
    row = db.execute(
        "SELECT * FROM decisions WHERE decision_id = ?", (decision_id,)
    ).fetchone()
    assert row is not None
    assert dict(row)['status'] == 'Proposed'
    db.close()
# ============================================================
# API Usage Logging Tests
# ============================================================
def test_api_usage_logging():
    """Every log_api_usage call is counted in the daily cost summary."""
    db = get_test_conn()
    calls = [
        ("claude-haiku", 500, 200, 0.001, "claim_extraction"),
        ("claude-haiku", 300, 150, 0.0008, "conflict_detection"),
    ]
    for model, tokens_in, tokens_out, cost, purpose in calls:
        log_api_usage(db, model, tokens_in, tokens_out, cost, purpose)
    summary = get_cost_summary(db, days=1)
    assert summary['num_calls'] == 2
    db.close()
# Allow running this file directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])