# Hugging Face Spaces artifact header (uploaded by neovalle, commit 91e0e03,
# verified) — converted to a comment so the file parses as Python.
"""
Discourse Compass β€” Gradio App for Linguists & General Public
=============================================================
β€’ Interactive 3D Plotly scatter (rotate, zoom, pan)
β€’ Custom naming for poles and discourses
β€’ Plain-language results for non-technical users
β€’ Sentence embeddings via all-mpnet-base-v2 (768-dim)
"""
import gradio as gr
import numpy as np
import plotly.graph_objects as go
from sentence_transformers import SentenceTransformer
from sklearn.decomposition import PCA
from scipy.spatial.distance import cosine, euclidean
# ── Model ─────────────────────────────────────────────────────────────────────
MODEL_NAME = "all-mpnet-base-v2"
MODEL_DIM = 768  # embedding width produced by all-mpnet-base-v2
_model = None  # module-level cache, populated on the first get_model() call


def get_model():
    """Return the shared SentenceTransformer instance, loading it lazily.

    The model download/load is expensive, so it happens at most once per
    process; subsequent calls return the cached instance.
    """
    global _model
    if _model is not None:
        return _model
    _model = SentenceTransformer(MODEL_NAME)
    return _model
# ── Maths helpers ─────────────────────────────────────────────────────────────
def parse_sentences(text):
    """Split a text-box value into a list of non-empty, stripped lines."""
    sentences = []
    for raw_line in text.strip().splitlines():
        cleaned = raw_line.strip()
        if cleaned:
            sentences.append(cleaned)
    return sentences
def unit(v):
    """Return v scaled to unit length; near-zero vectors pass through unchanged."""
    length = np.linalg.norm(v)
    if length > 1e-12:
        return v / length
    return v
def angle_between(u, v):
    """Angle in degrees between the lines spanned by u and v.

    Uses the absolute cosine, so the result is in [0, 90] and is
    insensitive to the sign (orientation) of either vector.
    """
    cosine_sim = np.dot(unit(u), unit(v))
    clamped = min(abs(float(cosine_sim)), 1.0)  # guard arccos against rounding
    return float(np.degrees(np.arccos(clamped)))
def thematic_breadth(vecs):
    """Spread of an embedding cloud: Frobenius norm of the mean-centred matrix."""
    centred = vecs - vecs.mean(axis=0)
    return float(np.linalg.norm(centred, "fro"))
def principal_axis(vecs):
    """Eigen-decompose the covariance of vecs, eigenvalues sorted descending.

    Returns (eigenvalues, eigenvectors) with eigenvectors as columns.
    With fewer than two rows there is no covariance, so a zero eigenvalue
    vector and the identity basis are returned as a safe placeholder.
    """
    n_rows, n_dims = vecs.shape
    if n_rows < 2:
        return np.zeros(n_dims), np.eye(n_dims)
    cov = np.cov(vecs, rowvar=False)
    vals, evecs = np.linalg.eigh(cov)  # eigh yields ascending eigenvalues
    descending = np.argsort(vals)[::-1]
    return vals[descending], evecs[:, descending]
def semantic_heart(vecs):
    """Centroid (per-dimension mean) of a sentence-embedding cloud."""
    return np.mean(vecs, axis=0)
# ── Plain-language interpretation helpers ─────────────────────────────────────
def breadth_label(score, all_scores):
    """Describe a spread score relative to the min/max of all_scores."""
    lo, hi = min(all_scores), max(all_scores)
    if hi == lo:
        # No variation to scale against — fall back to a neutral label.
        return "moderate"
    relative = (score - lo) / (hi - lo)
    if relative < 0.33:
        return "tightly focused"
    return "moderately varied" if relative < 0.66 else "wide-ranging"
def orientation_label(angle):
    """Describe how a cloud's main axis relates to the pole-to-pole axis."""
    bands = (
        (20, "closely tracks the pole-to-pole spectrum"),
        (45, "partly follows the pole-to-pole spectrum"),
        (70, "drifts away from the pole-to-pole spectrum"),
    )
    for upper_bound, label in bands:
        if angle < upper_bound:
            return label
    return "varies independently of the pole-to-pole spectrum"
def strength_label(pct):
    """Describe how dominant the first principal direction of a cloud is."""
    if pct <= 0.35:
        return "diverse β€” sentences spread in many directions"
    if pct <= 0.6:
        return "moderately consistent"
    return "very consistent β€” sentences cluster in one direction"
def pull_label(cos_a, cos_b, name_a, name_b):
    """Describe which pole a discourse is pulled toward.

    The pole with the LOWER value is treated as the closer one
    (consistent with cosine distances, where smaller means nearer).
    """
    nearer = name_a if cos_a < cos_b else name_b
    gap = abs(cos_a - cos_b)
    if gap < 0.05:
        return f"sits roughly halfway between {name_a} and {name_b}"
    if gap < 0.15:
        return f"leans toward {nearer}"
    return f"clearly closer to {nearer}"
# ── Plotly colour palette ─────────────────────────────────────────────────────
# One colour per corpus: the two poles ("A", "B") and the two analysed
# discourses ("D1", "D2"). Keys match the legendgroup keys used in the plot.
COLORS = {
    "A": "#5aa8ff",
    "B": "#ff6b6b",
    "D1": "#3dd6a3",
    "D2": "#ffcc55",
}
BG_COLOR = "#0d0f1c"    # page / plot background (dark navy)
GRID_COLOR = "#1c2040"  # 3D scene grid lines and panel borders
TEXT_COLOR = "#cdd5f0"  # default axis, tick and legend text colour
# ── Interactive Plotly 3D renderer ────────────────────────────────────────────
def build_plotly_figure(
    pts_a, pts_b, pts_d1, pts_d2,
    c_a, c_b, c_d1, c_d2,
    ev_a, ev_b, ev_d1, ev_d2,
    pca_ev,
    name_a, name_b, name_d1, name_d2,
):
    """Assemble the interactive 3D semantic map.

    Parameters
    ----------
    pts_a, pts_b, pts_d1, pts_d2 : (n_i, 3) arrays
        PCA-projected sentence embeddings for each corpus.
    c_a, c_b, c_d1, c_d2 : length-3 arrays
        PCA-projected centroid of each corpus.
    ev_a, ev_b, ev_d1, ev_d2 : length-3 arrays
        Principal-axis direction of each corpus in the 3D PCA space.
    pca_ev : sequence of 3 floats
        Explained-variance ratios of the PCA components (used in axis titles).
    name_a, name_b, name_d1, name_d2 : str
        Display names for the poles and discourses.

    Returns
    -------
    plotly.graph_objects.Figure with sentence dots, centroid diamonds, the
    dashed A-to-B pole axis, dotted centroid-to-pole spokes, and short
    principal-direction segments.
    """
    fig = go.Figure()
    # ── Sentence dots: poles drawn as circles, discourses as squares ──────
    for pts, key, name, symbol in [
        (pts_a, "A", name_a, "circle"),
        (pts_b, "B", name_b, "circle"),
        (pts_d1, "D1", name_d1, "square"),
        (pts_d2, "D2", name_d2, "square"),
    ]:
        fig.add_trace(go.Scatter3d(
            x=pts[:, 0], y=pts[:, 1], z=pts[:, 2],
            mode="markers",
            marker=dict(size=5, color=COLORS[key], symbol=symbol,
                        opacity=0.7, line=dict(width=0.5, color="white")),
            name=f"{name} sentences",
            legendgroup=key,
            hovertemplate=f"{name} sentence<br>(%{{x:.3f}}, %{{y:.3f}}, %{{z:.3f}})<extra></extra>",
        ))
    # ── Centroids rendered as labelled diamonds ───────────────────────────
    for c3, key, name in [
        (c_a, "A", name_a),
        (c_b, "B", name_b),
        (c_d1, "D1", name_d1),
        (c_d2, "D2", name_d2),
    ]:
        fig.add_trace(go.Scatter3d(
            x=[c3[0]], y=[c3[1]], z=[c3[2]],
            mode="markers+text",
            marker=dict(size=10, color=COLORS[key], symbol="diamond",
                        line=dict(width=2, color="white")),
            text=[f"β—† {name}"],
            textposition="top center",
            textfont=dict(color=COLORS[key], size=11),
            name=f"β—† Centre of {name}",
            legendgroup=key,
            showlegend=True,
            hovertemplate=f"Centre of {name}<br>(%{{x:.3f}}, %{{y:.3f}}, %{{z:.3f}})<extra></extra>",
        ))
    # ── Pole axis: dashed white line joining the two pole centroids ───────
    fig.add_trace(go.Scatter3d(
        x=[c_a[0], c_b[0]], y=[c_a[1], c_b[1]], z=[c_a[2], c_b[2]],
        mode="lines",
        line=dict(color="white", width=3, dash="dash"),
        name=f"Spectrum: {name_a} ↔ {name_b}",
        opacity=0.5,
        hoverinfo="skip",
    ))
    # ── Spokes: dotted lines from each discourse centroid to both poles ───
    for c_disc, key, dname in [(c_d1, "D1", name_d1), (c_d2, "D2", name_d2)]:
        for pole_pt, pname in [(c_a, name_a), (c_b, name_b)]:
            fig.add_trace(go.Scatter3d(
                x=[c_disc[0], pole_pt[0]],
                y=[c_disc[1], pole_pt[1]],
                z=[c_disc[2], pole_pt[2]],
                mode="lines",
                line=dict(color=COLORS[key], width=1.5, dash="dot"),
                opacity=0.4,
                showlegend=False,
                hoverinfo="skip",
            ))
    # ── Principal-direction segments through each centroid (± scale) ──────
    scale = 0.15  # half-length of the direction segment in PCA units
    for c3, ev3, key, name in [
        (c_a, ev_a, "A", name_a),
        (c_b, ev_b, "B", name_b),
        (c_d1, ev_d1, "D1", name_d1),
        (c_d2, ev_d2, "D2", name_d2),
    ]:
        tip = c3 + ev3 * scale
        tail = c3 - ev3 * scale
        fig.add_trace(go.Scatter3d(
            x=[tail[0], tip[0]], y=[tail[1], tip[1]], z=[tail[2], tip[2]],
            mode="lines",
            line=dict(color=COLORS[key], width=6),
            showlegend=False,
            hovertemplate=f"Direction of variation β€” {name}<extra></extra>",
        ))
        # arrowhead: a small diamond marking the "tip" end of the segment
        fig.add_trace(go.Scatter3d(
            x=[tip[0]], y=[tip[1]], z=[tip[2]],
            mode="markers",
            marker=dict(size=5, color=COLORS[key], symbol="diamond"),
            showlegend=False,
            hoverinfo="skip",
        ))
    # ── Layout: shared dark-theme axis styling and overall chrome ─────────
    axis_template = dict(
        backgroundcolor=BG_COLOR,
        gridcolor=GRID_COLOR,
        showbackground=True,
        color=TEXT_COLOR,
        tickfont=dict(size=9, color=TEXT_COLOR),
    )
    fig.update_layout(
        scene=dict(
            xaxis=dict(title=f"Meaning Axis 1 ({pca_ev[0]:.0%})", **axis_template),
            yaxis=dict(title=f"Meaning Axis 2 ({pca_ev[1]:.0%})", **axis_template),
            zaxis=dict(title=f"Meaning Axis 3 ({pca_ev[2]:.0%})", **axis_template),
        ),
        paper_bgcolor=BG_COLOR,
        plot_bgcolor=BG_COLOR,
        font=dict(color=TEXT_COLOR),
        title=dict(
            text=(
                f"Discourse Compass β€” {name_a} vs {name_b}<br>"
                f"<span style='font-size:12px;color:#5a6488;'>"
                f"Drag to rotate Β· Scroll to zoom Β· {sum(pca_ev):.0%} of meaning variation shown</span>"
            ),
            x=0.5,
            font=dict(size=16),
        ),
        legend=dict(
            bgcolor="rgba(19,22,42,0.9)",
            bordercolor=GRID_COLOR,
            borderwidth=1,
            font=dict(size=10, color=TEXT_COLOR),
        ),
        margin=dict(l=0, r=0, t=60, b=0),
        height=620,
    )
    return fig
# ── Core analysis ─────────────────────────────────────────────────────────────
def run_analysis(text_a, text_b, text_d1, text_d2,
                 name_a, name_b, name_d1, name_d2):
    """Run the full Discourse Compass pipeline.

    Embeds all four corpora with the sentence-transformer, measures each
    discourse's position on the pole-A-to-pole-B spectrum, builds the 3D
    Plotly map, and renders a plain-language text report.

    Parameters
    ----------
    text_a, text_b, text_d1, text_d2 : str
        Newline-separated sentences for the two poles and two discourses.
    name_a, name_b, name_d1, name_d2 : str
        User-chosen display names; blank values fall back to defaults.

    Returns
    -------
    (report, figure) on success, or (warning string, None) when any
    corpus has no sentences.
    """
    # Default names if blank
    name_a = name_a.strip() or "Pole A"
    name_b = name_b.strip() or "Pole B"
    name_d1 = name_d1.strip() or "Discourse 1"
    name_d2 = name_d2.strip() or "Discourse 2"
    sents_a = parse_sentences(text_a)
    sents_b = parse_sentences(text_b)
    sents_d1 = parse_sentences(text_d1)
    sents_d2 = parse_sentences(text_d2)
    # Validate: every corpus must contribute at least one non-empty line.
    errors = []
    if not sents_a:
        errors.append(f"{name_a} needs at least 1 sentence.")
    if not sents_b:
        errors.append(f"{name_b} needs at least 1 sentence.")
    if not sents_d1:
        errors.append(f"{name_d1} needs at least 1 sentence.")
    if not sents_d2:
        errors.append(f"{name_d2} needs at least 1 sentence.")
    if errors:
        return "⚠ " + " | ".join(errors), None
    # Encode all sentences in one batch, then slice per corpus by count.
    model = get_model()
    all_sents = sents_a + sents_b + sents_d1 + sents_d2
    all_vecs = model.encode(all_sents, normalize_embeddings=False,
                            show_progress_bar=False)
    na, nb, nd1, nd2 = len(sents_a), len(sents_b), len(sents_d1), len(sents_d2)
    vecs_a = all_vecs[:na]
    vecs_b = all_vecs[na:na + nb]
    vecs_d1 = all_vecs[na + nb:na + nb + nd1]
    vecs_d2 = all_vecs[na + nb + nd1:]
    # Semantic Hearts (centroids)
    heart_a = semantic_heart(vecs_a)
    heart_b = semantic_heart(vecs_b)
    heart_d1 = semantic_heart(vecs_d1)
    heart_d2 = semantic_heart(vecs_d2)
    # Thematic Breadth (spread)
    bread_a = thematic_breadth(vecs_a)
    bread_b = thematic_breadth(vecs_b)
    bread_d1 = thematic_breadth(vecs_d1)
    bread_d2 = thematic_breadth(vecs_d2)
    all_breads = [bread_a, bread_b, bread_d1, bread_d2]
    # Pole Orientation (eigenanalysis)
    pole_vec = heart_b - heart_a

    def cloud_eigen(vecs):
        # Main eigenvector of the cloud, its angle to the pole axis, and
        # the variance share it explains (0.0 when total variance is ~0).
        vals, evecs = principal_axis(vecs)
        main = evecs[:, 0]
        ang = angle_between(main, pole_vec)
        exp = vals[0] / vals.sum() if vals.sum() > 1e-12 else 0.0
        return main, ang, exp

    ev_a, ang_a, exp_a = cloud_eigen(vecs_a)
    ev_b, ang_b, exp_b = cloud_eigen(vecs_b)
    ev_d1, ang_d1, exp_d1 = cloud_eigen(vecs_d1)
    ev_d2, ang_d2, exp_d2 = cloud_eigen(vecs_d2)
    # Centroid projection onto pole axis (scalar position)
    pole_dir = unit(pole_vec)
    proj_d1 = float(np.dot(heart_d1 - heart_a, pole_dir))
    proj_d2 = float(np.dot(heart_d2 - heart_a, pole_dir))
    pole_len = float(np.linalg.norm(pole_vec))
    # 0.0 = at pole A's centroid, 1.0 = at pole B's; values can fall
    # outside [0, 1] (the bar renderer clamps them for display).
    pct_d1 = proj_d1 / pole_len if pole_len > 1e-12 else 0.5
    pct_d2 = proj_d2 / pole_len if pole_len > 1e-12 else 0.5
    # PCA to 3D (visualisation only)
    stack = np.vstack([all_vecs, heart_a, heart_b, heart_d1, heart_d2])
    pca = PCA(n_components=3, random_state=42)
    proj_3d = pca.fit_transform(stack)
    pca_ev = pca.explained_variance_ratio_
    n = len(all_sents)
    pts_a_3d = proj_3d[:na]
    pts_b_3d = proj_3d[na:na + nb]
    pts_d1_3d = proj_3d[na + nb:na + nb + nd1]
    pts_d2_3d = proj_3d[na + nb + nd1:n]
    # The four centroids were stacked after the sentences, in this order.
    c_a_3d, c_b_3d = proj_3d[n], proj_3d[n + 1]
    c_d1_3d, c_d2_3d = proj_3d[n + 2], proj_3d[n + 3]
    # Rotate eigenvectors into 3D PCA space
    ev_a_3d = unit(pca.components_ @ ev_a)
    ev_b_3d = unit(pca.components_ @ ev_b)
    ev_d1_3d = unit(pca.components_ @ ev_d1)
    ev_d2_3d = unit(pca.components_ @ ev_d2)
    # Build interactive Plotly figure
    fig = build_plotly_figure(
        pts_a_3d, pts_b_3d, pts_d1_3d, pts_d2_3d,
        c_a_3d, c_b_3d, c_d1_3d, c_d2_3d,
        ev_a_3d, ev_b_3d, ev_d1_3d, ev_d2_3d,
        pca_ev,
        name_a, name_b, name_d1, name_d2,
    )
    # ── Build plain-language report ───────────────────────────────────────
    # Pole separation quality. NOTE: scipy's `cosine` is the cosine
    # *distance* (1 - similarity), so larger values mean more distinct poles.
    pole_cos = float(cosine(heart_a, heart_b))
    if pole_cos > 0.4:
        sep_word = "strong"
        sep_note = "The two poles are clearly distinct β€” results are reliable."
    elif pole_cos > 0.2:
        sep_word = "moderate"
        sep_note = "The poles are reasonably distinct β€” results are meaningful."
    else:
        sep_word = "weak"
        sep_note = "The poles are quite similar β€” consider using more contrasting sentences."

    # Position bar (pole A = left anchor, pole B = right anchor)
    def position_bar(pct, width=40):
        # Clamp to [0, 1] so out-of-range projections still render a marker.
        pos = max(0, min(1, pct))
        idx = int(round(pos * width))
        bar = "β–‘" * idx + "●" + "β–‘" * (width - idx)
        return bar

    # Plain position description
    def position_desc(pct, na, nb):
        if pct <= 0.10:
            return f"very close to the {na} pole"
        elif pct <= 0.30:
            return f"closer to {na}"
        elif pct <= 0.45:
            return f"slightly leaning toward {na}"
        elif pct <= 0.55:
            return f"roughly midway between {na} and {nb}"
        elif pct <= 0.70:
            return f"slightly leaning toward {nb}"
        elif pct <= 0.90:
            return f"closer to {nb}"
        else:
            return f"very close to the {nb} pole"

    desc_d1 = position_desc(pct_d1, name_a, name_b)
    desc_d2 = position_desc(pct_d2, name_a, name_b)
    # Gap between texts
    gap = abs(pct_d1 - pct_d2)
    if gap < 0.05:
        gap_desc = "no meaningful difference in position"
    elif gap < 0.15:
        gap_desc = "a small difference in position"
    elif gap < 0.30:
        gap_desc = "a moderate difference in position"
    elif gap < 0.50:
        gap_desc = "a substantial difference in position"
    else:
        gap_desc = "a very large difference in position"

    # Cluster tightness as reliability (scaled within all four spreads)
    def reliability_label(spread, all_spreads):
        mn, mx = min(all_spreads), max(all_spreads)
        r = (spread - mn) / (mx - mn) if mx > mn else 0.5
        if r < 0.25:
            return "very consistent β€” position score is highly reliable"
        elif r < 0.50:
            return "fairly consistent β€” position score is reliable"
        elif r < 0.75:
            return "somewhat varied β€” position score is an average across different angles"
        else:
            return "wide-ranging β€” position score averages over quite different sentences"

    rel_d1 = reliability_label(bread_d1, all_breads)
    rel_d2 = reliability_label(bread_d2, all_breads)

    # Axis relevance (brief caveat only)
    def axis_relevance_note(angle):
        if angle < 30:
            return "sentences differ mainly along the pole spectrum"
        elif angle < 60:
            return "sentences differ partly along the spectrum, partly on other dimensions"
        else:
            return "sentences differ mainly on dimensions unrelated to this spectrum"

    note_d1 = axis_relevance_note(ang_d1)
    note_d2 = axis_relevance_note(ang_d2)
    # Overall verdict
    closer_to_a = name_d1 if pct_d1 < pct_d2 else name_d2
    closer_to_b = name_d2 if pct_d1 < pct_d2 else name_d1
    if gap < 0.05:
        verdict = (f"No clear difference: {name_d1} and {name_d2} occupy very "
                   f"similar positions on the {name_a}↔{name_b} spectrum.")
    else:
        verdict = (f"{closer_to_a} aligns more closely with {name_a}; "
                   f"{closer_to_b} aligns more closely with {name_b}. "
                   f"There is {gap_desc} between them ({gap:.0%} of the full spectrum).")
    # Caveats
    caveats = []
    if sep_word == "weak":
        caveats.append(f"Pole separation is weak β€” the two poles are not very distinct in meaning space. "
                       f"Try adding more contrasting sentences to each pole.")
    if bread_d1 > bread_b and bread_d1 > bread_a:
        caveats.append(f"{name_d1} is more wide-ranging than either pole corpus β€” "
                       f"its position score averages over quite varied content.")
    if bread_d2 > bread_b and bread_d2 > bread_a:
        caveats.append(f"{name_d2} is more wide-ranging than either pole corpus β€” "
                       f"its position score averages over quite varied content.")
    # Fixed-width ASCII-art report assembled line by line.
    W = 62
    report_lines = [
        f"{'═' * W}",
        f" DISCOURSE COMPASS β€” Results",
        f"{'═' * W}",
        f"",
        f" AXIS: {name_a} ←{'─' * 16}β†’ {name_b}",
        f" Pole separation: {sep_word} β€” {sep_note}",
        f" ({na} sentences in {name_a} pole Β· {nb} in {name_b} pole)",
        f"",
        f"{'─' * W}",
        f" WHERE EACH TEXT SITS ON THE SPECTRUM",
        f"{'─' * W}",
        f" 0% = {name_a} pole 100% = {name_b} pole",
        f"",
        f" {name_a} pole",
        f" {'β–‘' * 20}●{'β–‘' * 20} (0%)",
        f"",
        f" {name_d1} ({nd1} sentences)",
        f" {position_bar(pct_d1)} ({pct_d1:.0%})",
        f" β†’ {desc_d1}",
        f"",
        f" {name_d2} ({nd2} sentences)",
        f" {position_bar(pct_d2)} ({pct_d2:.0%})",
        f" β†’ {desc_d2}",
        f"",
        f" {name_b} pole",
        f" {'β–‘' * 20}●{'β–‘' * 20} (100%)",
        f"",
        f" Gap between {name_d1} and {name_d2}: {gap:.0%} of the spectrum",
        f" β†’ {gap_desc.capitalize()}.",
        f"",
        f"{'─' * W}",
        f" HOW RELIABLY DO THE SENTENCES CLUSTER?",
        f"{'─' * W}",
        f" A tight cluster means all sentences point in the same",
        f" direction β€” the position score is a reliable summary.",
        f" A loose cluster means sentences pull in different",
        f" directions β€” the score is an average and less decisive.",
        f"",
        f" {name_d1}: {rel_d1}.",
        f" {name_d2}: {rel_d2}.",
        f"",
        f" For reference β€” how wide-ranging are the pole corpora?",
        f" {name_a} pole: {breadth_label(bread_a, all_breads)}",
        f" {name_b} pole: {breadth_label(bread_b, all_breads)}",
        f"",
        f"{'─' * W}",
        f" AXIS ALIGNMENT NOTE",
        f"{'─' * W}",
        f" Do sentences within each text vary along the pole",
        f" spectrum, or mainly on unrelated dimensions?",
        f"",
        f" {name_d1}: {note_d1}.",
        f" {name_d2}: {note_d2}.",
        f"",
    ]
    # Caveats section is emitted only when there is something to warn about.
    if caveats:
        report_lines += [
            f"{'─' * W}",
            f" βš  CAVEATS",
            f"{'─' * W}",
        ]
        for c in caveats:
            report_lines.append(f" β€’ {c}")
        report_lines.append(f"")
    report_lines += [
        f"{'─' * W}",
        f" SUMMARY",
        f"{'─' * W}",
        f" {verdict}",
        f"",
        f"{'═' * W}",
        f" All measurements use the full {MODEL_DIM}-dimensional meaning",
        f" space of {MODEL_NAME}. The 3D map is a simplified view",
        f" for visual orientation β€” rotate and zoom it above.",
        f"{'═' * W}",
    ]
    report = "\n".join(report_lines)
    return report, fig
# ── Demo placeholders ─────────────────────────────────────────────────────────
# Pre-filled example corpora so a first-time user can click Run immediately.
# Pole A example: economic-growth framing.
PLACEHOLDER_A = """\
The economy is growing rapidly.
Unemployment is at a record low.
Businesses are thriving and profits are up.
Consumer spending is at an all-time high."""
# Pole B example: climate-crisis framing.
PLACEHOLDER_B = """\
Climate change is an existential crisis.
We must reduce carbon emissions immediately.
Renewable energy is the only sustainable future.
The planet is warming at an alarming rate."""
# Text 1 example: financial-news sample to be placed on the spectrum.
PLACEHOLDER_D1 = """\
The stock market reached a new record today.
Interest rates are being adjusted to control inflation.
Foreign direct investment increased by 12% this quarter."""
# Text 2 example: climate-reporting sample to be placed on the spectrum.
PLACEHOLDER_D2 = """\
Arctic ice sheets are melting faster than predicted.
Scientists warn of irreversible tipping points.
Carbon capture technology is advancing but not fast enough."""
# ── Explainer content ─────────────────────────────────────────────────────────
# Markdown rendered inside the "How does this work?" accordion.
EXPLAINER_HOW = """
### How does this tool work?
Every sentence carries meaning. This tool uses an AI language model to translate
each sentence into a **point in meaning-space** β€” an invisible map where sentences
that mean similar things sit close together, and sentences with very different
meanings sit far apart.
You define **two poles** by giving example sentences for each β€” for instance,
*economic growth* vs *climate crisis*. These poles create a spectrum.
Then you enter two sets of text (the "discourses") and the tool measures
where each one sits on that spectrum. The results tell you:
- **Which pole each text is closer to** (and by how much)
- **How spread out** each set of sentences is (focused vs wide-ranging)
- **What direction** the sentences vary in (along the spectrum, or off to the side)
The 3D map lets you **see** the results β€” each dot is a sentence, and you can
rotate and zoom to explore how they cluster.
"""
# ── CSS ───────────────────────────────────────────────────────────────────────
# Dark-theme style overrides for the Gradio UI. The custom classes
# (.run-btn, .output-text, .name-box) are attached via elem_classes in the UI.
CSS = """
body, .gradio-container { background: #0d0f1c !important; }
.gr-panel, .gr-form { background: #13162a !important;
border: 1px solid #1c2040 !important; }
textarea, input { background: #181b30 !important;
color: #dde4f8 !important;
border: 1px solid #262c50 !important;
border-radius: 8px !important; }
label span { color: #8892bb !important;
font-size: 0.84rem !important;
font-weight: 600 !important; }
.run-btn { background: linear-gradient(135deg, #4a7fff, #9b59f5)
!important;
border: none !important;
font-weight: 800 !important;
font-size: 1.05rem !important;
letter-spacing: 0.03em !important;
border-radius: 10px !important; }
.run-btn:hover { opacity: 0.86 !important; }
.output-text textarea { font-family: 'Courier New', monospace !important;
font-size: 0.79rem !important;
color: #7dd8f8 !important;
line-height: 1.55 !important; }
h1, h2, h3, h4 { color: #dde4f8 !important; }
.gr-accordion { border: 1px solid #1c2040 !important;
border-radius: 10px !important; }
.name-box input { font-weight: 700 !important;
font-size: 0.95rem !important; }
"""
# ── UI ────────────────────────────────────────────────────────────────────────
# FIX: `css` is a gr.Blocks() constructor argument; Blocks.launch() has no
# `css` parameter, so passing it to launch() (as before) either raises a
# TypeError or silently drops the stylesheet. Moved to gr.Blocks().
with gr.Blocks(title="Discourse Compass", css=CSS) as demo:
    # ── Header ────────────────────────────────────────────────────────────
    gr.HTML("""
<div style="padding: 8px 0 20px 0;">
<h1 style="color:#dde4f8; font-size:2rem; font-weight:900;
margin-bottom:6px; letter-spacing:-0.5px;">
🧭 Discourse Compass
</h1>
<p style="color:#5a6488; font-size:0.92rem; margin:0; max-width:700px;">
Define two semantic poles with example sentences, then find out where
any text sits between them β€” with plain-language explanations.
</p>
</div>""")
    with gr.Accordion("πŸ’‘ How does this work? (click to read)", open=False):
        gr.Markdown(EXPLAINER_HOW)
    gr.HTML("<hr style='border-color:#1c2040; margin: 8px 0 20px 0;'>")
    # ── Step 1: Poles ─────────────────────────────────────────────────────
    gr.HTML("""
<h3 style="color:#dde4f8; margin-bottom:4px;">Step 1 β€” Define your two poles</h3>
<p style="color:#5a6488; font-size:0.86rem; margin:0 0 14px 0;">
Enter several sentences that represent each extreme. One sentence per line.
</p>""")
    with gr.Row():
        with gr.Column():
            gr.HTML("<span style='color:#5aa8ff;font-weight:700;'>πŸ”΅ POLE A</span>")
            name_a_box = gr.Textbox(label="Name for Pole A",
                                    value="Economic Growth",
                                    elem_classes=["name-box"])
            pole_a = gr.Textbox(label="Sentences β€” one per line",
                                lines=7, value=PLACEHOLDER_A)
        with gr.Column():
            gr.HTML("<span style='color:#ff6b6b;font-weight:700;'>πŸ”΄ POLE B</span>")
            name_b_box = gr.Textbox(label="Name for Pole B",
                                    value="Climate Crisis",
                                    elem_classes=["name-box"])
            pole_b = gr.Textbox(label="Sentences β€” one per line",
                                lines=7, value=PLACEHOLDER_B)
    gr.HTML("<hr style='border-color:#1c2040; margin: 20px 0;'>")
    # ── Step 2: Discourses ────────────────────────────────────────────────
    gr.HTML("""
<h3 style="color:#dde4f8; margin-bottom:4px;">Step 2 β€” Enter the texts to analyse</h3>
<p style="color:#5a6488; font-size:0.86rem; margin:0 0 14px 0;">
These are the texts whose position between the poles you want to measure.
</p>""")
    with gr.Row():
        with gr.Column():
            gr.HTML("<span style='color:#3dd6a3;font-weight:700;'>🟒 TEXT 1</span>")
            name_d1_box = gr.Textbox(label="Name for Text 1",
                                     value="Financial News",
                                     elem_classes=["name-box"])
            disc1 = gr.Textbox(label="Sentences β€” one per line",
                               lines=5, value=PLACEHOLDER_D1)
        with gr.Column():
            gr.HTML("<span style='color:#ffcc55;font-weight:700;'>🟑 TEXT 2</span>")
            name_d2_box = gr.Textbox(label="Name for Text 2",
                                     value="Climate Reporting",
                                     elem_classes=["name-box"])
            disc2 = gr.Textbox(label="Sentences β€” one per line",
                               lines=5, value=PLACEHOLDER_D2)
    # ── Run button ────────────────────────────────────────────────────────
    gr.HTML("<div style='margin: 24px 0 8px 0;'>")
    run_btn = gr.Button("⚑ Run Analysis", variant="primary",
                        size="lg", elem_classes=["run-btn"])
    gr.HTML("</div>")
    gr.HTML("<hr style='border-color:#1c2040; margin: 24px 0 16px 0;'>")
    # ── Results ───────────────────────────────────────────────────────────
    gr.HTML("""
<h3 style="color:#dde4f8; margin: 0 0 4px 0;">πŸ“Š Interactive Semantic Map</h3>
<p style="color:#5a6488; font-size:0.84rem; margin:0 0 12px 0;">
Each dot is a sentence. Diamonds (β—†) mark the centre of each group.
<strong>Drag to rotate Β· scroll to zoom Β· click legend items to toggle.</strong>
</p>""")
    plot_out = gr.Plot(label="Semantic Map")
    gr.HTML("<hr style='border-color:#1c2040; margin: 24px 0 16px 0;'>")
    gr.HTML("""
<h3 style="color:#dde4f8; margin: 0 0 4px 0;">πŸ“‹ Results Report</h3>
<p style="color:#5a6488; font-size:0.84rem; margin:0 0 10px 0;">
Plain-language summary of every measurement.
</p>""")
    text_out = gr.Textbox(label="Results", lines=42, interactive=False,
                          elem_classes=["output-text"])
    # ── Wire up events ────────────────────────────────────────────────────
    run_btn.click(
        fn=run_analysis,
        inputs=[pole_a, pole_b, disc1, disc2,
                name_a_box, name_b_box, name_d1_box, name_d2_box],
        outputs=[text_out, plot_out],
    )
    gr.HTML(f"""
<p style="color:#1e2440; font-size:0.74rem; text-align:center;
margin-top:28px; padding-bottom:12px;">
All measurements use the full {MODEL_DIM}-dimensional meaning space of
<code>{MODEL_NAME}</code>.
The 3D map is a simplified view (PCA) for orientation only.
</p>""")

if __name__ == "__main__":
    # css is applied via gr.Blocks above; launch() takes no css parameter.
    demo.launch(share=True)