jbobym committed on
Commit 4b88c4e · verified · 1 Parent(s): d8c7fbf

Add EN/FR bilingual UI + Hôpital Montfort theme (Montfort blue + turquoise)

Files changed (1):
  1. app.py +310 −62
app.py CHANGED
@@ -1,4 +1,4 @@
-"""HF Spaces Gradio app — chronic wound classifier (4-class).
+"""HF Spaces Gradio app — chronic wound classifier (4-class), bilingual EN/FR.
 
 Self-contained: no wound_classifier package install required. The model
 architecture and transforms are inlined here so this file plus the .pt
@@ -7,6 +7,10 @@ checkpoint and requirements.txt are everything the Space needs.
 If the architecture or transform here drifts from
 src/wound_classifier/{modeling/models.py, features.py} the Space and the
 training pipeline will silently disagree. Keep them in sync.
+
+Theming approximates Hôpital Montfort (Ottawa) brand colors, sourced from
+the live hopitalmontfort.com stylesheet: primary "Montfort blue" #00729a,
+turquoise accent #47c9cd, warm cream surface #f1ede5.
 """
 
 from __future__ import annotations
@@ -24,14 +28,166 @@ CKPT_PATH = Path(__file__).parent / "cv_baseline_fold5_best.pt"
 IMAGE_SIZE = 224
 IMAGENET_MEAN = (0.485, 0.456, 0.406)
 IMAGENET_STD = (0.229, 0.224, 0.225)
-CLASS_NAMES = {
-    "D": "Diabetic ulcer",
-    "P": "Pressure ulcer",
-    "S": "Surgical wound",
-    "V": "Venous ulcer",
-}
 IDX_TO_CLASS = ["D", "P", "S", "V"]
 
+CLASS_NAMES: dict[str, dict[str, str]] = {
+    "en": {
+        "D": "Diabetic ulcer",
+        "P": "Pressure ulcer",
+        "S": "Surgical wound",
+        "V": "Venous ulcer",
+    },
+    "fr": {
+        "D": "Ulcère diabétique",
+        "P": "Escarre",
+        "S": "Plaie chirurgicale",
+        "V": "Ulcère veineux",
+    },
+}
+
+LOW_CONFIDENCE_THRESHOLD = 0.5
+
+# ---------- Localized strings ---------------------------------------------------
+
+TITLE: dict[str, str] = {
+    "en": "# Chronic Wound Classifier — 4-class AZH demo",
+    "fr": "# Classification des plaies chroniques — démo AZH 4 classes",
+}
+
+DESCRIPTION: dict[str, str] = {
+    "en": """\
+Upload a close-up photo of a chronic wound and the model returns its best guess at the wound type
+(diabetic ulcer, pressure ulcer, surgical wound, or venous ulcer) with per-class probabilities.
+
+**Before you upload — a few things worth knowing:**
+
+- **Frame the wound in the center.** The model resizes and center-crops to 224×224 pixels, so a wound
+  off in the corner of a high-resolution phone photo will get cropped out. Close-up clinical-style
+  framing (the wound fills most of the frame) gives the cleanest predictions.
+- **JPEG or PNG works fine.** Anything PIL can open.
+- **There is no "not a wound" option.** Upload a face, a sandwich, a sunset — the model will
+  confidently place it into one of the four wound classes anyway. The low-confidence warning below
+  is the only signal you'll get that the image probably isn't what the model was trained on.
+- **Pressure-ulcer predictions are weakest** (~41% accuracy on the held-out test set). When the model
+  says "Pressure ulcer," treat the call with extra skepticism.
+
+**This is a research demo, not a medical device.** It does not diagnose, triage, or replace clinician
+judgement. Trained on the public AZH Chronic Wound Database (Anisuzzaman et al. 2022) using
+EfficientNet-B0 with patient-grouped 10-fold cross-validation; the deployed checkpoint
+(`cv_baseline_fold5_best.pt`) reaches **0.8152 top-1** on the held-out AZH Test set (n=184).
+""",
+    "fr": """\
+Téléversez une photo en gros plan d'une plaie chronique : le modèle propose sa meilleure estimation
+parmi quatre types de plaies (ulcère diabétique, escarre, plaie chirurgicale, ulcère veineux) avec
+les probabilités par classe.
+
+**À lire avant de téléverser :**
+
+- **Cadrez la plaie au centre de l'image.** Le modèle redimensionne et recadre au centre à 224×224 pixels :
+  une plaie située dans un coin d'une photo haute résolution sera coupée. Un cadrage clinique rapproché
+  (la plaie occupe la majeure partie du cadre) donne les prédictions les plus propres.
+- **JPEG ou PNG conviennent.** Tout format que PIL sait ouvrir.
+- **Il n'y a pas d'option « ce n'est pas une plaie ».** Téléversez un visage, un sandwich, un coucher
+  de soleil : le modèle classera l'image dans l'une des quatre classes de plaies, et avec confiance.
+  L'avertissement de faible confiance ci-dessous est le seul signal indiquant que l'image n'est
+  probablement pas conforme aux données d'entraînement.
+- **Les prédictions d'escarre sont les moins fiables** (~41 % d'exactitude sur le jeu de test).
+  Lorsque le modèle indique « Escarre », interprétez cette prédiction avec une prudence accrue.
+
+**Ceci est une démonstration de recherche, et non un dispositif médical.** Le modèle n'établit aucun
+diagnostic, ne fait pas de triage et ne remplace en rien le jugement clinique. Entraîné sur la base
+de données publique AZH Chronic Wound Database (Anisuzzaman et coll. 2022) avec EfficientNet-B0 et
+validation croisée à 10 plis groupés par patient ; le point de contrôle déployé
+(`cv_baseline_fold5_best.pt`) atteint **0,8152 top-1** sur le jeu de test AZH retenu (n=184).
+""",
+}
+
+ARTICLE: dict[str, str] = {
+    "en": """\
+**Methodology in one paragraph.** EfficientNet-B0 (ImageNet-pretrained) two-phase fine-tuning:
+phase 1 freezes the backbone and trains the 4-class head for 5 epochs at lr=1e-3; phase 2 unfreezes
+and fine-tunes the full network for 15 epochs at lr=1e-4. Patient-grouped 10-fold CV ensures the same
+patient's images never appear in both train and val. Training and validation under random-stratified
+splits would inflate accuracy by ~3–30pp on AZH (we measured this directly); we report the patient-
+grouped numbers as the honest evaluation. The shipped checkpoint is the highest single fold on the
+held-out Test set; a 10-fold soft-vote ensemble averages 0.7989 on the same set.
+
+**Out of scope.** Not for clinical decision-making. No claim of diagnostic accuracy on real patient
+cohorts. No fairness audit across skin tones (known gap). No mobile / offline build.
+
+**Citation.** Anisuzzaman et al. 2022, *Multi-modal wound classification using wound image and
+location by deep neural network*, Sci. Rep. 12:20057.
+""",
+    "fr": """\
+**Méthodologie en un paragraphe.** EfficientNet-B0 (préentraîné sur ImageNet), affinement en deux
+phases : la phase 1 gèle le tronc et entraîne la tête à 4 classes pendant 5 époques (lr=1e-3) ; la
+phase 2 dégèle et affine le réseau complet pendant 15 époques (lr=1e-4). La validation croisée à
+10 plis groupés par patient garantit que les images d'un même patient n'apparaissent jamais à la
+fois en entraînement et en validation. Les découpes aléatoires stratifiées (sans regroupement par
+patient) gonflent l'exactitude de ~3 à 30 pp sur AZH (mesuré directement) ; nous rapportons les
+chiffres groupés par patient comme évaluation honnête. Le point de contrôle déployé est le meilleur
+pli individuel sur le jeu de test retenu ; un ensemble par vote doux à 10 plis atteint en moyenne
+0,7989 sur ce même jeu.
+
+**Hors champ.** Aucun usage de décision clinique. Aucune prétention à une exactitude diagnostique
+sur de vraies cohortes de patients. Aucun audit d'équité par teinte de peau (limite connue). Aucune
+version mobile ou hors ligne.
+
+**Référence.** Anisuzzaman et coll. 2022, *Multi-modal wound classification using wound image and
+location by deep neural network*, Sci. Rep. 12:20057.
+""",
+}
+
+LABELS: dict[str, dict[str, str]] = {
+    "en": {
+        "lang_radio": "Language / Langue",
+        "image_input": "Wound photograph (close-up, centered)",
+        "label_output": "Predicted wound type",
+        "notes_output": "Notes",
+        "submit": "Classify",
+        "clear": "Clear",
+    },
+    "fr": {
+        "lang_radio": "Language / Langue",
+        "image_input": "Photographie de la plaie (gros plan, centrée)",
+        "label_output": "Type de plaie prédit",
+        "notes_output": "Remarques",
+        "submit": "Classer",
+        "clear": "Effacer",
+    },
+}
+
+NOTE_LOW_CONFIDENCE: dict[str, str] = {
+    "en": (
+        "⚠️ **Low confidence** (top class {top_label} at {top_pct}). "
+        "This often means the photo isn't a clear close-up of a wound, or the wound type "
+        "isn't one of the four the model was trained on. The model has no \"not a wound\" "
+        "option — it will always pick one of D / P / S / V even when the image isn't a wound."
+    ),
+    "fr": (
+        "⚠️ **Faible confiance** (classe principale {top_label} à {top_pct}). "
+        "Cela indique souvent que la photo n'est pas un gros plan clair d'une plaie, ou "
+        "que le type de plaie ne fait pas partie des quatre sur lesquels le modèle a été entraîné. "
+        "Le modèle n'a pas d'option « ce n'est pas une plaie » — il choisira toujours D / P / S / V "
+        "même si l'image n'est pas une plaie."
+    ),
+}
+
+NOTE_PRESSURE: dict[str, str] = {
+    "en": (
+        "⚠️ **Pressure-class predictions are the model's weak spot** "
+        "(~0.41 accuracy on the held-out test set). Treat this prediction with extra skepticism."
+    ),
+    "fr": (
+        "⚠️ **Les prédictions d'escarre sont le point faible du modèle** "
+        "(~0,41 d'exactitude sur le jeu de test retenu). Interprétez cette prédiction avec une "
+        "prudence accrue."
+    ),
+}
+
+
+# ---------- Model loading -------------------------------------------------------
+
 
 def _build_model(num_classes: int = 4) -> nn.Module:
     model: nn.Module = efficientnet_b0(weights=None)
@@ -66,87 +222,179 @@ MODEL = _load_model(CKPT_PATH)
 TRANSFORM = _build_transform()
 
 
-LOW_CONFIDENCE_THRESHOLD = 0.5
+def _lang_code(choice: str) -> str:
+    return "fr" if choice == "Français" else "en"
 
 
-def classify(image: Image.Image) -> tuple[dict[str, float], str]:
+def _format_pct(value: float, lang: str) -> str:
+    # French style: decimal comma, narrow no-break space before "%" (e.g. "47 %").
+    pct = f"{value:.0%}"
+    return pct.replace(".", ",").replace("%", "\u202f%") if lang == "fr" else pct
+
+
+def classify(image: Image.Image | None, language_choice: str) -> tuple[dict[str, float], str]:
     if image is None:
         return {}, ""
+    lang = _lang_code(language_choice)
    rgb = image.convert("RGB")
     x = TRANSFORM(rgb).unsqueeze(0)
     with torch.inference_mode():
         logits = MODEL(x)
         probs = torch.softmax(logits, dim=1).squeeze(0).numpy()
-    label_probs = {CLASS_NAMES[IDX_TO_CLASS[i]]: float(probs[i]) for i in range(4)}
+
+    name_map = CLASS_NAMES[lang]
+    label_probs = {name_map[IDX_TO_CLASS[i]]: float(probs[i]) for i in range(4)}
 
     top_label, top_prob = max(label_probs.items(), key=lambda kv: kv[1])
+    top_letter = next(letter for letter, name in name_map.items() if name == top_label)
+
     notes: list[str] = []
     if top_prob < LOW_CONFIDENCE_THRESHOLD:
         notes.append(
-            f"⚠️ **Low confidence** (top class {top_label} at {top_prob:.0%}). "
-            "This often means the photo isn't a clear close-up of a wound, or the wound type "
-            "isn't one of the four the model was trained on. The model has no \"not a wound\" "
-            "option — it will always pick one of D / P / S / V even when the image isn't a wound."
-        )
-    if top_label == "Pressure ulcer":
-        notes.append(
-            "⚠️ **Pressure-class predictions are the model's weak spot** "
-            "(~0.41 accuracy on the held-out test set). Treat this prediction with extra skepticism."
+            NOTE_LOW_CONFIDENCE[lang].format(
+                top_label=top_label, top_pct=_format_pct(top_prob, lang)
+            )
         )
+    if top_letter == "P":
+        notes.append(NOTE_PRESSURE[lang])
+
     return label_probs, "\n\n".join(notes)
 
 
-DESCRIPTION = """\
-Upload a close-up photo of a chronic wound and the model returns its best guess at the wound type
-(diabetic ulcer, pressure ulcer, surgical wound, or venous ulcer) with per-class probabilities.
-
-**Before you upload a few things worth knowing:**
-
-- **Frame the wound in the center.** The model resizes and center-crops to 224×224 pixels, so a wound
-  off in the corner of a high-resolution phone photo will get cropped out. Close-up clinical-style
-  framing (the wound fills most of the frame) gives the cleanest predictions.
-- **JPEG or PNG works fine.** Anything PIL can open.
-- **There is no "not a wound" option.** Upload a face, a sandwich, a sunset — the model will
-  confidently place it into one of the four wound classes anyway. The low-confidence warning below
-  is the only signal you'll get that the image probably isn't what the model was trained on.
-- **Pressure-ulcer predictions are weakest** (~41% accuracy on the held-out test set). When the model
-  says "Pressure ulcer," treat the call with extra skepticism.
-
-**This is a research demo, not a medical device.** It does not diagnose, triage, or replace clinician
-judgement. Trained on the public AZH Chronic Wound Database (Anisuzzaman et al. 2022) using
-EfficientNet-B0 with patient-grouped 10-fold cross-validation; the deployed checkpoint
-(`cv_baseline_fold5_best.pt`) reaches **0.8152 top-1** on the held-out AZH Test set (n=184).
-"""
-
-ARTICLE = """\
-**Methodology in one paragraph.** EfficientNet-B0 (ImageNet-pretrained) two-phase fine-tuning:
-phase 1 freezes the backbone and trains the 4-class head for 5 epochs at lr=1e-3; phase 2 unfreezes
-and fine-tunes the full network for 15 epochs at lr=1e-4. Patient-grouped 10-fold CV ensures the same
-patient's images never appear in both train and val. Training and validation under random-stratified
-splits would inflate accuracy by ~3–30pp on AZH (we measured this directly); we report the patient-
-grouped numbers as the honest evaluation. The shipped checkpoint is the highest single fold on the
-held-out Test set; a 10-fold soft-vote ensemble averages 0.7989 on the same set.
-
-**Out of scope.** Not for clinical decision-making. No claim of diagnostic accuracy on real patient
-cohorts. No fairness audit across skin tones (known gap). English-only UI. No mobile / offline build.
-
-**Citations.** Anisuzzaman et al. 2022, *Multi-modal wound classification using wound image and
-location by deep neural network*, Sci. Rep. 12:20057.
-"""
-
-
-demo = gr.Interface(
-    fn=classify,
-    inputs=gr.Image(type="pil", label="Wound photograph (close-up, centered)"),
-    outputs=[
-        gr.Label(num_top_classes=4, label="Predicted wound type"),
-        gr.Markdown(label="Notes"),
-    ],
-    title="Chronic Wound Classifier — 4-class AZH demo",
-    description=DESCRIPTION,
-    article=ARTICLE,
-    flagging_mode="never",
-)
+# ---------- UI ------------------------------------------------------------------
+
+# Custom theme using Hôpital Montfort brand colors (extracted from their stylesheet):
+# primary "Montfort blue" #00729a, turquoise accent #47c9cd, warm cream #f1ede5.
+montfort_blue = gr.themes.Color(
+    name="montfort_blue",
+    c50="#eef7fa",
+    c100="#c6eafa",
+    c200="#9bd9ed",
+    c300="#6ec5dd",
+    c400="#3aa5c4",
+    c500="#00729a",
+    c600="#005f81",
+    c700="#004d68",
+    c800="#003a4f",
+    c900="#002836",
+    c950="#001a25",
+)
+montfort_turquoise = gr.themes.Color(
+    name="montfort_turquoise",
+    c50="#e6fbfb",
+    c100="#c6f4f5",
+    c200="#9eeaeb",
+    c300="#73dde0",
+    c400="#47c9cd",
+    c500="#23b6ba",
+    c600="#1a9498",
+    c700="#147576",
+    c800="#0f5859",
+    c900="#0a3c3d",
+    c950="#062323",
+)
+
+theme = gr.themes.Soft(
+    primary_hue=montfort_blue,
+    secondary_hue=montfort_turquoise,
+    neutral_hue=gr.themes.colors.stone,
+    font=[gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"],
+).set(
+    body_background_fill="#f1ede5",
+    block_background_fill="white",
+    button_primary_background_fill="#00729a",
+    button_primary_background_fill_hover="#005f81",
+    button_primary_text_color="white",
+)
+
+
+def _localize_components(
+    language_choice: str,
+) -> tuple[gr.Markdown, gr.Markdown, gr.Image, gr.Label, gr.Markdown, gr.Button, gr.Button]:
+    lang = _lang_code(language_choice)
+    labels = LABELS[lang]
+    return (
+        gr.Markdown(value=TITLE[lang]),
+        gr.Markdown(value=DESCRIPTION[lang]),
+        gr.Image(label=labels["image_input"]),
+        gr.Label(label=labels["label_output"]),
+        gr.Markdown(value="", label=labels["notes_output"]),
+        gr.Button(value=labels["submit"]),
+        gr.Button(value=labels["clear"]),
+    )
+
+
+with gr.Blocks(theme=theme, title="Chronic Wound Classifier · Hôpital Montfort demo") as demo:
+    language_radio = gr.Radio(
+        choices=["English", "Français"],
+        value="English",
+        label=LABELS["en"]["lang_radio"],
+        interactive=True,
+    )
+
+    title_md = gr.Markdown(TITLE["en"])
+    description_md = gr.Markdown(DESCRIPTION["en"])
+
+    with gr.Row():
+        with gr.Column():
+            image_input = gr.Image(type="pil", label=LABELS["en"]["image_input"])
+            with gr.Row():
+                submit_btn = gr.Button(LABELS["en"]["submit"], variant="primary")
+                clear_btn = gr.Button(LABELS["en"]["clear"])
+        with gr.Column():
+            label_output = gr.Label(num_top_classes=4, label=LABELS["en"]["label_output"])
+            notes_output = gr.Markdown(label=LABELS["en"]["notes_output"])
+
+    article_md = gr.Markdown(ARTICLE["en"])
+
+    submit_btn.click(
+        classify,
+        inputs=[image_input, language_radio],
+        outputs=[label_output, notes_output],
+    )
+    image_input.change(
+        classify,
+        inputs=[image_input, language_radio],
+        outputs=[label_output, notes_output],
+    )
+    clear_btn.click(
+        lambda: (None, {}, ""),
+        inputs=[],
+        outputs=[image_input, label_output, notes_output],
+    )
+
+    def _on_language_change(
+        language_choice: str, current_image: Image.Image | None
+    ) -> tuple[dict, dict, dict, dict, dict, dict, dict, str]:
+        lang = _lang_code(language_choice)
+        labels = LABELS[lang]
+        # Re-run inference so the on-screen probability labels switch languages too.
+        new_probs, new_notes = classify(current_image, language_choice)
+        return (
+            gr.update(value=TITLE[lang]),
+            gr.update(value=DESCRIPTION[lang]),
+            gr.update(value=ARTICLE[lang]),
+            gr.update(label=labels["image_input"]),
+            gr.update(label=labels["label_output"], value=new_probs),
+            gr.update(value=labels["submit"]),
+            gr.update(value=labels["clear"]),
+            new_notes,
+        )
+
+    language_radio.change(
+        _on_language_change,
+        inputs=[language_radio, image_input],
+        outputs=[
+            title_md,
+            description_md,
+            article_md,
+            image_input,
+            label_output,
+            submit_btn,
+            clear_btn,
+            notes_output,
+        ],
+    )
 
 
 if __name__ == "__main__":
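
The module docstring above warns that the inlined architecture can silently drift from
src/wound_classifier. A minimal parity check, assuming the training package is importable
in a dev environment and exposes a build_model helper (hypothetical name), might look like:

# Hypothetical drift guard; build_model is an assumed name, not confirmed by this commit.
import torch.nn as nn
from torchvision.models import efficientnet_b0


def test_space_matches_training_architecture() -> None:
    space_model = efficientnet_b0(weights=None)
    space_model.classifier[1] = nn.Linear(space_model.classifier[1].in_features, 4)

    from wound_classifier.modeling.models import build_model  # assumed helper

    trained = build_model(num_classes=4)
    space_sd, train_sd = space_model.state_dict(), trained.state_dict()
    # Identical keys and shapes mean the .pt checkpoint loads the same way in both.
    assert space_sd.keys() == train_sd.keys()
    assert all(space_sd[k].shape == train_sd[k].shape for k in space_sd)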
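
The "frame the wound in the center" bullet follows from the eval transform. The Space's
actual _build_transform() sits outside this diff, so the following is only a sketch of
the usual resize-then-center-crop pipeline implied by IMAGE_SIZE and the ImageNet constants:

# Typical eval transform consistent with IMAGE_SIZE = 224 and the mean/std above.
from torchvision import transforms

eval_tf = transforms.Compose([
    transforms.Resize(256),      # shorter side -> 256 px, aspect ratio preserved
    transforms.CenterCrop(224),  # keep only the central 224x224 square
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
# A 4000x3000 phone photo becomes 341x256 after Resize; CenterCrop then discards
# everything outside the middle 224x224, so a wound near a corner never reaches the model.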
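
The two-phase schedule in the methodology paragraph maps onto a few lines of torch. A
sketch with illustrative choices (the commit does not name the optimizer, and the epoch
loops are elided):

import torch
from torch import nn
from torchvision.models import efficientnet_b0

model = efficientnet_b0(weights="IMAGENET1K_V1")  # ImageNet-pretrained backbone
model.classifier[1] = nn.Linear(model.classifier[1].in_features, 4)

# Phase 1: freeze the backbone, train only the 4-class head (5 epochs, lr=1e-3).
for p in model.features.parameters():
    p.requires_grad = False
opt1 = torch.optim.Adam((p for p in model.parameters() if p.requires_grad), lr=1e-3)
# ... run 5 epochs with opt1 ...

# Phase 2: unfreeze and fine-tune the full network (15 epochs, lr=1e-4).
for p in model.parameters():
    p.requires_grad = True
opt2 = torch.optim.Adam(model.parameters(), lr=1e-4)
# ... run 15 epochs with opt2 ...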
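
Patient-grouped splitting is what prevents the leakage quantified above; per-image patient
IDs are assumed to exist in the AZH metadata. With scikit-learn it is one splitter:

from sklearn.model_selection import GroupKFold

image_paths = [f"img_{i:03d}.png" for i in range(30)]  # toy stand-ins for AZH images
labels = [i % 4 for i in range(30)]
patient_ids = [i // 3 for i in range(30)]              # 3 images per patient

splitter = GroupKFold(n_splits=10)
for train_idx, val_idx in splitter.split(image_paths, labels, groups=patient_ids):
    # The same patient never lands on both sides of a split.
    assert {patient_ids[i] for i in train_idx}.isdisjoint(patient_ids[i] for i in val_idx)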
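
"Soft vote" here means averaging the folds' softmax probability vectors before the argmax,
rather than voting on hard labels. A sketch (only the fold-5 checkpoint actually ships
with the Space):

import torch


def soft_vote(models: list[torch.nn.Module], x: torch.Tensor) -> torch.Tensor:
    # Mean of per-fold probability vectors; result has shape (batch, 4).
    with torch.inference_mode():
        probs = torch.stack([torch.softmax(m(x), dim=1) for m in models])
    return probs.mean(dim=0)

# predicted_idx = soft_vote(fold_models, batch).argmax(dim=1)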
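
For reference, _format_pct (with the percent-spacing fix applied in the diff above)
behaves as follows when run in app.py's namespace:

assert _format_pct(0.47, "en") == "47%"
assert _format_pct(0.47, "fr") == "47\u202f%"  # narrow no-break space before the sign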