nusaibah0110 commited on
Commit
925c34c
·
1 Parent(s): 5dcf9d6

New model added

Browse files
backend/Colpo/inference.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# ============================================================
# Colposcopy Inference Backend
# Production-ready | VS Code | Hugging Face compatible
# ============================================================
# Loads a YOLOv8 segmentation model, a torch fusion network and a
# scikit-learn logistic-regression classifier once at import time, then
# exposes run_inference(image_path) for the web backend to call.

import os
import cv2
import numpy as np
import torch
import torch.nn as nn
import joblib
from torchvision import transforms, models
from PIL import Image

# ------------------------------------------------------------
# DEVICE
# ------------------------------------------------------------
# Prefer GPU when available; all tensors below are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ------------------------------------------------------------
# PATHS (RELATIVE — REQUIRED FOR DEPLOYMENT)
# ------------------------------------------------------------
# Anchor every path on this file's directory so the module works no
# matter what the process's current working directory is.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MODEL_DIR = os.path.join(BASE_DIR, "models")
OUTPUT_DIR = os.path.join(BASE_DIR, "outputs")

os.makedirs(OUTPUT_DIR, exist_ok=True)

SEG_MODEL_PATH = os.path.join(MODEL_DIR, "seg_yolov8n_best.pt")
FUSION_MODEL_PATH = os.path.join(MODEL_DIR, "fusion_model.pth")
CLF_PATH = os.path.join(MODEL_DIR, "logreg_classifier.joblib")

# ------------------------------------------------------------
# LOAD MODELS (ONCE)
# ------------------------------------------------------------
# NOTE(review): mid-file import — PEP 8 prefers it at the top with the
# other imports; kept here to preserve the existing load order.
from ultralytics import YOLO

# Both models are loaded a single time at module import; every
# run_inference() call reuses them.
seg_model = YOLO(SEG_MODEL_PATH)
clf = joblib.load(CLF_PATH)
40
+
41
+ # ------------------------------------------------------------
42
+ # FUSION MODEL DEFINITION
43
+ # ------------------------------------------------------------
44
class ImageEncoder(nn.Module):
    """Encode an RGB image batch into a 512-d embedding.

    A ResNet-18 backbone with its final classification layer removed
    (global-average-pooled 512-d features), followed by a 512->512
    linear projection.  The network is built randomly initialised here;
    the trained weights arrive via FusionModel.load_state_dict.
    """

    def __init__(self):
        super().__init__()
        # `pretrained=False` is deprecated since torchvision 0.13;
        # `weights=None` requests the same randomly-initialised net.
        base = models.resnet18(weights=None)
        # Keep everything except the last child (the `fc` classifier):
        # conv stem, residual stages and the adaptive average pool.
        self.backbone = nn.Sequential(*list(base.children())[:-1])
        self.fc = nn.Linear(512, 512)

    def forward(self, x):
        # x: (B, 3, H, W) -> backbone -> (B, 512, 1, 1) -> flatten -> fc
        x = self.backbone(x)
        return self.fc(x.view(x.size(0), -1))
54
+
55
+
56
class FeatureEncoder(nn.Module):
    """Project the 7 hand-crafted geometry features into a 64-d embedding.

    The attribute name ``net`` and the layer order are fixed: the
    checkpoint's state_dict keys (``net.0.*``, ``net.2.*``) depend on them.
    """

    def __init__(self):
        super().__init__()
        stack = [
            nn.Linear(7, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
        ]
        self.net = nn.Sequential(*stack)

    def forward(self, x):
        # x: (B, 7) float tensor -> (B, 64) embedding
        return self.net(x)
67
+
68
+
69
class FusionModel(nn.Module):
    """Fuse image and geometry-feature embeddings into one 576-d vector.

    Concatenates the 512-d image embedding with the 64-d feature
    embedding and batch-normalises the result.  Attribute names
    (``img_enc``, ``feat_enc``, ``norm``) are fixed by the checkpoint's
    state_dict keys.
    """

    def __init__(self):
        super().__init__()
        self.img_enc = ImageEncoder()
        self.feat_enc = FeatureEncoder()
        self.norm = nn.BatchNorm1d(576)  # 512 + 64

    def forward(self, img, feat):
        # Encode each modality, concatenate along the feature axis,
        # then normalise the fused vector.
        parts = (self.img_enc(img), self.feat_enc(feat))
        fused = torch.cat(parts, dim=1)
        return self.norm(fused)
80
+
81
+
82
# Instantiate the fusion network and restore the trained weights.
fusion_model = FusionModel().to(device)
# NOTE(review): torch.load without weights_only=True unpickles arbitrary
# objects — acceptable for this trusted local checkpoint; verify the file
# provenance before deploying elsewhere.
fusion_model.load_state_dict(torch.load(FUSION_MODEL_PATH, map_location=device))
fusion_model.eval()  # inference mode: BatchNorm1d uses running statistics
85
+
86
+ # ------------------------------------------------------------
87
+ # IMAGE TRANSFORM
88
+ # ------------------------------------------------------------
89
# Preprocessing for the image encoder: resize to the 224x224 input that
# ResNet-18 expects, then scale pixels to [0, 1].
# NOTE(review): no ImageNet mean/std normalisation is applied —
# presumably the fusion model was trained the same way; confirm.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])
93
+
94
+ # ------------------------------------------------------------
95
+ # CONSTANTS
96
+ # ------------------------------------------------------------
97
# YOLO segmentation class ids — must match the label map the
# segmentation model was trained with.
CERVIX_ID = 0  # whole cervix region
SCJ_ID = 1     # squamocolumnar junction
ACET_ID = 3    # acetowhite lesion; NOTE(review): id 2 unused here — confirm label map
# Minimum acetowhite/cervix area ratio for a lesion to count as present.
MIN_ACET_RATIO = 0.01
101
+
102
+ # ------------------------------------------------------------
103
+ # GEOMETRY UTILITIES
104
+ # ------------------------------------------------------------
105
def polygon_to_mask(polygon, H, W):
    """Rasterise a normalised polygon into a binary H x W uint8 mask.

    *polygon* is a sequence of (x, y) pairs in [0, 1]; each vertex is
    scaled to pixel coordinates and the filled interior is set to 1.
    """
    canvas = np.zeros((H, W), dtype=np.uint8)
    vertices = np.array(
        [[int(px * W), int(py * H)] for px, py in polygon], np.int32
    )
    cv2.fillPoly(canvas, [vertices], 1)
    return canvas
110
+
111
+
112
def mask_area(mask):
    """Return the fraction of set pixels in *mask* (sum over total size)."""
    filled = mask.sum()
    return filled / mask.size
114
+
115
+
116
def centroid_distance(mask1, mask2):
    """Distance between the centroids of two binary masks.

    Normalised by the larger dimension of *mask1*.  Returns 1.0 (the
    maximal value) when *mask2* is missing or either mask is empty.
    """
    if mask2 is None:
        return 1.0

    rows_a, cols_a = np.where(mask1 == 1)
    rows_b, cols_b = np.where(mask2 == 1)

    # An empty mask has no centroid — report maximal distance.
    if rows_a.size == 0 or rows_b.size == 0:
        return 1.0

    center_a = np.array([cols_a.mean(), rows_a.mean()])
    center_b = np.array([cols_b.mean(), rows_b.mean()])

    return np.linalg.norm(center_a - center_b) / max(mask1.shape)
130
+
131
+
132
def overlap_ratio(mask1, mask2):
    """Fraction of *mask1*'s area that intersects *mask2*.

    Returns 0.0 when *mask2* is absent or *mask1* is empty.
    """
    if mask2 is None:
        return 0.0
    area = mask1.sum()
    if area <= 0:
        return 0.0
    return np.logical_and(mask1, mask2).sum() / area
137
+
138
+ # ------------------------------------------------------------
139
+ # LOAD YOLO POLYGONS
140
+ # ------------------------------------------------------------
141
def load_yolo_segmentation(label_path):
    """Parse a YOLO-seg label file into a list of {"cls", "polygon"} dicts.

    Each non-empty line is ``<cls> x1 y1 x2 y2 ...`` with coordinates
    normalised to [0, 1].  Returns an empty list when the file does not
    exist.

    Fix over the original: blank lines raised IndexError (no parts[0]) and
    an odd number of coordinates raised IndexError on coords[i + 1]; both
    are now tolerated — blank/non-numeric lines are skipped and a dangling
    unpaired value is dropped.
    """
    objects = []
    if not os.path.exists(label_path):
        return objects

    with open(label_path) as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue  # skip blank lines
            try:
                values = list(map(float, parts))
            except ValueError:
                continue  # skip lines with non-numeric tokens
            cls = int(values[0])
            coords = values[1:]
            if len(coords) % 2 != 0:
                coords = coords[:-1]  # drop a dangling unpaired value
            polygon = [(coords[i], coords[i + 1]) for i in range(0, len(coords), 2)]
            objects.append({"cls": cls, "polygon": polygon})
    return objects
154
+
155
+ # ------------------------------------------------------------
156
+ # FEATURE EXTRACTION
157
+ # ------------------------------------------------------------
158
def extract_features_from_label(label_path, H, W):
    """Build the 7-d geometry feature vector the fusion model was trained on.

    Reads the YOLO-seg label file, rasterises the polygons at the image's
    native H x W resolution, and derives lesion-presence / area / distance
    features.  The element ORDER of the returned tensor is part of the
    trained model's contract — do not reorder.
    """
    objects = load_yolo_segmentation(label_path)

    cervix_masks, scj_masks, acet_masks = [], [], []

    # Bucket every detected polygon by its class id.
    for obj in objects:
        m = polygon_to_mask(obj["polygon"], H, W)
        if obj["cls"] == CERVIX_ID:
            cervix_masks.append(m)
        elif obj["cls"] == SCJ_ID:
            scj_masks.append(m)
        elif obj["cls"] == ACET_ID:
            acet_masks.append(m)

    # Keep the largest cervix/SCJ detection; SCJ may legitimately be absent.
    # NOTE(review): the no-cervix fallback is float64 zeros while real masks
    # are uint8 — downstream arithmetic still works, but dtypes differ.
    cervix = max(cervix_masks, key=lambda m: m.sum()) if cervix_masks else np.zeros((H, W))
    scj = max(scj_masks, key=lambda m: m.sum()) if scj_masks else None

    cervix_area = mask_area(cervix)

    # Union of every acetowhite polygon.
    acet_union = np.zeros((H, W), dtype=np.uint8)
    for m in acet_masks:
        acet_union = np.maximum(acet_union, m)

    # Restrict lesions to the cervix region (element-wise mask product).
    acet_union = acet_union * cervix

    if acet_union.sum() > 0:
        # Morphological closing bridges small gaps between lesion fragments.
        acet_union = cv2.morphologyEx(
            acet_union, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8)
        )

    acet_area = mask_area(acet_union)
    # Lesion "present" only if it covers at least MIN_ACET_RATIO of the cervix.
    acet_present = int(cervix_area > 0 and acet_area / cervix_area >= MIN_ACET_RATIO)

    if acet_present:
        dist_acet_scj = centroid_distance(acet_union, scj)
        lesion_center_dist = centroid_distance(acet_union, cervix)
        overlap_lesion_scj = overlap_ratio(acet_union, scj)
    else:
        # Sentinel values when no lesion: maximal distances, zero overlap.
        dist_acet_scj = lesion_center_dist = 1.0
        overlap_lesion_scj = 0.0

    # NOTE(review): features 0 and 1 are identical by construction —
    # presumably a training-time artifact; kept to match the classifier.
    return torch.tensor([
        acet_present,
        1 if acet_present else 0,
        acet_area if acet_present else 0.0,
        acet_area / cervix_area if acet_present else 0.0,
        dist_acet_scj,
        lesion_center_dist,
        overlap_lesion_scj
    ], dtype=torch.float32)
208
+
209
+ # ------------------------------------------------------------
210
+ # SAVE VISUALIZATION FOR UI
211
+ # ------------------------------------------------------------
212
def save_overlay(image_path, label_path, out_path):
    """Render the segmentation masks on top of the image and save it.

    Cervix is tinted blue, SCJ green, acetowhite red (70/30 alpha blend),
    and the composite is written to *out_path*.
    """
    rgb = np.array(Image.open(image_path).convert("RGB"))
    H, W, _ = rgb.shape

    # Accumulate one union mask per class of interest.
    masks = {
        CERVIX_ID: np.zeros((H, W)),
        SCJ_ID: np.zeros((H, W)),
        ACET_ID: np.zeros((H, W)),
    }
    for obj in load_yolo_segmentation(label_path):
        cls = obj["cls"]
        if cls in masks:
            masks[cls] = np.maximum(masks[cls], polygon_to_mask(obj["polygon"], H, W))

    # Blend each class tint into the image where its mask is set
    # (same order and arithmetic as before: cervix, SCJ, acetowhite).
    tints = (
        (CERVIX_ID, (0, 0, 255)),
        (SCJ_ID, (0, 255, 0)),
        (ACET_ID, (255, 0, 0)),
    )
    overlay = rgb.copy()
    for cls, color in tints:
        sel = masks[cls] == 1
        overlay[sel] = 0.7 * overlay[sel] + 0.3 * np.array(color)

    Image.fromarray(overlay.astype(np.uint8)).save(out_path)
237
+
238
+ # ------------------------------------------------------------
239
+ # PUBLIC API — UI CALLS THIS
240
+ # ------------------------------------------------------------
241
def run_inference(image_path: str) -> dict:
    """Full colposcopy pipeline for one image; called by the web backend.

    Steps: YOLO segmentation -> geometry features from the saved label
    file -> fusion embedding -> logistic-regression probability ->
    human-readable decision + overlay image.

    Returns a dict with keys ``decision``, ``probability_abnormal``,
    ``acet_present`` and ``overlay_image`` (or just ``decision`` when
    segmentation produced no label file).
    """
    # save_txt=True makes Ultralytics write YOLO-format label files;
    # save=False suppresses annotated-image output.
    results = seg_model(image_path, conf=0.15, save_txt=True, save=False)

    # NOTE(review): save_dir is a fresh runs/ subdirectory managed by
    # Ultralytics — presumably unique per call; if it is ever reused,
    # a stale label file for the same image name could be picked up.
    save_dir = results[0].save_dir
    name = os.path.splitext(os.path.basename(image_path))[0]
    label_path = os.path.join(save_dir, "labels", f"{name}.txt")

    # No label file means YOLO detected nothing above the conf threshold.
    if not os.path.exists(label_path):
        return {"decision": "Segmentation failed"}

    image = Image.open(image_path).convert("RGB")
    W, H = image.size  # PIL reports (width, height)

    img_tensor = transform(image).unsqueeze(0).to(device)
    feat = extract_features_from_label(label_path, H, W)
    feat_tensor = feat.unsqueeze(0).to(device)

    # fusion_model is in eval mode, so BatchNorm1d uses running stats
    # and a batch of one is safe.
    with torch.no_grad():
        embedding = fusion_model(img_tensor, feat_tensor)

    # Column 1 of predict_proba = probability of the "abnormal" class.
    prob = clf.predict_proba(embedding.cpu().numpy())[0, 1]
    # Feature 0 is the acetowhite-present flag (0 or 1).
    acet_present = int(feat[0].item())

    # Map probability to a clinician-facing decision string; thresholds
    # 0.2 / 0.5 split normal / borderline / abnormal.
    if acet_present == 0:
        decision = "Low-confidence normal (no acet detected)" if prob < 0.2 else "Uncertain – lesion may be subtle"
    else:
        decision = "Likely Normal" if prob < 0.2 else "Borderline – Review" if prob < 0.5 else "Likely Abnormal"

    overlay_path = os.path.join(OUTPUT_DIR, f"{name}_overlay.png")
    save_overlay(image_path, label_path, overlay_path)

    return {
        "decision": decision,
        "probability_abnormal": float(prob),
        "acet_present": acet_present,
        "overlay_image": overlay_path
    }
backend/Colpo/models/fusion_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a725cea39895cb3f0acb035157bee91436e37e2da56798736f8f213d38a18e3b
3
+ size 45867942
backend/Colpo/models/logreg_classifier.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e417aa94c90767a2edf6cc1b77a9511f8574eb659ce9fa5a769d8b64ada4b3a7
3
+ size 5487
backend/Colpo/models/seg_yolov8n_best.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fea0cf93675dca9af1798b18a1da5782cd80ac1087bd4b1e89bfa54364e187f
3
+ size 6788084
backend/app.py CHANGED
@@ -11,6 +11,7 @@ os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
11
  os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
12
 
13
  import json
 
14
  import uuid
15
  import datetime
16
  import numpy as np
@@ -279,6 +280,22 @@ except Exception as e:
279
 
280
  yolo_colposcopy = YOLO("yolo_colposcopy.pt")
281
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
  # =====================================================
283
 
284
  # RESNET FEATURE EXTRACTORS FOR CIN
@@ -431,10 +448,10 @@ async def predict(model_name: str = Form(...), file: UploadFile = File(...)):
431
  print(f"Received prediction request - model: {model_name}, file: {file.filename}")
432
 
433
  # Validate model name
434
- if model_name not in ["yolo", "mwt", "cin", "histopathology"]:
435
  return JSONResponse(
436
  content={
437
- "error": f"Invalid model_name: {model_name}. Must be one of: yolo, mwt, cin, histopathology"
438
  },
439
  status_code=400
440
  )
@@ -620,9 +637,63 @@ async def predict(model_name: str = Form(...), file: UploadFile = File(...)):
620
  )
621
 
622
  return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
623
  elif model_name == "histopathology":
624
- result = predict_histopathology(image)
625
- return result
626
 
627
 
628
  else:
 
11
  os.environ["YOLO_CONFIG_DIR"] = "/tmp/Ultralytics"
12
 
13
  import json
14
+ import importlib.util
15
  import uuid
16
  import datetime
17
  import numpy as np
 
280
 
281
  yolo_colposcopy = YOLO("yolo_colposcopy.pt")
282
 
283
+ # Load the Manalife Pathora colposcopy fusion pipeline
284
+ COLPO_INFERENCE_PATH = os.path.join(os.path.dirname(__file__), "Colpo", "inference.py")
285
+ colpo_inference = None
286
+
287
+ try:
288
+ spec = importlib.util.spec_from_file_location("colpo_inference", COLPO_INFERENCE_PATH)
289
+ if spec and spec.loader:
290
+ colpo_inference = importlib.util.module_from_spec(spec)
291
+ spec.loader.exec_module(colpo_inference)
292
+ print("✅ Loaded Manalife Pathora colposcopy inference module.")
293
+ else:
294
+ raise ImportError("Invalid import spec for Colpo inference module")
295
+ except Exception as e:
296
+ colpo_inference = None
297
+ print(f"⚠️ Could not load Manalife Pathora colposcopy inference: {e}")
298
+
299
  # =====================================================
300
 
301
  # RESNET FEATURE EXTRACTORS FOR CIN
 
448
  print(f"Received prediction request - model: {model_name}, file: {file.filename}")
449
 
450
  # Validate model name
451
+ if model_name not in ["yolo", "mwt", "cin", "histopathology", "manalife_pathora_model"]:
452
  return JSONResponse(
453
  content={
454
+ "error": f"Invalid model_name: {model_name}. Must be one of: yolo, mwt, cin, histopathology, manalife_pathora_model"
455
  },
456
  status_code=400
457
  )
 
637
  )
638
 
639
  return response
640
+ elif model_name == "manalife_pathora_model":
641
+ if colpo_inference is None or not hasattr(colpo_inference, "run_inference"):
642
+ return JSONResponse(
643
+ content={"error": "Pathora colposcopy model not available on server."},
644
+ status_code=503,
645
+ )
646
+
647
+ # Save the incoming image to disk for the fusion pipeline
648
+ input_name = f"colpo_input_{uuid.uuid4().hex[:8]}.png"
649
+ input_path = os.path.join(IMAGES_DIR, input_name)
650
+ with open(input_path, "wb") as f:
651
+ f.write(contents)
652
+
653
+ try:
654
+ fusion_result = colpo_inference.run_inference(input_path)
655
+ except Exception as e:
656
+ return JSONResponse(
657
+ content={"error": f"Colposcopy fusion inference failed: {e}"},
658
+ status_code=500,
659
+ )
660
+
661
+ prob_abn = float(fusion_result.get("probability_abnormal", 0.0))
662
+ acet_present = int(fusion_result.get("acet_present", 0))
663
+ decision_text = fusion_result.get("decision", "Decision unavailable")
664
+
665
+ overlay_src = fusion_result.get("overlay_image")
666
+ overlay_url = None
667
+ if overlay_src and os.path.isfile(overlay_src):
668
+ overlay_name = f"colpo_overlay_{uuid.uuid4().hex[:8]}.png"
669
+ overlay_dst = os.path.join(IMAGES_DIR, overlay_name)
670
+ try:
671
+ shutil.copy(overlay_src, overlay_dst)
672
+ overlay_url = f"/outputs/images/{overlay_name}"
673
+ except Exception as copy_err:
674
+ print(f"⚠️ Failed to copy overlay image: {copy_err}")
675
+
676
+ # Fallback: expose the raw input if overlay is missing
677
+ if not overlay_url:
678
+ overlay_url = f"/outputs/images/{input_name}"
679
+
680
+ return {
681
+ "model_used": "Manalife_Pathora_model",
682
+ "decision": decision_text,
683
+ "probability_abnormal": round(prob_abn, 3),
684
+ "acet_present": acet_present,
685
+ "annotated_image_url": overlay_url,
686
+ "summary": {
687
+ "model_used": "Manalife_Pathora_model",
688
+ "decision": decision_text,
689
+ "probability_abnormal": round(prob_abn, 3),
690
+ "acet_present": "Yes" if acet_present else "No",
691
+ "ai_interpretation": decision_text,
692
+ },
693
+ }
694
  elif model_name == "histopathology":
695
+ result = predict_histopathology(image)
696
+ return result
697
 
698
 
699
  else:
frontend/src/components/ResultsPanel.tsx CHANGED
@@ -80,6 +80,10 @@ export function ResultsPanel({ uploadedImage, result, loading }: ResultsPanelPro
80
  confidence,
81
  } = (result || {}) as any;
82
 
 
 
 
 
83
  const handleDownload = () => {
84
  if (annotated_image_url) {
85
  const link = document.createElement("a");
@@ -145,7 +149,7 @@ export function ResultsPanel({ uploadedImage, result, loading }: ResultsPanelPro
145
  {/* Summary Section - model-specific rendering (colposcopy, cytology, histopathology) */}
146
  {summary && (() => {
147
  const model = (model_used || "").toString();
148
- const isColpo = /colpo|colposcopy/i.test(model);
149
  const isCyto = /cyto|cytology/i.test(model);
150
  const isHistoLike = /mwt|cin|histopath/i.test(model);
151
 
@@ -154,13 +158,31 @@ export function ResultsPanel({ uploadedImage, result, loading }: ResultsPanelPro
154
  const pred = (summary.prediction || summary.result || "").toString().toLowerCase();
155
  const isAbnormal = abnormalCount > 0 || /abnormal|positive|high-grade|malignant/.test(pred);
156
 
157
- // Colposcopy: show only Abnormal / Normal (based on abnormal_cells count or prediction)
158
  if (isColpo) {
 
 
 
 
 
 
 
 
 
 
159
  return (
160
  <div className="bg-gray-50 p-4 rounded-lg mb-6">
161
  <h3 className="text-lg font-semibold text-gray-800 mb-2">AI Summary</h3>
162
  <p className="text-gray-700 text-sm">
163
- <strong>Result:</strong> {isAbnormal ? "Abnormal" : "Normal"}
 
 
 
 
 
 
 
 
164
  </p>
165
  <div className="mt-3 text-gray-800 text-sm italic border-t pt-2">
166
  {summary.ai_interpretation || "No AI interpretation available."}
 
80
  confidence,
81
  } = (result || {}) as any;
82
 
83
+ const decisionText = String(result?.decision ?? summary?.decision ?? "").trim();
84
+ const probabilityAbnormal = (result?.probability_abnormal ?? summary?.probability_abnormal) as any;
85
+ const acetPresentFlag = result?.acet_present ?? summary?.acet_present;
86
+
87
  const handleDownload = () => {
88
  if (annotated_image_url) {
89
  const link = document.createElement("a");
 
149
  {/* Summary Section - model-specific rendering (colposcopy, cytology, histopathology) */}
150
  {summary && (() => {
151
  const model = (model_used || "").toString();
152
+ const isColpo = /colpo|colposcopy|pathora/i.test(model);
153
  const isCyto = /cyto|cytology/i.test(model);
154
  const isHistoLike = /mwt|cin|histopath/i.test(model);
155
 
 
158
  const pred = (summary.prediction || summary.result || "").toString().toLowerCase();
159
  const isAbnormal = abnormalCount > 0 || /abnormal|positive|high-grade|malignant/.test(pred);
160
 
161
+ // Colposcopy: render decision + probability + acet status
162
  if (isColpo) {
163
+ const probVal =
164
+ probabilityAbnormal === null || typeof probabilityAbnormal === "undefined"
165
+ ? null
166
+ : Number(probabilityAbnormal);
167
+ const probText = probVal === null || Number.isNaN(probVal) ? null : probVal.toFixed(3);
168
+ const acetLabel =
169
+ typeof acetPresentFlag === "undefined" || acetPresentFlag === null
170
+ ? "Unknown"
171
+ : Number(acetPresentFlag) ? "Yes" : "No";
172
+ const decision = decisionText || (isAbnormal ? "Abnormal" : "Normal");
173
  return (
174
  <div className="bg-gray-50 p-4 rounded-lg mb-6">
175
  <h3 className="text-lg font-semibold text-gray-800 mb-2">AI Summary</h3>
176
  <p className="text-gray-700 text-sm">
177
+ <strong>Decision:</strong> {decision}
178
+ {probText && (
179
+ <>
180
+ <br />
181
+ <strong>Probability of abnormality:</strong> {probText}
182
+ </>
183
+ )}
184
+ <br />
185
+ <strong>Acet present:</strong> {acetLabel}
186
  </p>
187
  <div className="mt-3 text-gray-800 text-sm italic border-t pt-2">
188
  {summary.ai_interpretation || "No AI interpretation available."}
frontend/src/components/UploadSection.tsx CHANGED
@@ -29,6 +29,7 @@ export function UploadSection({
29
  ],
30
  colposcopy: [
31
  { value: 'cin', label: 'Manalife_MaANIA_Colpo' },
 
32
 
33
  ],
34
  histopathology: [
 
29
  ],
30
  colposcopy: [
31
  { value: 'cin', label: 'Manalife_MaANIA_Colpo' },
32
+ { value: 'manalife_pathora_model', label: 'Manalife_Pathora_model' },
33
 
34
  ],
35
  histopathology: [