ciaochris committed on
Commit
7ac0c24
·
verified ·
1 Parent(s): fdbbb03

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -52
app.py CHANGED
@@ -1,68 +1,93 @@
1
  import gradio as gr
2
  import torch
3
- from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
4
- from torch.nn import functional as F
5
- import pandas as pd
 
6
 
7
- # Load base model for inference
8
- tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
9
- model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)
10
- labels = ['tech', 'health']
11
 
12
- def zero_shot_infer(text):
13
- inputs = tokenizer(text, return_tensors="pt")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  with torch.no_grad():
15
- logits = model(**inputs).logits
16
- probs = F.softmax(logits, dim=1)
17
- result = {labels[i]: float(probs[0][i]) for i in range(len(labels))}
18
- return result
19
 
20
- # Initialize training data storage
21
- training_data = []
 
 
 
 
 
22
 
23
- def train_step(text, label, lr, momentum):
24
- training_data.append((text, label))
25
- return f"Stored: '{text}' as '{label}' | Total examples: {len(training_data)}"
26
 
27
- def fine_tune_model():
28
- if len(training_data) < 4:
29
- return "Need more training samples (min: 4)."
30
- df = pd.DataFrame(training_data, columns=['text', 'label'])
31
- # This placeholder suggests fine-tuning with a proper pipeline externally
32
- return "Training initiated on backend. This version supports frontend data collection only."
 
 
 
 
 
 
 
 
 
 
33
 
34
- def conscious_infer(text):
35
- output = zero_shot_infer(text)
36
- max_label = max(output, key=output.get)
37
- confidence = output[max_label]
38
- # Simulate conscious inference via contextual intuition
39
- reflection = "This concept resonates with techno-consciousness." if max_label == 'tech' else "This concept radiates healing intention."
40
- return f"Label: {max_label} (Confidence: {confidence:.2f})\nInsight: {reflection}"
41
 
42
- with gr.Blocks() as demo:
43
- gr.Markdown("## Vers3Dynamics Conscious Labeling AI")
44
- gr.Markdown("### Zero-Shot + Conscious Insight Inference")
 
45
  with gr.Row():
46
- input_text = gr.Textbox(label="Input Text")
47
- output_text = gr.Textbox(label="Inference Result")
48
  infer_btn = gr.Button("Infer with Insight")
49
- infer_btn.click(conscious_infer, inputs=input_text, outputs=output_text)
50
-
51
- gr.Markdown("### Manual Conscious Training")
 
 
52
  with gr.Row():
53
- training_text = gr.Textbox(label="Training Text")
54
- label_choice = gr.Radio(choices=labels, label="Label")
55
- with gr.Row():
56
- lr = gr.Slider(0.001, 0.1, value=0.01, label="Learning Rate")
57
- momentum = gr.Slider(0.0, 1.0, value=0.0, label="Momentum")
58
- train_output = gr.Textbox(label="Training Output")
59
  train_btn = gr.Button("Store Training Sample")
60
- train_btn.click(train_step, inputs=[training_text, label_choice, lr, momentum], outputs=train_output)
61
-
62
- gr.Markdown("### Backend Fine-Tuning Placeholder")
63
- fine_tune = gr.Button("Initiate Fine-Tune")
64
- fine_output = gr.Textbox(label="Fine-Tune Response")
65
- fine_tune.click(fine_tune_model, outputs=fine_output)
66
 
67
  if __name__ == "__main__":
68
- demo.launch()
 
1
  import gradio as gr
2
  import torch
3
+ from transformers import AutoTokenizer, AutoModel
4
+ from sklearn.metrics.pairwise import cosine_similarity
5
+ import numpy as np
6
+ from collections import deque
7
 
 
 
 
 
8
 
9
+ tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
10
+ model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
11
+
12
+
13
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
+ model.to(device)
15
+
16
+
17
class MemoryQueue:
    """Bounded FIFO store of manually labeled (text, label) examples."""

    def __init__(self, max_length=100):
        # deque(maxlen=...) silently evicts the oldest sample once full.
        self.samples = deque(maxlen=max_length)

    def add(self, text, label):
        """Append one labeled example, evicting the oldest if at capacity."""
        self.samples.append((text, label))

    def get_embeddings_labels(self):
        """Embed every stored text.

        Returns a (stacked embedding tensor, tuple of labels) pair, or
        (None, None) when no samples have been stored yet.
        """
        if not self.samples:
            return None, None
        texts, labels = zip(*self.samples)
        vectors = [embed_text(t) for t in texts]
        return torch.stack(vectors), labels
30
+
31
+
32
def embed_text(text):
    """Mean-pool the model's token embeddings of `text` into a 1-D CPU tensor."""
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
    with torch.no_grad():  # pure inference — no autograd graph needed
        hidden = model(**encoded).last_hidden_state
    # Average over the token axis, drop the batch dim, move result to CPU.
    return hidden.mean(dim=1).squeeze().cpu()
 
 
37
 
38
+ # Label Insights
39
+ label_insights = {
40
+ "health": "This concept radiates healing intention.",
41
+ "tech": "This concept relates to innovation and systems thinking.",
42
+ "nature": "This concept resonates with organic intelligence.",
43
+ "spirit": "This concept aligns with inner knowing and intuitive energy."
44
+ }
45
 
46
+ # Initialize memory
47
+ memory = MemoryQueue()
 
48
 
49
# Inference and Labeling Logic
def infer_with_insight(text):
    """Predict a label for `text` by nearest stored example, plus an insight.

    Returns a (label_text, insight_text) pair for the two Gradio outputs.
    Falls back to an "Unknown" message until at least one sample is stored.
    """
    if not memory.samples:
        return "Label: Unknown", "Insight: Need more training examples."

    query = embed_text(text).unsqueeze(0)
    stored_vecs, stored_labels = memory.get_embeddings_labels()

    # Cosine similarity of the query against every stored embedding.
    scores = cosine_similarity(query.numpy(), stored_vecs.numpy())[0]
    winner = np.argmax(scores)
    confidence = scores[winner]
    predicted_label = stored_labels[winner]

    insight = label_insights.get(predicted_label, "No insight available.")

    return f"Label: {predicted_label} (Confidence: {confidence:.2f})", f"Insight: {insight}"
65
 
66
# Manual Labeling Function
def train_sample(text, label):
    """Store one manually labeled example and report the running total."""
    memory.add(text, label)
    total = len(memory.samples)
    return f"Stored '{text}' as '{label}' | Total samples: {total}"
 
 
 
70
 
71
# Gradio UI
with gr.Blocks() as app:
    gr.Markdown("# Conscious Labeling AI")

    # --- Inference section: free text in, predicted label + insight out ---
    with gr.Row():
        text_input = gr.Textbox(label="Input Text", placeholder="Type a concept like 'What is life'...")

    infer_btn = gr.Button("Infer with Insight")
    label_output = gr.Textbox(label="Label")
    insight_output = gr.Textbox(label="Insight")
    infer_btn.click(fn=infer_with_insight, inputs=text_input, outputs=[label_output, insight_output])

    gr.Markdown("### Vers3Dynamics Conscious Training")
    # --- Training section: collect (text, label) pairs into the memory queue ---
    with gr.Row():
        train_text = gr.Textbox(label="Training Text")
        # Derive choices from label_insights so the radio options and the
        # insight table cannot drift out of sync (same insertion order).
        label_choice = gr.Radio(list(label_insights), label="Label")

    train_btn = gr.Button("Store Training Sample")
    train_output = gr.Textbox(label="Training Status")
    train_btn.click(fn=train_sample, inputs=[train_text, label_choice], outputs=train_output)


# Launch the web app only when executed as a script (not on import).
if __name__ == "__main__":
    app.launch()