Sarjinkhan2003 committed
Commit 8dfc2e8 · verified · 1 Parent(s): 57bd8b4

Upload folder using huggingface_hub

Files changed (6):
  1. README.md +12 -0
  2. config.json +28 -0
  3. pytorch_model.bin +3 -0
  4. tfidf_weights.json +0 -0
  5. tokenizer.json +0 -0
  6. tokenizer_config.json +16 -0
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ tags: [text-classification, emotion-detection, roberta, data-augmentation, llm, hci]
+ datasets: [dair-ai/emotion]
+ ---
+ # MoodShift — RoBERTa+ESA+TF-IDF+FL with LLM Data Augmentation
+ Novel contribution: LLM-based minority-class augmentation via Groq (llama-3.3-70b) with self-consistency filtering for label fidelity.
+
+ Test Accuracy: **0.9265** | Macro F1: **0.8926**
+
+ Baseline (no augmentation): Acc=0.9235 | F1=0.8831
+
+ ICCA 2026 HCI Research — MoodShift Adaptive Chatbot
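The augmentation recipe named in this README (LLM generation plus self-consistency filtering for label fidelity) can be sketched as below. This is a hypothetical illustration, not the repo's training code: `llm_complete` stands in for a Groq llama-3.3-70b chat-completion call, and the prompts, vote count `k`, and acceptance threshold `min_votes` are assumptions.

```python
# Hypothetical sketch of minority-class augmentation with self-consistency
# filtering. `llm_complete` stands in for a Groq llama-3.3-70b chat call;
# prompts, k, and min_votes are assumptions, not this repo's exact setup.
from collections import Counter

LABELS = ["sadness", "joy", "love", "anger", "fear", "surprise"]

def llm_complete(prompt: str) -> str:
    """Placeholder for one LLM completion (e.g. via Groq's chat API)."""
    raise NotImplementedError

def generate_candidate(label: str) -> str:
    # Ask the LLM for one new training sentence expressing `label`.
    return llm_complete(
        f"Write one short, tweet-like sentence expressing the emotion "
        f"'{label}'. Return only the sentence."
    ).strip()

def self_consistent(text: str, target: str, k: int = 5, min_votes: int = 4) -> bool:
    # Re-label the candidate k independent times; keep it only if the
    # intended label wins a clear majority (label-fidelity check).
    votes = Counter()
    for _ in range(k):
        answer = llm_complete(
            f'Classify the emotion of: "{text}"\n'
            f"Answer with exactly one of: {', '.join(LABELS)}."
        ).strip().lower()
        if answer in LABELS:
            votes[answer] += 1
    return votes[target] >= min_votes

def augment_minority_class(label: str, n_needed: int) -> list[str]:
    kept: list[str] = []
    while len(kept) < n_needed:
        candidate = generate_candidate(label)
        if self_consistent(candidate, label):
            kept.append(candidate)
    return kept
```

Only candidates whose intended label survives repeated independent re-classification enter the training set; rejected ones are simply regenerated.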
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "model_type": "emotion_aware_roberta_esa_tfidf_focal_augmented",
+   "backbone": "roberta-base",
+   "num_labels": 6,
+   "id2label": {
+     "0": "sadness",
+     "1": "joy",
+     "2": "love",
+     "3": "anger",
+     "4": "fear",
+     "5": "surprise"
+   },
+   "label2id": {
+     "sadness": 0,
+     "joy": 1,
+     "love": 2,
+     "anger": 3,
+     "fear": 4,
+     "surprise": 5
+   },
+   "max_len": 128,
+   "augmentation": "llm_groq_llama33_70b_self_consistency_filtered",
+   "test_accuracy": 0.9265,
+   "test_macro_f1": 0.8926,
+   "test_weighted_f1": 0.9288,
+   "vs_baseline_acc": 0.9235,
+   "vs_baseline_f1": 0.8831
+ }
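Since the `model_type` above is a project-specific identifier rather than a registered `transformers` architecture, the safest way to consume this config is as plain JSON. A minimal sketch:

```python
# Minimal sketch: read the custom config.json directly (its model_type is
# not a registered transformers architecture, so AutoConfig-style loading
# is not assumed to work here).
import json

with open("config.json") as f:
    cfg = json.load(f)

# JSON object keys are strings; convert to ints for convenient lookup.
id2label = {int(i): name for i, name in cfg["id2label"].items()}
assert cfg["num_labels"] == len(id2label) == 6

print(id2label[4])     # fear
print(cfg["max_len"])  # 128: sequence length used for truncation/padding
```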
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60f3e7cae798f650dfab9a32af0bc410db87555e0135c5617db48d68bd028a52
+ size 511286463
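These three lines are a Git LFS pointer, not the weights themselves; the ~511 MB checkpoint is materialized by `git lfs pull` or a `huggingface_hub` download. Assuming it is a plain state dict saved with `torch.save(model.state_dict(), ...)` (not confirmed by the diff), loading would look like:

```python
# Sketch: load the resolved checkpoint after the LFS download. Assumes a
# plain state dict saved with torch.save(model.state_dict(), ...).
import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")
print(f"{len(state_dict)} tensors")  # RoBERTa backbone + custom head parameters
```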
tfidf_weights.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "is_local": false,
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
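With `tokenizer_class` set to `RobertaTokenizer`, these files load through the standard `transformers` API. A usage sketch (the repo id below is illustrative; the diff page does not show the full hub path):

```python
# Usage sketch; "Sarjinkhan2003/MoodShift" is an illustrative repo id,
# not confirmed by this diff — substitute the actual hub path.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Sarjinkhan2003/MoodShift")
enc = tok(
    "i feel a little mellow today",
    truncation=True,
    max_length=128,  # config.json's max_len (model_max_length is 512)
    padding="max_length",
    return_tensors="pt",
)
print(enc["input_ids"].shape)  # torch.Size([1, 128])
```

Note that `model_max_length` is the backbone's 512-token limit, while training used the `max_len` of 128 recorded in config.json.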