qiushuocheng committed on
Commit
d53e869
·
1 Parent(s): eece957
LaSA/.vscode/launch.json CHANGED
@@ -10,7 +10,7 @@
10
  "cwd": "/root/autodl-tmp/workshop/LaSA/",
11
  "args": [
12
  "--dataset",
13
- "PKU-subject",
14
  "--cuda",
15
  "0"
16
  ]
 
10
  "cwd": "/root/autodl-tmp/workshop/LaSA/",
11
  "args": [
12
  "--dataset",
13
+ "BABEL3",
14
  "--cuda",
15
  "0"
16
  ]
LaSA/config/BABEL3/config.yaml CHANGED
@@ -2,7 +2,7 @@ dataset: BABEL3
2
  dataset_dir: ./dataset
3
  csv_dir: ./csv
4
 
5
- batch_size: 4
6
  boundary_th: 0.5
7
  ce: true
8
  ce_weight: 1.0
@@ -14,11 +14,11 @@ gstmse: true
14
  gstmse_index: feature
15
  gstmse_weight: 1.0
16
  lambda_b: 0.1
17
- learning_rate: 0.001
18
  optimizer: Adam
19
  momentum: 0.9
20
  dampening: 0.0
21
- weight_decay: 0.0001
22
  nesterov: true
23
  tmse: false
24
  tmse_weight: 0.15
@@ -33,7 +33,7 @@ n_stages_asb: 2
33
  n_stages_brb: 3
34
  SFI_layer: [1, 2 ,3, 4, 5, 6, 7, 8, 9]
35
 
36
- max_epoch: 300
37
  num_workers: 0
38
  iou_thresholds:
39
  - 0.1
 
2
  dataset_dir: ./dataset
3
  csv_dir: ./csv
4
 
5
+ batch_size: 8
6
  boundary_th: 0.5
7
  ce: true
8
  ce_weight: 1.0
 
14
  gstmse_index: feature
15
  gstmse_weight: 1.0
16
  lambda_b: 0.1
17
+ learning_rate: 0.0001
18
  optimizer: Adam
19
  momentum: 0.9
20
  dampening: 0.0
21
+ weight_decay: 0.001
22
  nesterov: true
23
  tmse: false
24
  tmse_weight: 0.15
 
33
  n_stages_brb: 3
34
  SFI_layer: [1, 2 ,3, 4, 5, 6, 7, 8, 9]
35
 
36
+ max_epoch: 150
37
  num_workers: 0
38
  iou_thresholds:
39
  - 0.1
LaSA/evaluate.py CHANGED
@@ -133,7 +133,7 @@ def main():
133
  if args.model is not None:
134
  state_dict = torch.load(args.model,map_location=lambda storage, loc: storage.cuda(device))
135
  else:
136
- state_dict = torch.load(os.path.join(result_path, "best_test_F1_0.5_model.prm"), map_location=lambda storage, loc: storage.cuda(device))
137
 
138
  model.load_state_dict(state_dict, False)
139
 
 
133
  if args.model is not None:
134
  state_dict = torch.load(args.model,map_location=lambda storage, loc: storage.cuda(device))
135
  else:
136
+ state_dict = torch.load(os.path.join(result_path, "best_test_map_model.prm"), map_location=lambda storage, loc: storage.cuda(device))
137
 
138
  model.load_state_dict(state_dict, False)
139
 
LaSA/libs/__pycache__/metric.cpython-310.pyc CHANGED
Binary files a/LaSA/libs/__pycache__/metric.cpython-310.pyc and b/LaSA/libs/__pycache__/metric.cpython-310.pyc differ
 
LaSA/libs/metric.py CHANGED
@@ -312,7 +312,7 @@ class ScoreMeter(object):
312
  seg = getActLoc(self.frm_preds)
313
  print (len(seg))
314
  dmap_list = []
315
- annotation_path = './dataset/BABEL1/val_split1.pkl'
316
  for iou in (0.1,0.2,0.3,0.4,0.5):
317
  print("Testing for IoU %f" % iou)
318
  dmap_list.append(
@@ -605,7 +605,7 @@ def getActLoc(
605
  outer_score = np.mean(vid_cas[outer_temp_list])
606
  c_score = inner_score - 0.6 * outer_score
607
  vid_cls_proposal.append([i, s[j], e[j] + 1, c_score])
608
- pick_idx = NonMaximumSuppression(np.array(vid_cls_proposal), 0.2)
609
  nms_vid_cls_proposal = [vid_cls_proposal[k] for k in pick_idx]
610
  c_temp += nms_vid_cls_proposal
611
  if len(c_temp) > 0:
 
312
  seg = getActLoc(self.frm_preds)
313
  print (len(seg))
314
  dmap_list = []
315
+ annotation_path = './dataset/BABEL3/val_split3.pkl'
316
  for iou in (0.1,0.2,0.3,0.4,0.5):
317
  print("Testing for IoU %f" % iou)
318
  dmap_list.append(
 
605
  outer_score = np.mean(vid_cas[outer_temp_list])
606
  c_score = inner_score - 0.6 * outer_score
607
  vid_cls_proposal.append([i, s[j], e[j] + 1, c_score])
608
+ pick_idx = NonMaximumSuppression(np.array(vid_cls_proposal), 0.5)
609
  nms_vid_cls_proposal = [vid_cls_proposal[k] for k in pick_idx]
610
  c_temp += nms_vid_cls_proposal
611
  if len(c_temp) > 0:
LaSA/run.sh CHANGED
@@ -1,3 +1,5 @@
1
  python train.py --dataset PKU-subject --cuda 0
2
 
3
- python train.py --dataset BABEL1 --cuda 0
 
 
 
1
  python train.py --dataset PKU-subject --cuda 0
2
 
3
+ python train.py --dataset BABEL3 --cuda 0
4
+ python train.py --dataset BABEL1 --cuda 0
5
+
LaSA/train.py CHANGED
@@ -23,6 +23,8 @@ from libs.optimizer import get_optimizer
23
  from libs.transformer import TempDownSamp, ToTensor
24
  from prompt.text_prompt import TextCLIP, text_prompt_for_class, text_prompt_for_joint
25
 
 
 
26
  def get_arguments() -> argparse.Namespace:
27
  """
28
  parse all the arguments from command line inteface
@@ -49,7 +51,7 @@ def import_class(import_str):
49
  except AttributeError:
50
  raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))
51
 
52
- def change_label_score(best_test, train_loss, epoch, cls_acc, edit_score, f1s):
53
 
54
  best_test['train_loss'] = train_loss
55
  best_test['epoch'] = epoch
@@ -60,6 +62,7 @@ def change_label_score(best_test, train_loss, epoch, cls_acc, edit_score, f1s):
60
  best_test['f1s@0.5'] = f1s[2]
61
  best_test['f1s@0.75'] = f1s[3]
62
  best_test['f1s@0.9'] = f1s[4]
 
63
 
64
  def main() -> None:
65
 
@@ -208,7 +211,7 @@ def main() -> None:
208
  best_test_acc = {'epoch':0,'train_loss':0,'cls_acc':0,'edit':0,'f1s@0.1':0,'f1s@0.25':0,'f1s@0.5':0,'f1s@0.75':0,'f1s@0.9':0,'mAP':0}
209
  best_test_F1_10 = best_test_acc.copy()
210
  best_test_F1_50 = best_test_acc.copy()
211
-
212
  log = pd.DataFrame(columns=columns)
213
  # ['epoch', 'lr', 'train_loss', 'val_loss', 'cls_acc', 'edit', 'f1s@0.1', 'f1s@0.25', 'f1s@0.5', 'f1s@0.75', 'f1s@0.9', 'bound_acc', 'precision', 'recall', 'bound_f1s'] [Columns: [epoch, lr, train_loss, val_loss, cls_acc, edit, f1s@0.1, f1s@0.25, f1s@0.5, f1
214
  if args.resume:
@@ -351,7 +354,12 @@ def main() -> None:
351
  model.state_dict(),
352
  os.path.join(result_path, 'best_test_F1_0.5_model.prm')
353
  )
354
-
 
 
 
 
 
355
  # save checkpoint every epoch
356
  save_checkpoint(result_path, epoch, model, optimizer, best_loss)
357
 
@@ -412,6 +420,8 @@ def main() -> None:
412
  print('{}'.format(best_test_F1_10))
413
  print('\n---------------------------best_test_F1_50---------------------------\n')
414
  print('{}'.format(best_test_F1_50))
 
 
415
  print('\n---------------------------all_train_time---------------------------\n')
416
  print('all_train_time: {:.2f}min'.format((time.time() - start_start) / 60))
417
 
 
23
  from libs.transformer import TempDownSamp, ToTensor
24
  from prompt.text_prompt import TextCLIP, text_prompt_for_class, text_prompt_for_joint
25
 
26
+ import numpy as np
27
+
28
  def get_arguments() -> argparse.Namespace:
29
  """
30
  parse all the arguments from command line inteface
 
51
  except AttributeError:
52
  raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))
53
 
54
+ def change_label_score(best_test, train_loss, epoch, cls_acc, edit_score, f1s,map = [0]):
55
 
56
  best_test['train_loss'] = train_loss
57
  best_test['epoch'] = epoch
 
62
  best_test['f1s@0.5'] = f1s[2]
63
  best_test['f1s@0.75'] = f1s[3]
64
  best_test['f1s@0.9'] = f1s[4]
65
+ best_test['map'] = np.mean(map)
66
 
67
  def main() -> None:
68
 
 
211
  best_test_acc = {'epoch':0,'train_loss':0,'cls_acc':0,'edit':0,'f1s@0.1':0,'f1s@0.25':0,'f1s@0.5':0,'f1s@0.75':0,'f1s@0.9':0,'mAP':0}
212
  best_test_F1_10 = best_test_acc.copy()
213
  best_test_F1_50 = best_test_acc.copy()
214
+ best_test_MAP = best_test_acc.copy()
215
  log = pd.DataFrame(columns=columns)
216
  # ['epoch', 'lr', 'train_loss', 'val_loss', 'cls_acc', 'edit', 'f1s@0.1', 'f1s@0.25', 'f1s@0.5', 'f1s@0.75', 'f1s@0.9', 'bound_acc', 'precision', 'recall', 'bound_f1s'] [Columns: [epoch, lr, train_loss, val_loss, cls_acc, edit, f1s@0.1, f1s@0.25, f1s@0.5, f1
217
  if args.resume:
 
354
  model.state_dict(),
355
  os.path.join(result_path, 'best_test_F1_0.5_model.prm')
356
  )
357
+ if np.mean(maps) > best_test_MAP['mAP']:
358
+ change_label_score(best_test_MAP, train_loss, epoch, cls_acc, edit_score, segment_f1s,maps)
359
+ torch.save(
360
+ model.state_dict(),
361
+ os.path.join(result_path, 'best_test_map_model.prm')
362
+ )
363
  # save checkpoint every epoch
364
  save_checkpoint(result_path, epoch, model, optimizer, best_loss)
365
 
 
420
  print('{}'.format(best_test_F1_10))
421
  print('\n---------------------------best_test_F1_50---------------------------\n')
422
  print('{}'.format(best_test_F1_50))
423
+ print('\n---------------------------best_test_MAP---------------------------\n')
424
+ print('{}'.format(best_test_MAP))
425
  print('\n---------------------------all_train_time---------------------------\n')
426
  print('all_train_time: {:.2f}min'.format((time.time() - start_start) / 60))
427
 
test.txt DELETED
@@ -1 +0,0 @@
1
- test
 
 
workload_check.sh DELETED
@@ -1,35 +0,0 @@
1
- #!/bin/bash
2
-
3
- # Set your Git username or email (must match your Git commits)
4
- AUTHOR="qiushuocheng"
5
-
6
- echo "📊 Git Activity Report (Author: $AUTHOR)"
7
- echo "========================================"
8
-
9
- # Initialize counters
10
- total_add=0
11
- total_del=0
12
-
13
- # Loop through the last 7 days (from 6 days ago to today)
14
- for i in {6..0}; do
15
- day=$(date -v -${i}d "+%Y-%m-%d")
16
-
17
- # Get additions and deletions for that day
18
- stats=$(git log --since="$day 00:00" --until="$day 23:59" --author="$AUTHOR" --pretty=tformat: --numstat |
19
- awk '{ add += $1; del += $2 } END { print add, del }')
20
-
21
- add=$(echo $stats | cut -d' ' -f1)
22
- del=$(echo $stats | cut -d' ' -f2)
23
- sum=$((add + del))
24
-
25
- printf "📅 %s | ➕ %-5s | ➖ %-5s | 🧮 Total: %-5s\n" "$day" "$add" "$del" "$sum"
26
-
27
- total_add=$((total_add + add))
28
- total_del=$((total_del + del))
29
- done
30
-
31
- echo "========================================"
32
- echo "🧾 Summary for Last 7 Days:"
33
- echo "✅ Total Added: $total_add lines"
34
- echo "❌ Total Deleted: $total_del lines"
35
- echo "📦 Total Changed: $((total_add + total_del)) lines"