ANDRYHA committed on
Commit
657d2a4
·
verified ·
1 Parent(s): 982569b

Upload bench.py

Browse files
Files changed (1) hide show
  1. bench.py +42 -27
bench.py CHANGED
@@ -56,7 +56,7 @@ def auc_judd(S, F):
56
  tp[1:-1] = arange / Nfixations
57
 
58
  # Trapezoidal integration to compute AUC-Judd
59
- return np.trapz(tp, fp)
60
 
61
 
62
 
@@ -87,10 +87,10 @@ def calculate_frame_metrics(frame):
87
  pred_sm = cv2.resize(read_sm(frame['predictions_path']), (gt_120_sm.shape[1], gt_120_sm.shape[0]))
88
 
89
  return {
90
- 'sim_score': similarity(pred_sm, gt_120_sm),
91
- 'nss_score': nss(pred_sm, gt_fix),
92
- 'cc_score': cc(pred_sm, gt_120_sm),
93
- 'auc_judd_score': auc_judd(pred_sm, gt_fix),
94
  }
95
 
96
 
@@ -102,7 +102,9 @@ def calculate_metrics(video_name, temp_predictions_path, temp_gt_saliency_path,
102
 
103
  scores = []
104
  assert_func = lambda path: set([int(x.split('.')[0]) for x in listdir(path)])
105
- assert assert_func(gt_saliency_path) == assert_func(predictions_path)
 
 
106
 
107
  frames = [
108
  {
@@ -121,17 +123,23 @@ def calculate_metrics(video_name, temp_predictions_path, temp_gt_saliency_path,
121
 
122
  return {
123
  'video_name' : video_name,
124
- 'cc' : np.mean(conv_scores['cc_score']),
125
- 'sim' : np.mean(conv_scores['sim_score']),
126
- 'nss' : np.mean(conv_scores['nss_score']),
127
- 'auc_judd' : np.mean(conv_scores['auc_judd_score']),
128
  }
129
 
130
 
131
- def calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, num_workers=4):
132
 
 
 
133
  detail_result = []
134
  for video_name in tqdm(video_names):
 
 
 
 
135
  if len([x for x in detail_result if x['video_name'] == video_name]) > 0:
136
  continue
137
  short_video_name = Path(video_name).name
@@ -139,13 +147,17 @@ def calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frame
139
  gt_gaussians = str(Path(gt_extracted_frames) / f'{short_video_name}')
140
  gt_fixations = Path(gt_fixations_path) / short_video_name / 'fixations.json'
141
  cur_result = calculate_metrics(video_name, model_output, gt_gaussians, gt_fixations, num_workers)
142
- detail_result += [cur_result]
143
- np.save("tmp2.npy", detail_result)
 
 
 
 
144
 
145
  return detail_result
146
 
147
 
148
- def make_bench(model_extracted_frames, gt_extracted_frames, gt_fixations_path, split_json='TrainTestSplit.json', results_json='results.json', mode='public_test', num_workers=4):
149
 
150
  print(num_workers, 'worker(s)')
151
  print(f'Testing {model_extracted_frames}')
@@ -162,28 +174,30 @@ def make_bench(model_extracted_frames, gt_extracted_frames, gt_fixations_path, s
162
  splits = set(json.load(f)[mode])
163
 
164
  video_names = [name for name in video_names if name in splits]
 
165
 
166
- detail_result = calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, num_workers)
167
- detail_result = sorted(detail_result, key=lambda res: res['video_name'])
168
 
169
- result = {'cc' : [], 'sim' : [], 'nss' : [], 'auc_judd' : []}
170
- for i in result:
171
- for j in detail_result:
172
- result[i].append(j[i])
173
 
174
- with open(results_json, 'w') as f:
175
- json.dump(result, f)
176
 
177
- model_res = {'Model': [model_extracted_frames], 'Mode': [mode]}
178
- [model_res.update({key: [np.mean(result[key])]}) for key in result.keys()]
179
 
180
- print(model_res)
181
 
182
 
183
 
184
  def extract_frames(input_dir, output_dir, split_json='TrainTestSplit.json', mode='public_test', num_workers=4):
185
 
186
  def poolfunc(x):
 
187
  if x.stem not in splits[mode]:
188
  return
189
  dst_vid = dst / x.stem
@@ -201,6 +215,7 @@ def extract_frames(input_dir, output_dir, split_json='TrainTestSplit.json', mode
201
  dst = Path(output_dir)
202
  dst.mkdir(exist_ok=True)
203
  videos = list(root.iterdir())
 
204
  pbar = tqdm(total=len(splits[mode]))
205
  with ThreadPool(num_workers) as p:
206
  p.map(poolfunc, videos)
@@ -224,7 +239,7 @@ if __name__ == '__main__':
224
  parser.add_argument('--split_json', default='./TrainTestSplit.json',
225
  help='Json from dataset page with names splitting')
226
 
227
- parser.add_argument('--results_json', default='./results.json')
228
  parser.add_argument('--mode', default='public_test', help='public_test/private_test')
229
  parser.add_argument('--num_workers', type=int, default=4)
230
 
@@ -237,4 +252,4 @@ if __name__ == '__main__':
237
  print("Extracting", args.gt_video_predictions, 'to', args.gt_extracted_frames)
238
  extract_frames(args.gt_video_predictions, args.gt_extracted_frames, args.split_json, args.mode, args.num_workers)
239
 
240
- make_bench(args.model_extracted_frames, args.gt_extracted_frames, args.gt_fixations_path, args.split_json, args.results_json, args.mode, args.num_workers)
 
56
  tp[1:-1] = arange / Nfixations
57
 
58
  # Trapezoidal integration to compute AUC-Judd
59
+ return np.trapezoid(tp, fp)
60
 
61
 
62
 
 
87
  pred_sm = cv2.resize(read_sm(frame['predictions_path']), (gt_120_sm.shape[1], gt_120_sm.shape[0]))
88
 
89
  return {
90
+ 'sim_score': float(similarity(pred_sm, gt_120_sm)),
91
+ 'nss_score': float(nss(pred_sm, gt_fix)),
92
+ 'cc_score': float(cc(pred_sm, gt_120_sm)),
93
+ 'auc_judd_score': float(auc_judd(pred_sm, gt_fix)),
94
  }
95
 
96
 
 
102
 
103
  scores = []
104
  assert_func = lambda path: set([int(x.split('.')[0]) for x in listdir(path)])
105
+ gt_set = assert_func(gt_saliency_path)
106
+ pred_set = assert_func(predictions_path)
107
+ assert gt_set == pred_set, f'{video_name}: {len(gt_set)}, {len(pred_set)}'
108
 
109
  frames = [
110
  {
 
123
 
124
  return {
125
  'video_name' : video_name,
126
+ 'cc' : conv_scores['cc_score'],
127
+ 'sim' : conv_scores['sim_score'],
128
+ 'nss' : conv_scores['nss_score'],
129
+ 'auc_judd' : conv_scores['auc_judd_score'],
130
  }
131
 
132
 
133
+ def calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, results_path, num_workers=4):
134
 
135
+ results_path = Path(results_path)
136
+ results_path.mkdir(exist_ok=True)
137
  detail_result = []
138
  for video_name in tqdm(video_names):
139
+ video_res_path = results_path / f'{video_name}.json'
140
+ if video_res_path.exists():
141
+ continue
142
+
143
  if len([x for x in detail_result if x['video_name'] == video_name]) > 0:
144
  continue
145
  short_video_name = Path(video_name).name
 
147
  gt_gaussians = str(Path(gt_extracted_frames) / f'{short_video_name}')
148
  gt_fixations = Path(gt_fixations_path) / short_video_name / 'fixations.json'
149
  cur_result = calculate_metrics(video_name, model_output, gt_gaussians, gt_fixations, num_workers)
150
+
151
+ with open(video_res_path, 'w') as f:
152
+ json.dump(cur_result, f)
153
+
154
+ # detail_result += [cur_result]
155
+ # np.save("tmp2.npy", detail_result)
156
 
157
  return detail_result
158
 
159
 
160
+ def make_bench(model_extracted_frames, gt_extracted_frames, gt_fixations_path, split_json='TrainTestSplit.json', results_path='results', mode='public_test', num_workers=4):
161
 
162
  print(num_workers, 'worker(s)')
163
  print(f'Testing {model_extracted_frames}')
 
174
  splits = set(json.load(f)[mode])
175
 
176
  video_names = [name for name in video_names if name in splits]
177
+ # print(video_names[28])
178
 
179
+ detail_result = calculate_all_videos(video_names, model_extracted_frames, gt_extracted_frames, gt_fixations_path, results_path, num_workers)
180
+ # detail_result = sorted(detail_result, key=lambda res: res['video_name'])
181
 
182
+ # result = {'cc' : [], 'sim' : [], 'nss' : [], 'auc_judd' : []}
183
+ # for i in result:
184
+ # for j in detail_result:
185
+ # result[i].append(j[i])
186
 
187
+ # with open(results_json, 'w') as f:
188
+ # json.dump(result, f)
189
 
190
+ # model_res = {'Model': [model_extracted_frames], 'Mode': [mode]}
191
+ # [model_res.update({key: [np.mean(result[key])]}) for key in result.keys()]
192
 
193
+ # print(model_res)
194
 
195
 
196
 
197
  def extract_frames(input_dir, output_dir, split_json='TrainTestSplit.json', mode='public_test', num_workers=4):
198
 
199
  def poolfunc(x):
200
+ # print(x.stem)
201
  if x.stem not in splits[mode]:
202
  return
203
  dst_vid = dst / x.stem
 
215
  dst = Path(output_dir)
216
  dst.mkdir(exist_ok=True)
217
  videos = list(root.iterdir())
218
+ # print(videos)
219
  pbar = tqdm(total=len(splits[mode]))
220
  with ThreadPool(num_workers) as p:
221
  p.map(poolfunc, videos)
 
239
  parser.add_argument('--split_json', default='./TrainTestSplit.json',
240
  help='Json from dataset page with names splitting')
241
 
242
+ parser.add_argument('--results_path', default='./results.json')
243
  parser.add_argument('--mode', default='public_test', help='public_test/private_test')
244
  parser.add_argument('--num_workers', type=int, default=4)
245
 
 
252
  print("Extracting", args.gt_video_predictions, 'to', args.gt_extracted_frames)
253
  extract_frames(args.gt_video_predictions, args.gt_extracted_frames, args.split_json, args.mode, args.num_workers)
254
 
255
+ make_bench(args.model_extracted_frames, args.gt_extracted_frames, args.gt_fixations_path, args.split_json, args.results_path, args.mode, args.num_workers)