Update app requirements and code

#4
by C-Achard - opened
Files changed (5) hide show
  1. README.md +0 -1
  2. app.py +3 -4
  3. detection_utils.py +1 -3
  4. requirements.txt +12 -2
  5. ui_utils.py +61 -113
README.md CHANGED
@@ -4,7 +4,6 @@ emoji: πŸ•πŸπŸ΄πŸ˜»πŸ˜πŸ†πŸΏπŸ‚πŸ¦˜πŸ¦’
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: gradio
7
- python_version: 3.12
8
  sdk_version: 6.11.0
9
  app_file: app.py
10
  pinned: false
 
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: gradio
 
7
  sdk_version: 6.11.0
8
  app_file: app.py
9
  pinned: false
app.py CHANGED
@@ -11,7 +11,7 @@ import gradio as gr
11
  import deeplabcut
12
  import dlclibrary
13
  import dlclive
14
- # import transformers
15
 
16
  from PIL import Image, ImageColor, ImageFont, ImageDraw
17
  import requests
@@ -187,7 +187,6 @@ demo = gr.Interface(predict_pipeline,
187
  title=gr_title,
188
  description=gr_description,
189
  examples=examples,
190
- )
191
 
192
- demo.queue()
193
- demo.launch(theme="huggingface")
 
11
  import deeplabcut
12
  import dlclibrary
13
  import dlclive
14
+ import transformers
15
 
16
  from PIL import Image, ImageColor, ImageFont, ImageDraw
17
  import requests
 
187
  title=gr_title,
188
  description=gr_description,
189
  examples=examples,
190
+ theme="huggingface")
191
 
192
+ demo.launch(enable_queue=True, share=True)
 
detection_utils.py CHANGED
@@ -35,9 +35,7 @@ def predict_md(im,
35
  'custom', #model
36
  megadetector_model, # args for callable model
37
  force_reload=True,
38
- device=md_device,
39
- trust_repo=True
40
- )
41
 
42
  # send model to gpu if possible
43
  if (md_device == torch.device('cuda')):
 
35
  'custom', #model
36
  megadetector_model, # args for callable model
37
  force_reload=True,
38
+ device=md_device)
 
 
39
 
40
  # send model to gpu if possible
41
  if (md_device == torch.device('cuda')):
requirements.txt CHANGED
@@ -1,10 +1,20 @@
 
 
 
1
  gradio
2
- gitpython>=3.1.30
 
 
 
3
  seaborn
4
- deeplabcut[modelzoo,tf]>=3.0.0rc14
5
  deeplabcut-live
6
  ruamel.yaml==0.17.21
7
  dlclibrary
 
8
  humanfriendly
 
 
 
9
  psutil
10
  ultralytics
 
1
+ ipython
2
+ transformers
3
+ Pillow
4
  gradio
5
+ numpy
6
+ torch
7
+ torchvision
8
+ timm
9
  seaborn
10
+ deeplabcut[tf,modelzoo]>=3.0.0rc13
11
  deeplabcut-live
12
  ruamel.yaml==0.17.21
13
  dlclibrary
14
+ argparse
15
  humanfriendly
16
+ pandas
17
+ statistics
18
+ tqdm
19
  psutil
20
  ultralytics
ui_utils.py CHANGED
@@ -1,126 +1,74 @@
1
  import gradio as gr
2
 
3
-
4
- def gradio_inputs_for_MD_DLC(md_models_list, dlc_models_list):
 
 
5
  # Input image
6
- gr_image_input = gr.Image(type="pil", label="Input Image")
7
-
8
- # Models
9
- gr_mega_model_input = gr.Dropdown(
10
- choices=md_models_list,
11
- value="md_v5a",
12
- type="value",
13
- label="Select Detector model",
14
- )
15
 
16
- gr_dlc_model_input = gr.Dropdown(
17
- choices=dlc_models_list,
18
- value="superanimal_quadruped_dlcrnet",
19
- type="value",
20
- label="Select DeepLabCut model",
21
- )
22
 
 
 
 
 
 
 
 
 
 
 
23
  # Other inputs
24
- gr_dlc_only_checkbox = gr.Checkbox(
25
- value=False,
26
- label="Run DLClive only, directly on input image?",
27
- )
28
-
29
- gr_str_labels_checkbox = gr.Checkbox(
30
- value=True,
31
- label="Show bodypart labels?",
32
- )
33
-
34
- # Gradio Slider signature is (minimum, maximum, value, step, ...)
35
- gr_slider_conf_bboxes = gr.Slider(
36
- minimum=0,
37
- maximum=1,
38
- value=0.2,
39
- step=0.05,
40
- label="Set confidence threshold for animal detections",
41
- )
42
-
43
- gr_slider_conf_keypoints = gr.Slider(
44
- minimum=0,
45
- maximum=1,
46
- value=0.4,
47
- step=0.05,
48
- label="Set confidence threshold for keypoints",
49
- )
50
-
51
- # Data viz
52
- gr_keypt_color = gr.ColorPicker(
53
- value="#862db7",
54
- label="Choose color for keypoint label",
55
- )
56
-
57
- gr_labels_font_style = gr.Dropdown(
58
- choices=["amiko", "animals", "nature", "painter", "zen"],
59
- value="amiko",
60
- type="value",
61
- label="Select keypoint label font",
62
- )
63
-
64
- gr_slider_font_size = gr.Slider(
65
- minimum=5,
66
- maximum=30,
67
- value=8,
68
- step=1,
69
- label="Set font size",
70
- )
71
-
72
- gr_slider_marker_size = gr.Slider(
73
- minimum=1,
74
- maximum=20,
75
- value=9,
76
- step=1,
77
- label="Set marker size",
78
- )
79
-
80
- return [
81
- gr_image_input,
82
- gr_mega_model_input,
83
- gr_dlc_model_input,
84
- gr_dlc_only_checkbox,
85
- gr_str_labels_checkbox,
86
- gr_slider_conf_bboxes,
87
- gr_slider_conf_keypoints,
88
- gr_labels_font_style,
89
- gr_slider_font_size,
90
- gr_keypt_color,
91
- gr_slider_marker_size,
92
- ]
93
-
94
-
95
  def gradio_outputs_for_MD_DLC():
96
- gr_image_output = gr.Image(type="pil", label="Output Image")
 
97
  gr_file_download = gr.File(label="Download JSON file")
98
- return [gr_image_output, gr_file_download]
99
-
100
 
 
 
101
  def gradio_description_and_examples():
102
  title = "DeepLabCut Model Zoo SuperAnimals"
103
- description = (
104
- "Test the SuperAnimal models from the "
105
- "<a href='http://www.mackenziemathislab.org/dlc-modelzoo'>"
106
- "DeepLabCut ModelZoo Project</a>, and read more on arXiv: "
107
- "https://arxiv.org/abs/2203.07436! Simply upload an image and see how it does. "
108
- "Want to run on videos on the cloud or locally? See the "
109
- "<a href='http://www.mackenziemathislab.org/dlc-modelzoo'>DeepLabCut ModelZoo</a>."
110
- )
111
 
112
- examples = [[
113
- "examples/dog.jpeg",
114
- "md_v5a",
115
- "superanimal_quadruped_dlcrnet",
116
- False,
117
- True,
118
- 0.5,
119
- 0.0,
120
- "amiko",
121
- 9,
122
- "#ff0000",
123
- 3,
124
- ]]
125
 
126
- return [title, description, examples]
 
1
  import gradio as gr
2
 
3
+ ##############################
4
+ def gradio_inputs_for_MD_DLC(md_models_list, # list(MD_models_dict.keys())
5
+ dlc_models_list, # list(DLC_models_dict.keys())
6
+ ):
7
  # Input image
8
+ gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
 
 
 
 
 
 
 
 
9
 
 
 
 
 
 
 
10
 
11
+ # Models
12
+ gr_mega_model_input = gr.inputs.Dropdown(choices=md_models_list,
13
+ default='md_v5a', # default option
14
+ type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
15
+ label='Select Detector model')
16
+ gr_dlc_model_input = gr.inputs.Dropdown(choices=dlc_models_list, # choices
17
+ default='superanimal_quadruped_dlcrnet', # default option
18
+ type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
19
+ label='Select DeepLabCut model')
20
+
21
  # Other inputs
22
+ gr_dlc_only_checkbox = gr.inputs.Checkbox(False,
23
+ label='Run DLClive only, directly on input image?')
24
+ gr_str_labels_checkbox = gr.inputs.Checkbox(True,
25
+ label='Show bodypart labels?')
26
+
27
+ gr_slider_conf_bboxes = gr.inputs.Slider(0,1,.05,0.2,
28
+ label='Set confidence threshold for animal detections')
29
+ gr_slider_conf_keypoints = gr.inputs.Slider(0,1,.05,0.4,
30
+ label='Set confidence threshold for keypoints')
31
+
32
+ # Data viz
33
+ gr_keypt_color = gr.ColorPicker(value ="#862db7", label="choose color for keypoint label")
34
+
35
+ gr_labels_font_style = gr.inputs.Dropdown(choices=['amiko', 'animals', 'nature', 'painter', 'zen'],
36
+ default='amiko',
37
+ type='value',
38
+ label='Select keypoint label font')
39
+ gr_slider_font_size = gr.inputs.Slider(5,30,1,8,
40
+ label='Set font size')
41
+ gr_slider_marker_size = gr.inputs.Slider(1,20,1,9,
42
+ label='Set marker size')
43
+
44
+ # list of inputs
45
+ return [gr_image_input,
46
+ gr_mega_model_input,
47
+ gr_dlc_model_input,
48
+ gr_dlc_only_checkbox,
49
+ gr_str_labels_checkbox,
50
+ gr_slider_conf_bboxes,
51
+ gr_slider_conf_keypoints,
52
+ gr_labels_font_style,
53
+ gr_slider_font_size,
54
+ gr_keypt_color,
55
+ gr_slider_marker_size]
56
+
57
+ ####################################################
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  def gradio_outputs_for_MD_DLC():
59
+ # User interface: outputs
60
+ gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
61
  gr_file_download = gr.File(label="Download JSON file")
62
+ return [gr_image_output,
63
+ gr_file_download]
64
 
65
+ ##############################################
66
+ # User interface: description
67
  def gradio_description_and_examples():
68
  title = "DeepLabCut Model Zoo SuperAnimals"
69
+ description = "Test the SuperAnimal models from the <a href='http://www.mackenziemathislab.org/dlc-modelzoo'>DeepLabCut ModelZoo Project</a>\, and read more on arXiv: https://arxiv.org/abs/2203.07436! Simply upload an image and see how it does. Want to run on videos on the cloud or locally? See the <a href='http://www.mackenziemathislab.org/dlc-modelzoo'>DeepLabCut ModelZoo</a>\."
70
+
 
 
 
 
 
 
71
 
72
+ examples = [['examples/dog.jpeg', 'md_v5a', 'superanimal_quadruped_dlcrnet', False, True, 0.5, 0.00, 'amiko',9, 'red', 3]]
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
+ return [title,description,examples]