Supeem committed on
Commit
3f8b084
·
1 Parent(s): b92a1a9
Files changed (5) hide show
  1. Dockerfile +14 -0
  2. lstm.pt +3 -0
  3. main.py +51 -0
  4. model.py +20 -0
  5. requirements.txt +5 -0
Dockerfile ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

WORKDIR /code

# Copy requirements first so the pip-install layer is cached
# unless the dependency list itself changes.
COPY ./requirements.txt /code/requirements.txt

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Copy the application source (main.py, model.py, lstm.pt checkpoint) into /code.
COPY . .

# Serve the Flask app ("app" object in main.py) via gunicorn on port 7860,
# the port Hugging Face Spaces expects a Docker Space to listen on.
CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
lstm.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d64c990b25a04b2b15449f2280aab6cbafe3e8066099994fc9a88ccfc4ab2a6
3
+ size 41351302
main.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, request
import torch
import model
import numpy as np
from sentence_transformers import SentenceTransformer

# Sentence encoder that turns the input prompt into a 384-dim embedding.
sentence_model = SentenceTransformer("all-MiniLM-L6-v2")

# Model hyperparameters. embedding_dim must match the encoder's output size;
# output_dim is 5 people * 18 keypoints * 2 coordinates = 180.
embedding_dim = 384
hidden_dim = 512
num_layers = 1
output_dim = 180
num_epochs = 100       # training-only setting; unused at inference, kept for reference
learning_rate = 0.001  # training-only setting; unused at inference, kept for reference

lstm_model = model.LSTM(embedding_dim, hidden_dim, num_layers, output_dim)
# map_location='cpu': the serving container (python:3.9 base image) has no GPU,
# so a checkpoint saved from a CUDA session would fail to load without it.
lstm_model.load_state_dict(torch.load('lstm.pt', map_location=torch.device('cpu')))
lstm_model.eval()  # switch to inference mode (good practice even without dropout)

app = Flask(__name__)
20
+
21
def GeneratePosesJSON(input):
    """Encode the prompt, run the LSTM pose model, and return an
    OpenPose-style JSON dict describing 5 people with 18 keypoints each.

    Returns:
        dict: {'people': [{'pose_keypoints_2d': [54 floats]}, ...x5],
               'animals': [], 'canvas_width': 900, 'canvas_height': 300}
    """
    with torch.no_grad():
        processed_text = torch.tensor(sentence_model.encode(input), dtype=torch.float)
        output_poses = lstm_model(processed_text.unsqueeze(0))

    # Reshape flat 180-vector to (5 people, 18 keypoints, 2 coords) and
    # append a constant confidence of 1 to every keypoint.
    people = output_poses.cpu().detach().numpy().reshape(5, 18, 2).tolist()
    newPeople = []
    for person in people:
        newPerson = []
        for keypoints in person:
            newPerson.append([keypoints[0], keypoints[1], 1])
        newPeople.append(newPerson)

    # Flatten back to 54 values per person (18 * [x, y, confidence]).
    data = np.array(newPeople).reshape(5, 54).tolist()
    formatted_data = []
    for person in data:
        formatted_data.append({ "pose_keypoints_2d": person })
    # BUG FIX: the original returned the raw `data` lists while
    # `formatted_data` (the OpenPose-style per-person dicts) was built and
    # then silently discarded. Return the formatted structure instead.
    return { 'people': formatted_data, 'animals': [], 'canvas_width': 900, 'canvas_height': 300 }
39
+
40
@app.route('/')
def hello():
    # Root endpoint: simple liveness check / smoke test for the service.
    return "Hello, World!"
43
+
44
@app.route('/generate')
def generatePose():
    """Generate pose JSON for the text given in the `text` query parameter.

    Returns an HTTP 400 error when the parameter is missing or empty;
    previously a missing parameter passed None into the sentence encoder
    and crashed with a 500.
    """
    text = request.args.get('text')
    if not text:
        return { 'error': "missing required query parameter 'text'" }, 400
    data = GeneratePosesJSON(text)
    return data

if __name__ == '__main__':
    # Local development server only; the container runs gunicorn instead
    # (see Dockerfile CMD).
    app.run()
model.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch.nn as nn


class LSTM(nn.Module):
    """Chain of five single-layer LSTMs followed by a linear projection.

    Each LSTM's output sequence is fed as the next one's input, and its
    final (hidden, cell) state is used as the next one's initial state.
    """

    def __init__(self, embedding_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        # NOTE: attribute names (lstm1..lstm5, o) are part of the saved
        # checkpoint's state_dict keys and must not be renamed.
        self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, num_layers, batch_first=True)
        self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
        self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
        self.lstm4 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
        self.lstm5 = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)
        self.o = nn.Linear(hidden_dim, output_dim)

    def forward(self, embedding):
        # Thread both the output sequence and the (h, c) state pair
        # through the chain of LSTMs, then project to output_dim.
        out, state = self.lstm1(embedding)
        for layer in (self.lstm2, self.lstm3, self.lstm4, self.lstm5):
            out, state = layer(out, state)
        return self.o(out)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
sentence-transformers
numpy
torch
Flask
gunicorn