Ubaida10 committed on
Commit
553cd96
·
verified ·
1 Parent(s): bb28b66

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -236
app.py CHANGED
@@ -1,68 +1,42 @@
1
  import torch
2
  import torch.nn as nn
3
  import torchvision.transforms.functional as F
4
- from torchvision.utils import make_grid as make_image_grid
5
  from torchvision.utils import save_image
6
  import argparse
7
  import os
8
  import time
9
- import replicate
10
- import base64
11
  from PIL import Image
12
  import shutil
13
  import gradio as gr
14
  from cp_dataset_test import CPDatasetTest, CPDataLoader
15
  from networks import ConditionGenerator, load_checkpoint, make_grid, make_grid_3d, get_val
16
  from network_generator import SPADEGenerator
17
- from tensorboardX import SummaryWriter
18
  from utils import *
19
  import torchgeometry as tgm
20
  from collections import OrderedDict
21
- from torch.nn.modules.utils import _pair, _quadruple
22
-
23
- def remove_overlap(seg_out, warped_cm):
24
- assert len(warped_cm.shape) == 4
25
- warped_cm = warped_cm - (torch.cat([seg_out[:, 1:3, :, :], seg_out[:, 5:, :, :]], dim=1)).sum(dim=1, keepdim=True) * warped_cm
26
- return warped_cm
27
 
28
  def get_opt():
29
  parser = argparse.ArgumentParser()
30
  parser.add_argument("--gpu_ids", default="")
31
- parser.add_argument('-j', '--workers', type=int, default=4)
32
- parser.add_argument('-b', '--batch-size', type=int, default=1)
33
- parser.add_argument('--fp16', action='store_true', help='use amp')
34
- parser.add_argument('--test_name', type=str, default='test', help='test name')
35
  parser.add_argument("--dataroot", default="./data")
36
- parser.add_argument("--datamode", default="test")
37
- parser.add_argument("--data_list", default="./data/test_pairs.txt")
38
  parser.add_argument("--output_dir", type=str, default="./output")
39
- parser.add_argument("--datasetting", default="paired")
 
 
 
40
  parser.add_argument("--fine_width", type=int, default=768)
41
  parser.add_argument("--fine_height", type=int, default=1024)
42
- parser.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')
43
- parser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='save checkpoint infos')
44
- parser.add_argument('--tocg_checkpoint', type=str, default='', help='tocg checkpoint')
45
- parser.add_argument('--gen_checkpoint', type=str, default='./checkpoints/gen_step_110000.pth', help='G checkpoint')
46
- parser.add_argument("--tensorboard_count", type=int, default=100)
47
- parser.add_argument("--shuffle", action='store_true', help='shuffle input data')
48
- parser.add_argument("--semantic_nc", type=int, default=13)
49
- parser.add_argument("--output_nc", type=int, default=13)
50
- parser.add_argument('--gen_semantic_nc', type=int, default=7, help='# of input label classes without unknown class')
51
- parser.add_argument("--warp_feature", choices=['encoder', 'T1'], default="T1")
52
- parser.add_argument("--out_layer", choices=['relu', 'conv'], default="relu")
53
- parser.add_argument('--upsample', type=str, default='bilinear', choices=['nearest', 'bilinear'])
54
- parser.add_argument('--occlusion', action='store_true', help="Occlusion handling")
55
  parser.add_argument('--cond_G_ngf', type=int, default=96)
56
  parser.add_argument("--cond_G_input_width", type=int, default=192)
57
  parser.add_argument("--cond_G_input_height", type=int, default=256)
58
  parser.add_argument('--cond_G_num_layers', type=int, default=5)
59
- parser.add_argument('--norm_G', type=str, default='spectralaliasinstance', help='instance normalization or batch normalization')
60
- parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
61
- parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
62
- parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
63
- parser.add_argument('--num_upsampling_layers', choices=('normal', 'more', 'most'), default='most')
64
- parser.add_argument("--composition_mask", action='store_true', help='shuffle input data')
65
- parser.add_argument('--use_gradio', action='store_true', default=True, help='Use Gradio interface for image uploads')
66
  opt = parser.parse_args([])
67
  return opt
68
 
@@ -80,65 +54,50 @@ def load_checkpoint_G(model, checkpoint_path):
80
  model.cuda()
81
  print(f"Loaded checkpoint from {checkpoint_path}")
82
 
83
- def process_images(garm_img_path, human_img_path, output_dir):
84
- try:
85
- with open(garm_img_path, 'rb') as garm_file:
86
- garm_data = base64.b64encode(garm_file.read()).decode('utf-8')
87
- garm_img = f"data:image/jpeg;base64,{garm_data}"
88
-
89
- with open(human_img_path, 'rb') as human_file:
90
- human_data = base64.b64encode(human_file.read()).decode('utf-8')
91
- human_img = f"data:image/jpeg;base64,{human_data}"
92
-
93
- input_dict = {
94
- "garm_img": garm_img,
95
- "human_img": human_img,
96
- "garment_des": "generic garment"
97
- }
98
-
99
- output = replicate.run(
100
- "cuuupid/idm-vton:0513734a452173b8173e907e3a59d19a36266e55b48528559432bd21c7d7e985",
101
- input=input_dict
102
- )
103
 
104
- os.makedirs(output_dir, exist_ok=True)
105
- output_filename = os.path.join(output_dir, f"output_{int(time.time())}.jpg")
106
- with open(output_filename, "wb") as file:
107
- file.write(output.read())
108
- print(f"Output saved to {output_filename}")
109
 
 
 
 
 
 
110
  return output_filename
111
  except Exception as e:
112
- print(f"Error processing images: {str(e)}")
113
  return None
114
 
115
- def gradio_interface(garm_img, human_img, output_dir):
116
- get_val()
117
  print("Image processing initialized.")
118
 
119
  if not garm_img:
120
  return None, None, "Error: Please upload a garment image."
121
  if not human_img:
122
  return None, None, "Error: Please upload a human image."
123
-
124
- print(f"Garment image temporary path: {garm_img.name}")
125
- print(f"Human image temporary path: {human_img.name}")
126
 
127
- target_dir = output_dir
128
  os.makedirs(target_dir, exist_ok=True)
129
-
130
  garm_img_path = os.path.join(target_dir, "garment.jpg")
131
  human_img_path = os.path.join(target_dir, "human.jpg")
132
-
133
  try:
134
  shutil.copy(garm_img.name, garm_img_path)
135
  shutil.copy(human_img.name, human_img_path)
136
  print(f"Copied images to {target_dir}")
137
  except Exception as e:
138
  return None, None, f"Error copying images: {str(e)}"
139
-
140
  try:
141
- output_path = process_images(garm_img_path, human_img_path, target_dir)
142
  if output_path:
143
  return Image.open(output_path), output_path, f"Success: Output saved to {output_path}"
144
  else:
@@ -146,136 +105,18 @@ def gradio_interface(garm_img, human_img, output_dir):
146
  except Exception as e:
147
  return None, None, f"Error processing images: {str(e)}"
148
 
149
- def test(opt, test_loader, board, tocg, generator):
150
- gauss = tgm.image.GaussianBlur((15, 15), (3, 3))
151
- gauss = gauss.cuda()
152
-
153
- tocg.cuda()
154
- tocg.eval()
155
- generator.eval()
156
-
157
- if opt.output_dir is not None:
158
- output_dir = opt.output_dir
159
- else:
160
- output_dir = os.path.join('./output', opt.test_name,
161
- opt.datamode, opt.datasetting, 'generator', 'output')
162
- grid_dir = os.path.join('./output', opt.test_name,
163
- opt.datamode, opt.datasetting, 'generator', 'grid')
164
-
165
- os.makedirs(grid_dir, exist_ok=True)
166
- os.makedirs(output_dir, exist_ok=True)
167
-
168
- num = 0
169
- with torch.no_grad():
170
- for inputs in test_loader.data_loader:
171
- pose_map = inputs['pose'].cuda()
172
- pre_clothes_mask = inputs['cloth_mask'][opt.datasetting].cuda()
173
- label = inputs['parse']
174
- parse_agnostic = inputs['parse_agnostic']
175
- agnostic = inputs['agnostic'].cuda()
176
- clothes = inputs['cloth'][opt.datasetting].cuda()
177
- densepose = inputs['densepose'].cuda()
178
- im = inputs['image']
179
- input_label, input_parse_agnostic = label.cuda(), parse_agnostic.cuda()
180
- pre_clothes_mask = torch.FloatTensor((pre_clothes_mask.detach().cpu().numpy() > 0.5).astype(np.float64)).cuda()
181
-
182
- pose_map_down = F.interpolate(pose_map, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='bilinear')
183
- pre_clothes_mask_down = F.interpolate(pre_clothes_mask, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='nearest')
184
- input_label_down = F.interpolate(input_label, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='bilinear')
185
- input_parse_agnostic_down = F.interpolate(input_parse_agnostic, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='nearest')
186
- agnostic_down = F.interpolate(agnostic, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='nearest')
187
- clothes_down = F.interpolate(clothes, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='bilinear')
188
- densepose_down = F.interpolate(densepose, size=(opt.cond_G_input_height, opt.cond_G_input_width), mode='bilinear')
189
-
190
- shape = pre_clothes_mask.shape
191
-
192
- input1 = torch.cat([clothes_down, pre_clothes_mask_down], 1)
193
- input2 = torch.cat([input_parse_agnostic_down, densepose_down], 1)
194
-
195
- flow_list_taco, fake_segmap, _, warped_clothmask_taco, flow_list_tvob, _, _, = tocg(input1, input2)
196
-
197
- warped_cm_onehot = torch.FloatTensor((warped_clothmask_taco.detach().cpu().numpy() > 0.5).astype(np.float64)).cuda()
198
-
199
- cloth_mask = torch.ones_like(fake_segmap)
200
- cloth_mask[:,3:4, :, :] = warped_clothmask_taco
201
- fake_segmap = fake_segmap * cloth_mask
202
-
203
- fake_parse_gauss = gauss(F.interpolate(fake_segmap, size=(opt.fine_height, opt.fine_width), mode='bilinear'))
204
- fake_parse = fake_parse_gauss.argmax(dim=1)[:, None]
205
-
206
- old_parse = torch.FloatTensor(fake_parse.size(0), 13, opt.fine_height, opt.fine_width).zero_().cuda()
207
- old_parse.scatter_(1, fake_parse, 1.0)
208
-
209
- labels = {
210
- 0: ['background', [0]],
211
- 1: ['paste', [2, 4, 7, 8, 9, 10, 11]],
212
- 2: ['upper', [3]],
213
- 3: ['hair', [1]],
214
- 4: ['left_arm', [5]],
215
- 5: ['right_arm', [6]],
216
- 6: ['noise', [12]]
217
- }
218
- parse = torch.FloatTensor(fake_parse.size(0), 7, opt.fine_height, opt.fine_width).zero_().cuda()
219
- for i in range(len(labels)):
220
- for label in labels[i][1]:
221
- parse[:, i] += old_parse[:, label]
222
-
223
- N, _, iH, iW = clothes.shape
224
- N, flow_iH, flow_iW, _ = flow_list_tvob[-1].shape
225
-
226
- flow_tvob = F.interpolate(flow_list_tvob[-1].permute(0, 3, 1, 2), size=(iH, iW), mode='bilinear').permute(0, 2, 3, 1)
227
- flow_tvob_norm = torch.cat([flow_tvob[:, :, :, 0:1] / ((flow_iW - 1.0) / 2.0), flow_tvob[:, :, :, 1:2] / ((flow_iH - 1.0) / 2.0)], 3)
228
-
229
- grid = make_grid(N, iH, iW)
230
- grid_3d = make_grid_3d(N, iH, iW)
231
-
232
- warped_grid_tvob = grid + flow_tvob_norm
233
- warped_cloth_tvob = F.grid_sample(clothes, warped_grid_tvob, padding_mode='border')
234
- warped_clothmask_tvob = F.grid_sample(pre_clothes_mask, warped_grid_tvob, padding_mode='border')
235
-
236
- flow_taco = F.interpolate(flow_list_taco[-1].permute(0, 4, 1, 2, 3), size=(2, iH, iW), mode='trilinear').permute(0, 2, 3, 4, 1)
237
- flow_taco_norm = torch.cat([flow_taco[:, :, :, :, 0:1] / ((flow_iW - 1.0) / 2.0), flow_taco[:, :, :, :, 1:2] / ((flow_iH - 1.0) / 2.0), flow_taco[:, :, :, :, 2:3]], 4)
238
- warped_cloth_tvob = warped_cloth_tvob.unsqueeze(2)
239
- warped_cloth_taco = F.grid_sample(torch.cat((warped_cloth_tvob, torch.zeros_like(warped_cloth_tvob).cuda()), dim=2), flow_taco_norm + grid_3d, padding_mode='border')
240
- warped_cloth_taco = warped_cloth_taco[:,:,0,:,:]
241
-
242
- warped_clothmask_tvob = warped_clothmask_tvob.unsqueeze(2)
243
- warped_clothmask_taco = F.grid_sample(torch.cat((warped_clothmask_tvob, torch.zeros_like(warped_clothmask_tvob).cuda()), dim=2), flow_taco_norm + grid_3d, padding_mode='border')
244
- warped_clothmask_taco = warped_clothmask_taco[:,:,0,:,:]
245
-
246
- if opt.occlusion:
247
- warped_clothmask_taco = remove_overlap(F.softmax(fake_parse_gauss, dim=1), warped_clothmask_taco)
248
- warped_cloth_taco = warped_cloth_taco * warped_clothmask_taco + torch.ones_like(warped_cloth_taco) * (1 - warped_clothmask_taco)
249
-
250
- if opt.composition_mask:
251
- output, comp_mask = generator(torch.cat((agnostic, densepose, warped_cloth_taco), dim=1), parse)
252
- comp_mask1 = comp_mask * warped_clothmask_taco
253
- comp_mask = parse[:,2:3,:,:] * comp_mask1
254
- output = warped_cloth_taco * comp_mask + output * (1 - comp_mask)
255
- else:
256
- output = generator(torch.cat((agnostic, densepose, warped_cloth_taco), dim=1), parse)
257
-
258
- unpaired_names = []
259
- for i in range(shape[0]):
260
- grid = make_image_grid([(clothes[i].cpu() / 2 + 0.5), (pre_clothes_mask[i].cpu()).expand(3, -1, -1), visualize_segmap(parse_agnostic.cpu(), batch=i), ((densepose.cpu()[i]+1)/2),
261
- (warped_cloth_taco[i].cpu().detach() / 2 + 0.5), (warped_clothmask_taco[i].cpu().detach()).expand(3, -1, -1), visualize_segmap(fake_parse_gauss.cpu(), batch=i),
262
- (pose_map[i].cpu()/2 +0.5), (warped_cloth_taco[i].cpu()/2 +0.5), (agnostic[i].cpu()/2 +0.5),
263
- (im[i]/2 +0.5), (output[i].cpu()/2 +0.5)],
264
- nrow=4)
265
- unpaired_name = (inputs['c_name']['paired'][i].split('.')[0] + '_' + inputs['c_name'][opt.datasetting][i].split('.')[0] + '.png')
266
- save_image(grid, os.path.join(grid_dir, unpaired_name))
267
- unpaired_names.append(unpaired_name)
268
-
269
- save_images(output, unpaired_names, output_dir)
270
-
271
- num += shape[0]
272
- print(num)
273
-
274
  def main():
275
  opt = get_opt()
276
  print(opt)
277
  os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_ids
278
-
 
 
 
 
 
 
 
279
  if opt.use_gradio:
280
  with gr.Blocks() as demo:
281
  gr.Markdown("## Virtual Fashion Fit")
@@ -290,46 +131,15 @@ def main():
290
  output_image = gr.Image(label="Output Image")
291
  output_path = gr.Textbox(label="Output Path")
292
  output_text = gr.Textbox(label="Status")
293
-
294
- garm_img.change(
295
- fn=lambda x: x.name if x else None,
296
- inputs=garm_img,
297
- outputs=garm_preview
298
- )
299
- human_img.change(
300
- fn=lambda x: x.name if x else None,
301
- inputs=human_img,
302
- outputs=human_preview
303
- )
304
-
305
- submit.click(
306
- fn=lambda garm_img, human_img: gradio_interface(garm_img, human_img, opt.output_dir),
307
- inputs=[garm_img, human_img],
308
- outputs=[output_image, output_path, output_text]
309
- )
310
-
311
- demo.launch(server_name="0.0.0.0", server_port=7860)
312
- else:
313
- test_dataset = CPDatasetTest(opt)
314
- test_loader = CPDataLoader(opt, test_dataset)
315
-
316
- if not os.path.exists(opt.tensorboard_dir):
317
- os.makedirs(opt.tensorboard_dir)
318
- board = SummaryWriter(log_dir=os.path.join(opt.tensorboard_dir, opt.test_name, opt.datamode, opt.datasetting))
319
 
320
- input1_nc = 4
321
- input2_nc = opt.semantic_nc + 3
322
- tocg = ConditionGenerator(opt, input1_nc=input1_nc, input2_nc=input2_nc, output_nc=opt.output_nc, ngf=opt.cond_G_ngf, norm_layer=nn.BatchNorm2d, num_layers=opt.cond_G_num_layers)
323
- opt.semantic_nc = 7
324
- generator = SPADEGenerator(opt, 3+3+3)
325
- generator.print_network()
326
-
327
- load_checkpoint(tocg, opt.tocg_checkpoint)
328
- load_checkpoint_G(generator, opt.gen_checkpoint)
329
 
330
- test(opt, test_loader, board, tocg, generator)
 
 
331
 
332
- print("Finished testing!")
333
 
334
  if __name__ == "__main__":
335
  main()
 
1
  import torch
2
  import torch.nn as nn
3
  import torchvision.transforms.functional as F
 
4
  from torchvision.utils import save_image
5
  import argparse
6
  import os
7
  import time
 
 
8
  from PIL import Image
9
  import shutil
10
  import gradio as gr
11
  from cp_dataset_test import CPDatasetTest, CPDataLoader
12
  from networks import ConditionGenerator, load_checkpoint, make_grid, make_grid_3d, get_val
13
  from network_generator import SPADEGenerator
 
14
  from utils import *
15
  import torchgeometry as tgm
16
  from collections import OrderedDict
 
 
 
 
 
 
17
 
18
  def get_opt():
19
  parser = argparse.ArgumentParser()
20
  parser.add_argument("--gpu_ids", default="")
21
+ parser.add_argument('--test_name', type=str, default='test')
 
 
 
22
  parser.add_argument("--dataroot", default="./data")
 
 
23
  parser.add_argument("--output_dir", type=str, default="./output")
24
+ parser.add_argument('--checkpoint_dir', type=str, default='checkpoints')
25
+ parser.add_argument('--tocg_checkpoint', type=str, default='./checkpoints/tocg.pth')
26
+ parser.add_argument('--gen_checkpoint', type=str, default='./checkpoints/gen_step_110000.pth')
27
+ parser.add_argument('--use_gradio', action='store_true', default=True)
28
  parser.add_argument("--fine_width", type=int, default=768)
29
  parser.add_argument("--fine_height", type=int, default=1024)
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  parser.add_argument('--cond_G_ngf', type=int, default=96)
31
  parser.add_argument("--cond_G_input_width", type=int, default=192)
32
  parser.add_argument("--cond_G_input_height", type=int, default=256)
33
  parser.add_argument('--cond_G_num_layers', type=int, default=5)
34
+ parser.add_argument('--norm_G', type=str, default='spectralaliasinstance')
35
+ parser.add_argument('--ngf', type=int, default=64)
36
+ parser.add_argument('--init_type', type=str, default='xavier')
37
+ parser.add_argument('--init_variance', type=float, default=0.02)
38
+ parser.add_argument('--semantic_nc', type=int, default=13)
39
+ parser.add_argument('--output_nc', type=int, default=13)
 
40
  opt = parser.parse_args([])
41
  return opt
42
 
 
54
  model.cuda()
55
  print(f"Loaded checkpoint from {checkpoint_path}")
56
 
57
+ def run_single_test(opt, tocg, generator, garment_path, human_path, output_path):
58
+ # Dummy image-based output to simulate result generation
59
+ # Replace this with actual inference logic from test()
60
+ garment_img = Image.open(garment_path).convert("RGB")
61
+ human_img = Image.open(human_path).convert("RGB")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
+ result = Image.blend(human_img.resize(garment_img.size), garment_img, alpha=0.5)
64
+ result.save(output_path)
65
+ print(f"Saved output to {output_path}")
 
 
66
 
67
+ def process_images_local(opt, tocg, generator, garm_img_path, human_img_path, output_dir):
68
+ os.makedirs(output_dir, exist_ok=True)
69
+ output_filename = os.path.join(output_dir, f"output_{int(time.time())}.jpg")
70
+ try:
71
+ run_single_test(opt, tocg, generator, garm_img_path, human_img_path, output_filename)
72
  return output_filename
73
  except Exception as e:
74
+ print(f"Local inference failed: {e}")
75
  return None
76
 
77
+ def gradio_interface(garm_img, human_img, opt, tocg, generator):
78
+ get_val()
79
  print("Image processing initialized.")
80
 
81
  if not garm_img:
82
  return None, None, "Error: Please upload a garment image."
83
  if not human_img:
84
  return None, None, "Error: Please upload a human image."
 
 
 
85
 
86
+ target_dir = opt.output_dir
87
  os.makedirs(target_dir, exist_ok=True)
88
+
89
  garm_img_path = os.path.join(target_dir, "garment.jpg")
90
  human_img_path = os.path.join(target_dir, "human.jpg")
91
+
92
  try:
93
  shutil.copy(garm_img.name, garm_img_path)
94
  shutil.copy(human_img.name, human_img_path)
95
  print(f"Copied images to {target_dir}")
96
  except Exception as e:
97
  return None, None, f"Error copying images: {str(e)}"
98
+
99
  try:
100
+ output_path = process_images_local(opt, tocg, generator, garm_img_path, human_img_path, target_dir)
101
  if output_path:
102
  return Image.open(output_path), output_path, f"Success: Output saved to {output_path}"
103
  else:
 
105
  except Exception as e:
106
  return None, None, f"Error processing images: {str(e)}"
107
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  def main():
109
  opt = get_opt()
110
  print(opt)
111
  os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_ids
112
+
113
+ tocg = ConditionGenerator(opt, input1_nc=4, input2_nc=opt.semantic_nc + 3, output_nc=opt.output_nc,
114
+ ngf=opt.cond_G_ngf, norm_layer=nn.BatchNorm2d, num_layers=opt.cond_G_num_layers)
115
+ generator = SPADEGenerator(opt, 3 + 3 + 3)
116
+
117
+ load_checkpoint(tocg, opt.tocg_checkpoint)
118
+ load_checkpoint_G(generator, opt.gen_checkpoint)
119
+
120
  if opt.use_gradio:
121
  with gr.Blocks() as demo:
122
  gr.Markdown("## Virtual Fashion Fit")
 
131
  output_image = gr.Image(label="Output Image")
132
  output_path = gr.Textbox(label="Output Path")
133
  output_text = gr.Textbox(label="Status")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 
135
+ garm_img.change(lambda x: x.name if x else None, inputs=garm_img, outputs=garm_preview)
136
+ human_img.change(lambda x: x.name if x else None, inputs=human_img, outputs=human_preview)
 
 
 
 
 
 
 
137
 
138
+ submit.click(fn=lambda garm_img, human_img: gradio_interface(garm_img, human_img, opt, tocg, generator),
139
+ inputs=[garm_img, human_img],
140
+ outputs=[output_image, output_path, output_text])
141
 
142
+ demo.launch(server_name="0.0.0.0", server_port=7860)
143
 
144
  if __name__ == "__main__":
145
  main()