| |
| """ |
| 单个 Prompt 测试 Pipeline |
| |
| 完整流程: |
| 1. 接受用户的设计文本描述 |
| 2. 调用 GPT 生成房间规格和物体列表 |
| 3. 检索 3D assets (OpenShape) |
| 4. 使用 LayoutVLM 优化布局 |
| 5. 使用高质量 Blender 渲染可视化 |
| |
| 使用方法: |
| # 完整流程 |
| python run_single_prompt.py --prompt "A cozy bedroom with..." --output results/my_test |
| |
| # 仅生成布局 (跳过渲染) |
| python run_single_prompt.py --prompt "..." --output results/my_test --skip-render |
| |
| # 从已有 scene_config 继续 |
| python run_single_prompt.py --scene-config results/my_test/scene_config.json --output results/my_test |
| |
| # 仅渲染 (已有 layout.json) |
| python run_single_prompt.py --output results/my_test --render-only |
| """ |
|
|
| import os |
| import json |
| import math |
| import argparse |
| import collections |
| import subprocess |
| import sys |
| import re |
| from typing import Dict, List, Any, Optional |
| from pathlib import Path |
|
|
|
|
| |
| |
| |
|
|
def setup_azure_client():
    """Create an Azure OpenAI client authenticated via chained Azure credentials.

    The Azure CLI credential is tried first (local development), then a
    managed identity (hosted VMs); the resulting bearer-token provider is
    handed to the AzureOpenAI SDK client.

    Returns:
        AzureOpenAI: client bound to the TRAPI research endpoint.
    """
    from openai import AzureOpenAI
    from azure.identity import (
        AzureCliCredential,
        ChainedTokenCredential,
        ManagedIdentityCredential,
        get_bearer_token_provider,
    )

    token_chain = ChainedTokenCredential(
        AzureCliCredential(),
        ManagedIdentityCredential(),
    )
    token_provider = get_bearer_token_provider(token_chain, "api://trapi/.default")

    instance = 'msra/shared'
    return AzureOpenAI(
        azure_endpoint=f'https://trapi.research.microsoft.com/{instance}',
        azure_ad_token_provider=token_provider,
        api_version='2024-12-01-preview',
    )
|
|
|
|
| |
| |
| |
|
|
def generate_room_specification(client, user_description: str, model_name: str = 'gpt-4o_2024-11-20') -> Dict:
    """Generate a room specification (task summary, size, layout criteria) via GPT.

    Args:
        client: Azure OpenAI client (see ``setup_azure_client``).
        user_description: the user's free-form room description.
        model_name: chat model deployment to use.

    Returns:
        Parsed JSON dict with ``task_description``, ``room_size`` and
        ``layout_criteria`` keys.

    Raises:
        ValueError: when no JSON object can be extracted from the reply.
    """
    prompt = f"""
Given a user's room description, return a JSON object containing:
1. task_description: A concise summary of the room
2. room_size: Realistic dimensions in meters (width, length, height)
3. layout_criteria: Detailed layout requirements

User's description: {user_description}

Return only the JSON object. Example format:
{{
    "task_description": "a bright open-plan dining room with...",
    "room_size": {{
        "width": 6.0,
        "length": 8.0,
        "height": 3.0,
        "area": 48.0
    }},
    "layout_criteria": "The layout criteria should..."
}}
"""

    reply = client.chat.completions.create(
        model=model_name,
        messages=[
            {"role": "system", "content": "You are an expert interior designer. Always return valid JSON format."},
            {"role": "user", "content": prompt},
        ],
        temperature=1.0,
    )

    raw = reply.choices[0].message.content
    # Tolerate extra prose around the reply by grabbing the outermost braces.
    match = re.search(r'\{.*\}', raw, re.DOTALL)
    if match is None:
        raise ValueError(f"Failed to parse JSON: {raw}")
    return json.loads(match.group(0))
|
|
|
|
| |
| |
| |
|
|
def generate_object_list(client, task_description: str, layout_criteria: str,
                         room_size: Dict, model_name: str = "gpt-4o_2024-11-20") -> Dict:
    """Generate the furniture/object list for a room via GPT.

    Args:
        client: Azure OpenAI client.
        task_description: summary produced by ``generate_room_specification``.
        layout_criteria: layout-requirement text for the room.
        room_size: dict with at least ``width`` and ``length`` in meters.
        model_name: chat model deployment to use.

    Returns:
        Dict mapping object name -> {count, types, description}.

    Raises:
        ValueError: when no JSON object can be extracted from the reply.
    """
    dims = f"{room_size['width']}m x {room_size['length']}m"

    prompt = f"""
Given a room description and layout criteria, generate a comprehensive list of objects.

Requirements:
1. Object names: 2-3 words, specific (e.g., "wooden chair", "desk lamp")
2. Use singular form (e.g., "chair" not "chairs")
3. Include 10-20 relevant objects
4. Each object needs count, types, and description for 3D asset retrieval

Task description: {task_description}
Layout criteria: {layout_criteria}
Room size: {dims}

Return JSON format like:
{{
    "dining_table": {{
        "count": 1,
        "types": 1,
        "description": "A rectangular wooden dining table with sturdy legs, seats 4-6 people"
    }},
    "dining_chair": {{
        "count": 6,
        "types": 1,
        "description": "An upholstered dining chair with padded seat and backrest"
    }}
}}

Return only the JSON object.
"""

    reply = client.chat.completions.create(
        model=model_name,
        messages=[
            {"role": "system", "content": "You are an expert interior designer generating object lists. Always return valid JSON."},
            {"role": "user", "content": prompt},
        ],
        temperature=1.0,
    )

    raw = reply.choices[0].message.content
    match = re.search(r'\{.*\}', raw, re.DOTALL)
    if match is None:
        raise ValueError(f"Failed to parse object list JSON: {raw}")
    return json.loads(match.group(0))
|
|
|
|
| |
| |
| |
|
|
| |
# Process-wide caches: the CLIP model and the OpenShape embedding table are
# expensive to initialize, so each is loaded once and reused across retrievals.
_CLIP_MODEL_CACHE = None
_OPENSHAPE_DATA_CACHE = None
|
|
def load_openclip_model():
    """Load (and memoize) the OpenCLIP bigG model together with its processor.

    The pair is stored in ``_CLIP_MODEL_CACHE`` so repeated retrieval calls
    reuse a single model instance.

    Returns:
        tuple: (CLIPModel, CLIPProcessor)
    """
    global _CLIP_MODEL_CACHE
    if _CLIP_MODEL_CACHE is None:
        import torch
        import transformers

        print(" 加载 CLIP 模型...")
        # fp16 on GPU, bf16 on CPU — presumably because fp16 kernels are
        # poorly supported on CPU; confirm if changing.
        dtype = torch.float16 if torch.cuda.is_available() else torch.bfloat16

        model = transformers.CLIPModel.from_pretrained(
            "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
            low_cpu_mem_usage=True,
            torch_dtype=dtype,
            offload_state_dict=True,
        )
        processor = transformers.CLIPProcessor.from_pretrained(
            "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
        )

        if torch.cuda.is_available():
            model.cuda()

        _CLIP_MODEL_CACHE = (model, processor)
    return _CLIP_MODEL_CACHE
|
|
|
|
def load_openshape_embeddings():
    """Download (and memoize) OpenShape's Objaverse metadata and CLIP features.

    Fix: the original read the metadata with ``json.load(open(...))``, leaking
    the file handle; the file is now opened in a ``with`` block.

    Returns:
        tuple: (meta dict keyed by asset uid, uid list, feature tensor)
    """
    global _OPENSHAPE_DATA_CACHE
    if _OPENSHAPE_DATA_CACHE is not None:
        return _OPENSHAPE_DATA_CACHE

    import torch
    from huggingface_hub import hf_hub_download

    print(" 加载 OpenShape embeddings...")

    meta_path = hf_hub_download(
        "OpenShape/openshape-objaverse-embeddings",
        "objaverse_meta.json", token=True, repo_type='dataset',
        local_dir="OpenShape-Embeddings"
    )
    with open(meta_path) as f:
        raw_meta = json.load(f)
    # Re-key the metadata by asset uid ('u') for O(1) lookups.
    meta = {entry['u']: entry for entry in raw_meta['entries']}

    emb_path = hf_hub_download(
        "OpenShape/openshape-objaverse-embeddings",
        "objaverse.pt", token=True, repo_type='dataset',
        local_dir="OpenShape-Embeddings"
    )
    payload = torch.load(emb_path, map_location='cpu')

    _OPENSHAPE_DATA_CACHE = (meta, payload['us'], payload['feats'])
    return _OPENSHAPE_DATA_CACHE
|
|
|
|
def retrieve_single_object(description: str, top: int = 5, sim_th: float = 0.1,
                           face_max: int = 100000, asset_dir: str = None) -> List[Dict]:
    """Retrieve candidate 3D assets for one text description via OpenShape.

    Encodes the description with CLIP, ranks all Objaverse embeddings by
    cosine similarity, then filters by similarity threshold, face budget and
    (optionally) local asset availability.

    Args:
        description: text query; digits are stripped and underscores spaced.
        top: maximum number of candidates to return.
        sim_th: minimum similarity for a candidate to be kept.
        face_max: skip meshes with more faces than this.
        asset_dir: if given and existing, only uids present in this directory
            are returned.

    Returns:
        List of dicts with ``uid``, ``similarity``, ``name`` and ``faces``,
        in descending similarity order.
    """
    import torch
    from torch.nn import functional as F

    clip_model, clip_prep = load_openclip_model()
    meta, us, feats = load_openshape_embeddings()

    # Digits in the prompt (e.g. counts) only add noise to the CLIP query.
    text = re.sub(r'\d', '', description).replace("_", " ")
    device = clip_model.device

    tn = clip_prep(text=[text], return_tensors='pt', truncation=True, max_length=76).to(device)

    with torch.no_grad():
        enc = clip_model.get_text_features(**tn).float().cpu()

    embedding = F.normalize(enc.detach().cpu(), dim=-1).squeeze()

    # Chunked matmul keeps peak memory bounded over the full embedding table.
    sims = torch.cat([
        embedding @ F.normalize(chunk.float(), dim=-1).T
        for chunk in torch.split(feats, 10240)
    ])
    sims, idx = torch.sort(sims, descending=True)

    sim_mask = sims > sim_th
    sims, idx = sims[sim_mask], idx[sim_mask]

    # BUG FIX: previously `if available_assets and ...` — an *empty* asset
    # directory produced an empty (falsy) set, silently disabling the
    # availability filter. Compare against None explicitly instead.
    available_assets = None
    if asset_dir and os.path.exists(asset_dir):
        available_assets = set(os.listdir(asset_dir))

    results = []
    for i, sim in zip(idx, sims):
        uid = us[i]
        if uid not in meta:
            continue
        obj_meta = meta[uid]
        if obj_meta['faces'] > face_max:
            continue
        if available_assets is not None and uid not in available_assets:
            continue

        results.append({
            'uid': uid,
            'similarity': float(sim),
            'name': obj_meta['name'],
            'faces': obj_meta['faces'],
        })

        if len(results) >= top:
            break

    return results
|
|
|
|
def retrieve_3d_assets(object_list: Dict, asset_dir: str) -> Dict:
    """Retrieve a 3D asset for every object in the generated object list.

    Fix: removed the unused ``type_idx`` loop variable.

    Args:
        object_list: mapping of object name -> {count, types, description}.
        asset_dir: local directory of processed Objaverse assets.

    Returns:
        Dict keyed by ``"<uid>-<instance_idx>"`` with empty dict values; the
        metadata is filled in later by ``prepare_task_assets``.
    """
    print("\n🔍 检索 3D Assets...")

    assets = {}

    for obj_name, obj_info in object_list.items():
        description = obj_info.get("description", obj_name)
        count = obj_info.get("count", 1)
        types_needed = obj_info.get("types", 1)

        print(f" 检索: {obj_name} ({count}个)")

        try:
            # Over-fetch candidates so the availability/face filters still
            # leave enough distinct types to choose from.
            candidates = retrieve_single_object(
                description, top=max(10, types_needed * 3),
                sim_th=0.1, face_max=100000, asset_dir=asset_dir
            )

            if not candidates:
                print(f" ⚠️ 未找到匹配")
                continue

            selected = candidates[:types_needed]
            print(f" ✓ 找到: {selected[0]['name']} (sim={selected[0]['similarity']:.3f})")

            # One placeholder entry per (asset type, instance).
            for asset_info in selected:
                for instance_idx in range(count):
                    assets[f"{asset_info['uid']}-{instance_idx}"] = {}

        except Exception as e:
            # Best-effort: a failed retrieval drops the object, not the run.
            print(f" ⚠️ 检索失败: {e}")

    print(f"✅ 共检索 {len(assets)} 个 assets")
    return assets
|
|
|
|
| |
| |
| |
|
|
def generate_scene_json(task_description: str, layout_criteria: str,
                        room_size: Dict, assets: Dict, user_input: str) -> Dict:
    """Assemble the scene-config dict consumed by the layout stage.

    The floor is an axis-aligned rectangle with one corner at the origin;
    wall height falls back to 2.8 m when the room spec omits it.
    """
    w = room_size['width']
    l = room_size['length']
    corners = [[0, 0, 0], [w, 0, 0], [w, l, 0], [0, l, 0]]

    return {
        "user_input": user_input,
        "task_description": task_description,
        "layout_criteria": layout_criteria,
        "boundary": {
            "floor_vertices": corners,
            "wall_height": room_size.get('height', 2.8),
        },
        "assets": assets,
    }
|
|
|
|
| |
| |
| |
|
|
def prepare_task_assets(task: Dict, asset_dir: str) -> Dict:
    """Expand uid-keyed asset placeholders into full layout metadata.

    Reads each asset's ``data.json`` from ``asset_dir``, groups duplicates by
    base uid, then rewrites ``task["assets"]`` keyed by
    ``"<category[_letter]>-<instance_idx>"`` with annotations, placement
    flags and bounding box.

    Args:
        task: scene config whose ``assets`` keys look like ``"<uid>-<n>"``.
        asset_dir: directory containing ``<uid>/data.json`` and ``<uid>.glb``.

    Returns:
        The same ``task`` dict, mutated in place.
    """
    if "layout_criteria" not in task:
        task["layout_criteria"] = "the layout should follow the task description"

    # Group entries by base uid. Keys are "<uid>-<instance_idx>"; uids may
    # themselves contain '-', so only the last segment is dropped.
    all_data = collections.defaultdict(list)

    for original_uid in task["assets"].keys():
        uid = '-'.join(original_uid.split('-')[:-1])
        data_path = os.path.join(asset_dir, uid, "data.json")

        # Assets with no local metadata are silently dropped.
        if not os.path.exists(data_path):
            continue

        with open(data_path, "r") as f:
            data = json.load(f)
            data['path'] = os.path.join(asset_dir, uid, f"{uid}.glb")
            all_data[uid].append(data)

    # First pass: count how many distinct uids share each sanitized category
    # name, so shared categories get an _A/_B/... suffix below.
    category_count = collections.defaultdict(int)
    for uid, duplicated_assets in all_data.items():
        if not duplicated_assets:
            continue
        category_var_name = duplicated_assets[0]['annotations']['category']
        category_var_name = category_var_name.replace('-', "_").replace(" ", "_").replace("'", "_").replace("/", "_").replace(",", "_").lower()
        category_count[category_var_name] += 1

    # Second pass: rebuild task["assets"] with one entry per instance.
    task["assets"] = {}
    category_idx = collections.defaultdict(int)

    for uid, duplicated_assets in all_data.items():
        if not duplicated_assets:
            continue
        category_var_name = duplicated_assets[0]['annotations']['category']
        category_var_name = category_var_name.replace('-', "_").replace(" ", "_").replace("'", "_").replace("/", "_").replace(",", "_").lower()
        category_idx[category_var_name] += 1

        for instance_idx, data in enumerate(duplicated_assets):
            # Letter-suffix the category only when several uids share it;
            # index-suffix the instance name only when a uid has duplicates.
            category_var_name_final = f"{category_var_name}_{chr(ord('A') + category_idx[category_var_name]-1)}" if category_count[category_var_name] > 1 else category_var_name
            var_name = f"{category_var_name_final}_{instance_idx}" if len(duplicated_assets) > 1 else category_var_name_final

            task["assets"][f"{category_var_name_final}-{instance_idx}"] = {
                "uid": uid,
                "count": len(duplicated_assets),
                "instance_var_name": var_name,
                "asset_var_name": category_var_name_final,
                "instance_idx": instance_idx,
                "annotations": data["annotations"],
                "category": data["annotations"]["category"],
                'description': data['annotations']['description'],
                'path': data['path'],
                'onCeiling': data['annotations'].get('onCeiling', False),
                'onFloor': data['annotations'].get('onFloor', True),
                'onWall': data['annotations'].get('onWall', False),
                'onObject': data['annotations'].get('onObject', False),
                'frontView': data['annotations'].get('frontView', ""),
                'assetMetadata': {
                    # NOTE(review): x and y are swapped relative to data.json —
                    # presumably an axis-convention change between the asset
                    # pipeline and the layout solver; confirm before "fixing".
                    "boundingBox": {
                        "x": float(data['assetMetadata']['boundingBox']['y']),
                        "y": float(data['assetMetadata']['boundingBox']['x']),
                        "z": float(data['assetMetadata']['boundingBox']['z'])
                    },
                }
            }

    return task
|
|
|
|
def run_layoutvlm(scene_config: Dict, save_dir: str, max_retries: int = 5) -> Dict:
    """Run LayoutVLM layout optimization, retrying on failure.

    Args:
        scene_config: scene config prepared by ``prepare_task_assets``.
        save_dir: directory for solver artifacts and ``layout.json``.
        max_retries: total number of attempts before giving up.

    Returns:
        The solved layout dict (also written to ``save_dir/layout.json``).

    Raises:
        RuntimeError: when every attempt fails.
    """
    from src.layoutvlm.layoutvlm import LayoutVLM

    print(f"\n🔧 运行 LayoutVLM (最多重试 {max_retries} 次)...")

    last_error = None
    attempt = 0
    while attempt < max_retries:
        attempt += 1
        try:
            print(f" 尝试 {attempt}/{max_retries}...")

            # A fresh solver per attempt avoids carrying failed state over.
            solver = LayoutVLM(
                mode="one_shot",
                save_dir=save_dir,
                asset_source="objaverse"
            )
            layout = solver.solve(scene_config)

            layout_path = os.path.join(save_dir, 'layout.json')
            with open(layout_path, 'w') as f:
                json.dump(layout, f, indent=2)

            print(f"✅ 布局已保存: {layout_path}")
            return layout
        except Exception as e:
            last_error = e
            print(f" ⚠️ 尝试 {attempt} 失败: {str(e)[:100]}")
            if attempt < max_retries:
                print(f" 🔄 重试中...")

    raise RuntimeError(f"LayoutVLM 在 {max_retries} 次尝试后仍然失败: {last_error}")
|
|
|
|
| |
| |
| |
|
|
def render_with_blender(scene_config: Dict, layout: Dict, save_dir: str, asset_dir: str,
                        views: Optional[List[str]] = None,
                        engine: str = 'CYCLES', samples: int = 256,
                        width: int = 1600, height: int = 900) -> List[str]:
    """Render the solved layout with the high-quality Blender script.

    Converts the layout into the visualization scene format, writes it to a
    temp file, and invokes ``visualize_blender_hq.py`` once per view.

    Fixes: replaced the mutable default argument for ``views``; cleanup of
    the temp scene file is now exception-safe; the subprocess uses
    ``sys.executable`` so it runs in the same interpreter/venv.

    Args:
        scene_config: prepared scene config (asset metadata and boundary).
        layout: mapping of asset key -> {position, rotation} transforms.
        save_dir: output directory for ``render_<view>.png`` files.
        asset_dir: 3D asset directory passed through to the renderer.
        views: camera views to render; defaults to topdown + diagonal.
        engine: Blender render engine name.
        samples: render sample count.
        width: output image width in pixels.
        height: output image height in pixels.

    Returns:
        Paths of the renders that completed successfully.
    """
    # Resolve the default here instead of a shared mutable default argument.
    if views is None:
        views = ['topdown', 'diagonal']

    print("\n🎨 Blender 渲染...")

    scene_objects = []

    for key, transform in layout.items():
        # Layout entries without asset metadata are skipped.
        if key not in scene_config['assets']:
            continue

        asset_info = scene_config['assets'][key]

        # The layout stores rotations in radians; the renderer takes degrees.
        rot_rad = transform.get('rotation', [0, 0, 0])
        rot_deg = [math.degrees(r) for r in rot_rad]

        bbox = asset_info['assetMetadata']['boundingBox']

        scene_objects.append({
            'id': key,
            'model_id': asset_info['uid'],
            'category': asset_info['category'],
            'position': transform.get('position', [0, 0, 0]),
            'rotation': rot_deg,
            'size': [bbox['x'], bbox['y'], bbox['z']]
        })

    scene_for_viz = {
        'boundary': scene_config.get('boundary', {}).get('floor_vertices', []),
        'assets': scene_objects
    }

    temp_scene_path = os.path.join(save_dir, '_temp_scene.json')
    with open(temp_scene_path, 'w') as f:
        json.dump(scene_for_viz, f, indent=2)

    output_files = []

    try:
        for view in views:
            output_path = os.path.join(save_dir, f'render_{view}.png')

            # sys.executable keeps the child on this interpreter/venv instead
            # of whatever "python" happens to resolve to on PATH.
            cmd = [
                sys.executable, 'visualize_blender_hq.py',
                '--scene_path', temp_scene_path,
                '--output', output_path,
                '--asset_dir', asset_dir,
                '--view', view,
                '--engine', engine,
                '--samples', str(samples),
                '--width', str(width),
                '--height', str(height),
                '--fill-lights',
                '--auto-crop',
            ]

            print(f" 渲染 {view} 视角...")

            # Best-effort per view: a failed render drops that view only.
            try:
                result = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
                if result.returncode == 0:
                    output_files.append(output_path)
                    print(f" ✅ {view}: {output_path}")
                else:
                    print(f" ⚠️ {view} 渲染失败: {result.stderr[:200]}")
            except subprocess.TimeoutExpired:
                print(f" ⚠️ {view} 渲染超时")
            except Exception as e:
                print(f" ⚠️ {view} 渲染异常: {e}")
    finally:
        # Always remove the temp scene file, even on interruption.
        if os.path.exists(temp_scene_path):
            os.remove(temp_scene_path)

    return output_files
|
|
|
|
| |
| |
| |
|
|
def run_pipeline(prompt: str, output_dir: str, asset_dir: str,
                 scene_config_path: Optional[str] = None,
                 skip_render: bool = False,
                 render_only: bool = False,
                 render_engine: str = 'CYCLES',
                 render_samples: int = 256,
                 render_width: int = 1600,
                 render_height: int = 900,
                 render_views: Optional[List[str]] = None) -> Dict:
    """End-to-end pipeline: prompt -> spec -> objects -> assets -> layout -> render.

    Fix: replaced the mutable default argument for ``render_views``.

    Args:
        prompt: user's design description.
        output_dir: output directory for all artifacts.
        asset_dir: directory of processed 3D assets.
        scene_config_path: existing scene_config.json to resume from (optional).
        skip_render: generate the layout but skip Blender rendering.
        render_only: only render; requires existing layout.json and
            scene_config.json in ``output_dir``.
        render_engine: Blender engine name.
        render_samples: render sample count.
        render_width: render width in pixels.
        render_height: render height in pixels.
        render_views: camera views; defaults to ['topdown', 'diagonal'].

    Returns:
        Result dict (also written to ``output_dir/result.json`` on full runs).

    Raises:
        FileNotFoundError: in render-only mode when required files are missing.
    """
    if render_views is None:
        render_views = ['topdown', 'diagonal']

    os.makedirs(output_dir, exist_ok=True)

    result = {
        'prompt': prompt,
        'output_dir': output_dir,
        'success': False,
    }

    # Persist the prompt so later render-only runs can recover it.
    with open(os.path.join(output_dir, 'user_input.txt'), 'w') as f:
        f.write(prompt)

    layout_path = os.path.join(output_dir, 'layout.json')
    scene_config_save_path = os.path.join(output_dir, 'scene_config.json')

    if render_only:
        print("🎨 仅渲染模式...")

        if not os.path.exists(layout_path):
            raise FileNotFoundError(f"layout.json not found: {layout_path}")
        if not os.path.exists(scene_config_save_path):
            raise FileNotFoundError(f"scene_config.json not found: {scene_config_save_path}")

        with open(layout_path, 'r') as f:
            layout = json.load(f)
        with open(scene_config_save_path, 'r') as f:
            scene_config = json.load(f)

        # The saved config still has raw uid keys; expand metadata first.
        scene_config = prepare_task_assets(scene_config, asset_dir)

        render_files = render_with_blender(
            scene_config, layout, output_dir, asset_dir,
            views=render_views, engine=render_engine,
            samples=render_samples, width=render_width, height=render_height
        )

        result['render_files'] = render_files
        result['success'] = True
        return result

    if scene_config_path and os.path.exists(scene_config_path):
        print(f"📂 从已有 scene_config 继续: {scene_config_path}")
        with open(scene_config_path, 'r') as f:
            scene_json = json.load(f)
    else:
        print("=" * 60)
        print("🚀 启动 Pipeline")
        print("=" * 60)
        print(f"📝 Prompt: {prompt[:100]}...")
        print()

        print("Step 1: 设置 Azure OpenAI...")
        client = setup_azure_client()
        print("✅ 完成\n")

        print("Step 2: 生成房间规格...")
        room_spec = generate_room_specification(client, prompt)
        print(f" 任务: {room_spec['task_description']}")
        print(f" 尺寸: {room_spec['room_size']}")
        print()

        print("Step 3: 生成物体列表...")
        object_list = generate_object_list(
            client,
            room_spec['task_description'],
            room_spec['layout_criteria'],
            room_spec['room_size']
        )
        print(f" 生成了 {len(object_list)} 种物体")
        for obj_name in list(object_list.keys())[:5]:
            print(f" - {obj_name}: {object_list[obj_name]['count']}个")
        print()

        print("Step 4: 检索 3D Assets...")
        assets = retrieve_3d_assets(object_list, asset_dir)

        print("\nStep 5: 生成 Scene JSON...")
        scene_json = generate_scene_json(
            room_spec['task_description'],
            room_spec['layout_criteria'],
            room_spec['room_size'],
            assets,
            prompt
        )

        with open(scene_config_save_path, 'w') as f:
            json.dump(scene_json, f, indent=2)
        print(f" 保存: {scene_config_save_path}")

    print("\nStep 6: LayoutVLM 布局优化...")
    # Shallow-copy so scene_json keeps its raw uid-keyed assets;
    # prepare_task_assets replaces task["assets"] wholesale, which is why a
    # shallow copy suffices here — confirm before deep-mutating other keys.
    scene_config = prepare_task_assets(scene_json.copy(), asset_dir)
    layout = run_layoutvlm(scene_config, output_dir)

    result['layout_path'] = layout_path
    result['scene_config_path'] = scene_config_save_path
    result['num_assets'] = len(layout)

    if not skip_render:
        print("\nStep 7: Blender 渲染...")
        render_files = render_with_blender(
            scene_config, layout, output_dir, asset_dir,
            views=render_views, engine=render_engine,
            samples=render_samples, width=render_width, height=render_height
        )
        result['render_files'] = render_files

    result['success'] = True

    result_path = os.path.join(output_dir, 'result.json')
    with open(result_path, 'w') as f:
        json.dump(result, f, indent=2)

    print("\n" + "=" * 60)
    print("🎉 Pipeline 完成!")
    print("=" * 60)
    print(f"📁 输出目录: {output_dir}")
    print(f" - scene_config.json")
    print(f" - layout.json")
    if not skip_render:
        print(f" - render_*.png")

    return result
|
|
|
|
def main():
    """CLI entry point: parse arguments and launch the pipeline."""
    ap = argparse.ArgumentParser(description="单个 Prompt 测试 Pipeline")
    ap.add_argument("--prompt", type=str, default=None, help="用户设计描述")
    ap.add_argument("--output", type=str, default="./results/single_test", help="输出目录")
    ap.add_argument("--asset-dir", type=str,
                    default="/home/v-meiszhang/backup/objaverse_processed",
                    help="3D assets 目录")
    ap.add_argument("--scene-config", type=str, default=None,
                    help="已有的 scene_config.json 路径")
    ap.add_argument("--skip-render", action="store_true", help="跳过渲染")
    ap.add_argument("--render-only", action="store_true", help="仅渲染")

    ap.add_argument("--render-engine", type=str, default="CYCLES",
                    choices=["CYCLES", "BLENDER_EEVEE"])
    ap.add_argument("--render-samples", type=int, default=256)
    ap.add_argument("--render-width", type=int, default=1600)
    ap.add_argument("--render-height", type=int, default=900)
    ap.add_argument("--render-views", nargs='+', default=['topdown', 'diagonal'],
                    help="渲染视角 (topdown, diagonal, diagonal2, ...)")

    opts = ap.parse_args()

    # At least one mode of operation must be specified.
    if not (opts.prompt or opts.scene_config or opts.render_only):
        ap.error("需要 --prompt 或 --scene-config 或 --render-only")

    prompt = opts.prompt or ""
    if opts.render_only and not prompt:
        # Recover the prompt saved by a previous full run, if present.
        saved = os.path.join(opts.output, 'user_input.txt')
        if os.path.exists(saved):
            with open(saved, 'r') as f:
                prompt = f.read().strip()

    run_pipeline(
        prompt=prompt,
        output_dir=opts.output,
        asset_dir=opts.asset_dir,
        scene_config_path=opts.scene_config,
        skip_render=opts.skip_render,
        render_only=opts.render_only,
        render_engine=opts.render_engine,
        render_samples=opts.render_samples,
        render_width=opts.render_width,
        render_height=opts.render_height,
        render_views=opts.render_views,
    )
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|