# ZoneMaestro_code / eval / LayoutVLM / end_to_end_pipeline.py
# Uploaded by kkkkiiii via the upload-large-folder tool (commit d5f2893, verified).
"""
完整的端到端Pipeline: 从用户文本描述到3D场景渲染
Pipeline流程:
1. 接受用户的设计文本描述
2. 生成房间规格和布局标准 (layout_criteria)
3. 根据布局标准生成物体列表 (object_list)
4. 检索每个物体对应的3D assets (使用OpenShape embeddings)
5. 生成完整的scene JSON (类似 benchmark_tasks/bedroom/bedroom_2.json)
6. 使用LayoutVLM优化物体布局
7. 渲染最终的3D场景
"""
import os
import json
import argparse
import numpy as np
import collections
from openai import AzureOpenAI
from azure.identity import ChainedTokenCredential, AzureCliCredential, ManagedIdentityCredential, get_bearer_token_provider
import uuid
import re
from typing import Dict, List, Any
from src.layoutvlm.layoutvlm import LayoutVLM
from utils.blender_render import render_existing_scene, reset_blender
import torch
import transformers
from torch.nn import functional as F
from huggingface_hub import hf_hub_download
# ============================================================================
# Step 1: Azure OpenAI Client Setup
# ============================================================================
def setup_azure_client():
    """Build an AzureOpenAI client authenticated through Azure AD credentials.

    Tries the Azure CLI credential first, then the managed identity, via a
    chained credential wrapped in a bearer-token provider.

    Returns:
        An ``AzureOpenAI`` client pointed at the TRAPI shared endpoint.
    """
    token_scope = "api://trapi/.default"
    token_provider = get_bearer_token_provider(
        ChainedTokenCredential(
            AzureCliCredential(),
            ManagedIdentityCredential(),
        ),
        token_scope,
    )
    instance = 'msra/shared'
    return AzureOpenAI(
        azure_endpoint=f'https://trapi.research.microsoft.com/{instance}',
        azure_ad_token_provider=token_provider,
        api_version='2024-12-01-preview',
    )
# ============================================================================
# Step 2: 生成房间规格和布局标准
# ============================================================================
def generate_room_specification(client, user_description: str, model_name: str = 'gpt-4o_2024-11-20') -> Dict:
    """Generate a room specification and layout criteria from a user description.

    Args:
        client: Azure OpenAI chat client.
        user_description: The user's free-text room description.
        model_name: Chat model (deployment) name to query.

    Returns:
        Dict with ``task_description``, ``room_size`` and ``layout_criteria``.

    Raises:
        ValueError: If no JSON object can be extracted from the model reply.
    """
    prompt_template = """
Given a user's room description, return a JSON object containing:
1. task_description: A concise summary of the room (e.g., "a minimalist bedroom with...")
2. room_size: Realistic dimensions in meters (width, length, height)
3. layout_criteria: Detailed layout requirements (1-2 sentences starting with "The layout criteria should...")
Guidelines:
- width: typically 3-8 meters depending on room type
- length: typically 3-10 meters depending on room type
- height: typically 2.4-3.5 meters (standard ceiling height)
- Calculate area = width * length
User's description: USER_DESCRIPTION
Return only the JSON object. Example format:
{
"task_description": "a minimalist bedroom with a low-profile queen bed",
"room_size": {
"width": 4.0,
"length": 4.0,
"height": 2.4,
"area": 16.0
},
"layout_criteria": "The layout criteria should follow the task description and position the bed against the center wall with ample access space, maintaining a minimalist aesthetic with open floor space."
}
"""
    prompt = prompt_template.replace("USER_DESCRIPTION", user_description)
    chat_messages = [
        {
            "role": "system",
            "content": "You are an expert interior designer. Always return valid JSON format.",
        },
        {
            "role": "user",
            "content": prompt,
        },
    ]
    reply = client.chat.completions.create(
        model=model_name,
        messages=chat_messages,
        temperature=1.0,
    )
    raw = reply.choices[0].message.content
    # Pull the (possibly fenced/prefixed) JSON object out of the reply text.
    match = re.search(r'\{.*\}', raw, re.DOTALL)
    if match is None:
        raise ValueError(f"Failed to parse JSON from response: {raw}")
    return json.loads(match.group(0))
# ============================================================================
# Step 3: 生成物体列表
# ============================================================================
def generate_object_list(client, task_description: str, layout_criteria: str,
                         room_size: Dict, model_name: str = "gpt-4o_2024-11-20") -> Dict:
    """Generate the list of objects to place, given the task and layout criteria.

    Args:
        client: Azure OpenAI chat client.
        task_description: Short summary of the room.
        layout_criteria: Layout requirements text.
        room_size: Dict with at least ``width`` and ``length`` (meters);
            ``area`` is computed if absent.
        model_name: Chat model (deployment) name to query.

    Returns:
        Dict of the form ``{"object_name": {"count": int, "types": int,
        "description": str}}``.

    Raises:
        ValueError: If no JSON object can be extracted from the model reply.
    """
    room_size_str = f"{room_size['width']}m x {room_size['length']}m (area: {room_size.get('area', room_size['width']*room_size['length']):.1f} sq m)"
    object_list_prompt = """
Given a room description and layout criteria, generate a comprehensive list of objects to be placed.
Requirements:
1. Object names should be 2-3 words, specific and concise (e.g., "wooden chair", "desk lamp")
2. Only singular objects (use "chair" not "chairs")
3. Ensure rooms have seating and surfaces (tables, counters, etc.)
4. Include at least 10-15 relevant objects
5. Each object needs:
- "count": number of instances
- "types": number of different variations
- "description": Detailed 1-2 sentence description for 3D asset retrieval
Task description: TASK_DESCRIPTION
Layout criteria: LAYOUT_CRITERIA
Room size: ROOM_SIZE
Return JSON format like:
{
"desk": {
"count": 1,
"types": 1,
"description": "A rectangular wooden desk with drawers and a smooth work surface"
},
"office_chair": {
"count": 1,
"types": 1,
"description": "An ergonomic office chair with adjustable height and back support"
}
}
Return only the JSON object.
"""
    # Fill the placeholders in one chained pass.
    prompt = (object_list_prompt
              .replace("TASK_DESCRIPTION", task_description)
              .replace("LAYOUT_CRITERIA", layout_criteria)
              .replace("ROOM_SIZE", room_size_str))
    reply = client.chat.completions.create(
        model=model_name,
        messages=[
            {
                "role": "system",
                "content": "You are an expert interior designer generating object lists. Always return valid JSON.",
            },
            {
                "role": "user",
                "content": prompt,
            },
        ],
        temperature=1.0,
    )
    raw = reply.choices[0].message.content
    match = re.search(r'\{.*\}', raw, re.DOTALL)
    if match is None:
        raise ValueError(f"Failed to parse object list JSON: {raw}")
    return json.loads(match.group(0))
# ============================================================================
# Step 4: 检索3D Assets (使用OpenShape embeddings)
# ============================================================================
# Module-level caches so the CLIP model and OpenShape data are loaded only once.
_CLIP_MODEL_CACHE = None
_OPENSHAPE_DATA_CACHE = None
def load_openclip_model():
    """Load the CLIP model and processor used for asset retrieval (cached).

    Returns:
        Tuple ``(clip_model, clip_processor)``; loaded once and reused via
        the module-level cache.
    """
    global _CLIP_MODEL_CACHE
    if _CLIP_MODEL_CACHE is None:
        print(" 加载CLIP模型...")
        # fp16 on GPU, bf16 on CPU.
        dtype = torch.float16 if torch.cuda.is_available() else torch.bfloat16
        model = transformers.CLIPModel.from_pretrained(
            "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
            low_cpu_mem_usage=True,
            torch_dtype=dtype,
            offload_state_dict=True,
        )
        processor = transformers.CLIPProcessor.from_pretrained(
            "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
        )
        if torch.cuda.is_available():
            model.cuda()
        _CLIP_MODEL_CACHE = (model, processor)
        print(" ✓ CLIP模型加载完成")
    return _CLIP_MODEL_CACHE
def load_openshape_embeddings():
    """Load OpenShape's precomputed Objaverse embeddings (cached).

    Downloads (or reuses) the metadata JSON and the embedding tensor from the
    HuggingFace dataset, then caches the parsed result at module level.

    Returns:
        Tuple ``(meta, uids, feats)`` where ``meta`` maps asset uid -> metadata
        entry, ``uids`` is the uid list and ``feats`` the embedding matrix.
    """
    global _OPENSHAPE_DATA_CACHE
    if _OPENSHAPE_DATA_CACHE is not None:
        return _OPENSHAPE_DATA_CACHE
    print(" 加载OpenShape embeddings...")
    # Load the metadata. Fix: use a context manager — the original called
    # json.load(open(...)) and leaked the file handle.
    meta_path = hf_hub_download(
        "OpenShape/openshape-objaverse-embeddings",
        "objaverse_meta.json",
        token=True,
        repo_type='dataset',
        local_dir="OpenShape-Embeddings"
    )
    with open(meta_path) as f:
        meta = json.load(f)
    meta = {x['u']: x for x in meta['entries']}
    # Load the embedding tensor onto the CPU.
    deser = torch.load(
        hf_hub_download(
            "OpenShape/openshape-objaverse-embeddings",
            "objaverse.pt",
            token=True,
            repo_type='dataset',
            local_dir="OpenShape-Embeddings"
        ),
        map_location='cpu'
    )
    _OPENSHAPE_DATA_CACHE = (meta, deser['us'], deser['feats'])
    print(" ✓ OpenShape embeddings加载完成")
    return _OPENSHAPE_DATA_CACHE
def retrieve_single_object(description: str, top: int = 5, sim_th: float = 0.1,
                           face_max: int = 100000, asset_dir: str = None) -> List[Dict]:
    """Retrieve candidate 3D assets for one object via OpenShape text search.

    Args:
        description: Natural-language description of the object.
        top: Maximum number of results to return.
        sim_th: Minimum cosine-similarity threshold for a match.
        face_max: Skip meshes with more faces than this.
        asset_dir: Local asset directory; if given, only assets present
            there are returned.

    Returns:
        List of match dicts with keys ``uid``, ``similarity``, ``name``,
        ``faces``, ``tags`` and ``categories``, sorted by similarity.
    """
    # Load the (cached) CLIP model and precomputed OpenShape embeddings.
    clip_model, clip_prep = load_openclip_model()
    meta, us, feats = load_openshape_embeddings()
    # Text preprocessing: drop digits and turn underscores into spaces.
    text = re.sub(r'\d', '', description).replace("_", " ")
    # Encode the query text with CLIP.
    device = clip_model.device
    tn = clip_prep(
        text=[text],
        return_tensors='pt',
        truncation=True,
        max_length=76  # NOTE(review): CLIP's usual limit is 77 tokens — confirm 76 is intentional
    ).to(device)
    with torch.no_grad():
        enc = clip_model.get_text_features(**tn).float().cpu()
    # Cosine similarity against all asset embeddings, chunked to bound memory.
    sims = []
    embedding = F.normalize(enc.detach().cpu(), dim=-1).squeeze()
    for chunk in torch.split(feats, 10240):
        sims.append(embedding @ F.normalize(chunk.float(), dim=-1).T)
    sims = torch.cat(sims)
    sims, idx = torch.sort(sims, descending=True)
    # Keep only entries above the similarity threshold.
    sim_mask = sims > sim_th
    sims = sims[sim_mask]
    idx = idx[sim_mask]
    # Collect results, filtering by face count and (optionally) local availability.
    results = []
    available_assets = None
    if asset_dir and os.path.exists(asset_dir):
        available_assets = set(os.listdir(asset_dir))
    for i, sim in zip(idx, sims):
        uid = us[i]
        if uid in meta:
            obj_meta = meta[uid]
            # Skip overly dense meshes.
            if obj_meta['faces'] > face_max:
                continue
            # If a local directory was given, only keep assets that exist there.
            # NOTE(review): an *empty* available_assets set is falsy, so an empty
            # asset_dir silently disables this filter — confirm that is intended.
            if available_assets and uid not in available_assets:
                continue
            results.append({
                'uid': uid,
                'similarity': float(sim),
                'name': obj_meta['name'],
                'faces': obj_meta['faces'],
                'tags': obj_meta.get('tags', []),
                'categories': obj_meta.get('cats', [])
            })
            if len(results) >= top:
                break
    return results
def retrieve_3d_assets(object_list: Dict, asset_dir: str,
                       use_openshape: bool = True, max_scan: int = 1000) -> Dict:
    """Resolve each requested object to concrete 3D asset instances.

    Args:
        object_list: Mapping of object name -> {count, types, description}.
        asset_dir: Directory of processed Objaverse assets.
        use_openshape: Use OpenShape semantic retrieval; otherwise pick randomly.
        max_scan: Cap on assets scanned in random mode.

    Returns:
        Dict of asset-instance keys (benchmark_tasks style) mapped to empty
        metadata dicts, to be filled later by prepare_task_assets.
    """
    print("\n🔍 检索3D Assets...")
    if not use_openshape:
        print(" 使用随机选择模式...")
        return retrieve_3d_assets_random(object_list, asset_dir, max_scan)
    print(" 使用OpenShape语义检索...")
    assets = {}
    total_instances = 0
    for obj_name, obj_info in object_list.items():
        description = obj_info.get("description", obj_name)
        count = obj_info.get("count", 1)
        types_needed = obj_info.get("types", 1)
        print(f" 检索: {obj_name} (需要{types_needed}种类型, 共{count}个实例)")
        print(f" 描述: {description}")
        try:
            # Ask for extra candidates so we can pick several distinct types.
            matches = retrieve_single_object(
                description,
                top=max(10, types_needed * 3),
                sim_th=0.1,
                face_max=100000,
                asset_dir=asset_dir
            )
            if not matches:
                print(f" ⚠️ 未找到匹配的资产,跳过 {obj_name}")
                continue
            chosen = matches[:types_needed]
            print(f" ✓ 找到 {len(chosen)} 个匹配:")
            for hit in chosen:
                print(f" - {hit['name']} (相似度: {hit['similarity']:.3f}, 面数: {hit['faces']})")
            # Materialize one key per instance. Single-type objects omit the
            # type index from the key; multi-type objects include it.
            for type_idx, hit in enumerate(chosen):
                uid = hit['uid']
                prefix = uid if types_needed == 1 else f"{uid}-{type_idx}"
                for instance_idx in range(count):
                    assets[f"{prefix}-{instance_idx}"] = {}
                    total_instances += 1
        except Exception as e:
            print(f" ⚠️ 检索失败: {e}")
            continue
    print(f"✅ 已检索 {total_instances} 个asset实例 (共{len(assets)}个唯一资产)")
    return assets
def retrieve_3d_assets_random(object_list: Dict, asset_dir: str, max_scan: int = 1000) -> Dict:
    """Fallback: randomly assign locally available assets to each object.

    Args:
        object_list: Mapping of object name -> {count, types, ...}.
        asset_dir: Directory whose subfolders (each holding a data.json) are
            usable assets.
        max_scan: Scan at most this many usable asset folders.

    Returns:
        Dict of "<uid>-<type_idx>-<instance_idx>" keys -> empty metadata dicts.

    Raises:
        ValueError: If no usable assets are found under ``asset_dir``.
    """
    import random
    assets = {}
    # Scan the asset directory for usable folders (those with a data.json).
    usable = []
    if os.path.exists(asset_dir):
        print(f" 扫描asset目录 (最多{max_scan}个)...")
        scanned = 0
        for entry in os.listdir(asset_dir):
            if scanned >= max_scan:
                break
            folder = os.path.join(asset_dir, entry)
            if os.path.isdir(folder) and os.path.exists(os.path.join(folder, "data.json")):
                usable.append(entry)
                scanned += 1
    if not usable:
        raise ValueError(f"No assets found in {asset_dir}")
    print(f" 找到 {len(usable)} 个可用assets")
    # Randomly assign assets to each requested object type.
    placed = 0
    for obj_name, obj_info in object_list.items():
        count = obj_info.get("count", 1)
        types_needed = obj_info.get("types", 1)
        picks = random.sample(usable, min(types_needed, len(usable)))
        for type_idx, asset_id in enumerate(picks):
            for instance_idx in range(count):
                assets[f"{asset_id}-{type_idx}-{instance_idx}"] = {}
                placed += 1
    print(f"✅ 已分配 {placed} 个asset实例")
    return assets
# ============================================================================
# Step 5: 生成完整的Scene JSON
# ============================================================================
def generate_scene_json(task_description: str, layout_criteria: str,
                        room_size: Dict, assets: Dict) -> Dict:
    """Assemble the scene JSON in the benchmark_tasks format.

    Args:
        task_description: Short summary of the room.
        layout_criteria: Layout requirements text.
        room_size: Dict with ``width``/``length`` and optional ``height``
            (defaults to 2.4 m).
        assets: Asset-instance dict produced by retrieve_3d_assets.

    Returns:
        Scene dict with ``task_description``, ``layout_criteria``, a
        rectangular floor ``boundary`` and the ``assets`` mapping.
    """
    w = room_size['width']
    d = room_size['length']
    ceiling = room_size.get('height', 2.4)
    # Axis-aligned rectangular floor with the origin at one corner.
    corners = [
        [0, 0, 0],
        [w, 0, 0],
        [w, d, 0],
        [0, d, 0],
    ]
    return {
        "task_description": task_description,
        "layout_criteria": layout_criteria,
        "boundary": {
            "floor_vertices": corners,
            "wall_height": ceiling,
        },
        "assets": assets,
    }
# ============================================================================
# Step 6 & 7: 使用LayoutVLM优化布局并渲染
# ============================================================================
def prepare_task_assets(task, asset_dir):
    """Expand the task's asset uid keys into per-instance metadata entries.

    (Copied from main.py.) For each asset referenced in task["assets"], loads
    <asset_dir>/<uid>/data.json, derives a sanitized category-based variable
    name, and rebuilds task["assets"] keyed by
    "<category_var_name>-<instance_idx>".

    Args:
        task: Scene dict whose "assets" keys look like "<uid>-<suffix>".
        asset_dir: Root directory of processed assets.

    Returns:
        The same task dict, with "assets" replaced by enriched entries.
    """
    if "layout_criteria" not in task:
        task["layout_criteria"] = "the layout should follow the task description and adhere to common sense"
    all_data = collections.defaultdict(list)
    for original_uid in task["assets"].keys():
        # Drop the trailing instance index to recover the asset uid; duplicate
        # instances of the same uid collapse into one all_data entry.
        # NOTE(review): for keys of the form "<uid>-<type>-<idx>" this leaves
        # "<uid>-<type>", which would not match an asset directory — confirm
        # the upstream key format only appends one suffix here.
        uid = '-'.join(original_uid.split('-')[:-1])
        data_path = os.path.join(asset_dir, uid, "data.json")
        if not os.path.exists(data_path):
            print(f"⚠️ Warning: Asset data not found for {uid}")
            continue
        with open(data_path, "r") as f:
            data = json.load(f)
        data['path'] = os.path.join(asset_dir, uid, f"{uid}.glb")
        all_data[uid].append(data)
    # First pass: count how many distinct uids share each sanitized category
    # name; shared names get an A/B/C… suffix in the second pass.
    category_count = collections.defaultdict(int)
    for uid, duplicated_assets in all_data.items():
        category_var_name = duplicated_assets[0]['annotations']['category']
        category_var_name = category_var_name.replace('-', "_").replace(" ", "_").replace("'", "_").replace("/", "_").replace(",", "_").lower()
        category_count[category_var_name] += 1
    task["assets"] = {}
    category_idx = collections.defaultdict(int)
    for uid, duplicated_assets in all_data.items():
        category_var_name = duplicated_assets[0]['annotations']['category']
        category_var_name = category_var_name.replace('-', "_").replace(" ", "_").replace("'", "_").replace("/", "_").replace(",", "_").lower()
        category_idx[category_var_name] += 1
        for instance_idx, data in enumerate(duplicated_assets):
            # e.g. "sofa_A" when several uids map to "sofa"; plain "sofa" otherwise.
            category_var_name_final = f"{category_var_name}_{chr(ord('A') + category_idx[category_var_name]-1)}" if category_count[category_var_name] > 1 else category_var_name
            # Per-instance variable name gets a numeric suffix only when the
            # same uid appears more than once.
            var_name = f"{category_var_name_final}_{instance_idx}" if len(duplicated_assets) > 1 else category_var_name_final
            task["assets"][f"{category_var_name_final}-{instance_idx}"] = {
                "uid": uid,
                "count": len(duplicated_assets),
                "instance_var_name": var_name,
                "asset_var_name": category_var_name_final,
                "instance_idx": instance_idx,
                "annotations": data["annotations"],
                "category": data["annotations"]["category"],
                'description': data['annotations']['description'],
                'path': data['path'],
                'onCeiling': data['annotations']['onCeiling'],
                'onFloor': data['annotations']['onFloor'],
                'onWall': data['annotations']['onWall'],
                'onObject': data['annotations']['onObject'],
                'frontView': data['annotations']['frontView'],
                'assetMetadata': {
                    # NOTE(review): the bounding box x and y are swapped here —
                    # presumably converting between coordinate conventions;
                    # confirm against the renderer's axis convention.
                    "boundingBox": {
                        "x": float(data['assetMetadata']['boundingBox']['y']),
                        "y": float(data['assetMetadata']['boundingBox']['x']),
                        "z": float(data['assetMetadata']['boundingBox']['z'])
                    },
                }
            }
    return task
def render_final_scene(scene_config: Dict, layout_json: Dict, save_dir: str):
    """Render the final 3D scene in two variants.

    1. Annotated — with coordinate marks and object/wall labels (debugging).
    2. Clean — the bare scene with no overlays (final presentation).

    Args:
        scene_config: Prepared scene/task configuration.
        layout_json: Optimized object placements.
        save_dir: Root output directory; images go to <save_dir>/final_rendering.

    Returns:
        List of rendered image paths, or an empty list if rendering failed.
    """
    print("\n🎨 渲染最终3D场景...")
    output_dir = os.path.join(save_dir, "final_rendering")
    os.makedirs(output_dir, exist_ok=True)

    def _render(annotate, suffix):
        # One Blender pass; `annotate` toggles coordinate marks and labels.
        images, _marks = render_existing_scene(
            placed_assets=layout_json,
            task=scene_config,
            save_dir=output_dir,
            high_res=True,
            render_top_down=True,
            add_coordinate_mark=annotate,
            annotate_object=annotate,
            annotate_wall=annotate,
            combine_obj_components=True,
            fov_multiplier=1.3,
            topdown_save_file=os.path.join(output_dir, f'scene_top_down_{suffix}.png'),
            sideview_save_file=os.path.join(output_dir, f'scene_side_view_{suffix}.png'),
        )
        reset_blender()
        return images

    try:
        print(" 📊 渲染带标注版本...")
        annotated_images = _render(True, 'annotated')
        print(" 🎬 渲染干净版本...")
        clean_images = _render(False, 'clean')
        all_images = annotated_images + clean_images
        print(f"\n✅ 最终渲染完成! 图片保存在: {output_dir}")
        print(f" 📊 带标注版本 ({len(annotated_images)} 张):")
        for img_path in annotated_images:
            print(f" - {os.path.basename(img_path)}")
        print(f" 🎬 干净版本 ({len(clean_images)} 张):")
        for img_path in clean_images:
            print(f" - {os.path.basename(img_path)}")
        return all_images
    except Exception as e:
        print(f"❌ 渲染失败: {e}")
        import traceback
        traceback.print_exc()
        return []
# ============================================================================
# Main Pipeline
# ============================================================================
def main():
    """Run the full pipeline: text -> room spec -> objects -> assets -> layout -> render."""
    parser = argparse.ArgumentParser(description="端到端Pipeline: 从用户文本到3D场景渲染")
    parser.add_argument("--user_text", type=str, required=True,
                        help="用户的房间设计描述文本")
    parser.add_argument("--asset_dir", type=str,
                        default="/home/v-meiszhang/backup/objaverse_processed",
                        help="Objaverse处理后的assets目录")
    parser.add_argument("--save_dir", type=str,
                        default="./results/end_to_end_output",
                        help="输出目录")
    parser.add_argument("--skip_layout_optimization", action="store_true",
                        help="跳过布局优化,仅生成scene JSON")
    parser.add_argument("--random_selection", action="store_true",
                        help="使用随机选择模式 (默认使用OpenShape语义检索)")
    args = parser.parse_args()
    # Create the output directory.
    os.makedirs(args.save_dir, exist_ok=True)
    print("=" * 80)
    print("🚀 启动端到端Pipeline")
    print("=" * 80)
    print(f"📝 用户描述: {args.user_text}")
    print(f"📁 Assets目录: {args.asset_dir}")
    print(f"💾 输出目录: {args.save_dir}")
    print()
    # Step 1: set up the Azure OpenAI client.
    print("Step 1: 设置Azure OpenAI客户端...")
    client = setup_azure_client()
    print("✅ 客户端设置完成\n")
    # Step 2: generate the room specification and layout criteria.
    print("Step 2: 生成房间规格和布局标准...")
    room_spec = generate_room_specification(client, args.user_text)
    print(f"✅ 房间规格:")
    print(f" 任务描述: {room_spec['task_description']}")
    print(f" 尺寸: {room_spec['room_size']}")
    print(f" 布局标准: {room_spec['layout_criteria'][:100]}...\n")
    # Step 3: generate the object list.
    print("Step 3: 生成物体列表...")
    object_list = generate_object_list(
        client,
        room_spec['task_description'],
        room_spec['layout_criteria'],
        room_spec['room_size']
    )
    print(f"✅ 生成了 {len(object_list)} 种物体:")
    for obj_name, obj_info in list(object_list.items())[:5]:
        print(f" - {obj_name}: {obj_info['count']}个")
    if len(object_list) > 5:
        print(f" ... 还有 {len(object_list)-5} 种物体\n")
    # Step 4: retrieve the 3D assets.
    print("Step 4: 检索3D Assets...")
    use_openshape = not args.random_selection  # OpenShape by default, unless --random_selection
    assets = retrieve_3d_assets(object_list, args.asset_dir, use_openshape=use_openshape)
    # Step 5: generate and save the scene JSON.
    print("\nStep 5: 生成Scene JSON...")
    scene_json = generate_scene_json(
        room_spec['task_description'],
        room_spec['layout_criteria'],
        room_spec['room_size'],
        assets
    )
    scene_json_path = os.path.join(args.save_dir, "scene_config.json")
    with open(scene_json_path, 'w') as f:
        json.dump(scene_json, f, indent=2)
    print(f"✅ Scene JSON已保存: {scene_json_path}\n")
    if args.skip_layout_optimization:
        print("⏭️ 跳过布局优化步骤")
        return
    # Step 6: prepare the asset metadata and run LayoutVLM optimization.
    print("Step 6: 使用LayoutVLM优化布局...")
    scene_config = prepare_task_assets(scene_json, args.asset_dir)
    layout_solver = LayoutVLM(
        mode="one_shot",
        save_dir=args.save_dir,
        asset_source="objaverse"
    )
    layout = layout_solver.solve(scene_config)
    layout_path = os.path.join(args.save_dir, 'layout.json')
    with open(layout_path, 'w') as f:
        json.dump(layout, f, indent=2)
    print(f"✅ 布局优化完成: {layout_path}\n")
    # Step 7: render the final scene.
    print("Step 7: 渲染最终3D场景...")
    render_final_scene(scene_config, layout, args.save_dir)
    print("\n" + "=" * 80)
    print("🎉 Pipeline完成!")
    print("=" * 80)
    print(f"📁 所有输出文件在: {args.save_dir}")
    print(f" - scene_config.json: 场景配置")
    print(f" - layout.json: 优化后的布局")
    print(f" - final_rendering/: 最终渲染图片")
    print(f" - final.gif: 优化过程动画")
if __name__ == "__main__":
main()