# LLFF / merge_gs.py
# Uploaded by SlekLi (commit d2a9a7f, verified)
import numpy as np
from plyfile import PlyData, PlyElement
from sklearn.cluster import AgglomerativeClustering
from scipy.spatial.transform import Rotation as R
import os
def read_ply(ply_path):
    """Load a 3D Gaussian Splatting .ply file into plain numpy arrays.

    Args:
        ply_path: path to the input .ply file.

    Returns:
        dict with keys:
            positions: (N, 3) xyz coordinates
            opacities: (N, 1) opacity values as stored in the file
            scales:    (N, 3) scale_0..scale_2
            rotations: (N, 4) rot_0..rot_3 quaternion components
                       (w-first vs x-first depends on the exporter)
            dc:        (N, 3) 0th-order SH color coefficients
            sh_rest:   (N, K) higher-order SH coefficients, or None when
                       the file has no f_rest_* properties
            plydata:   the raw PlyData object, kept for later saving
    """
    plydata = PlyData.read(ply_path)
    vertex = plydata['vertex']

    def columns(*names):
        # Stack the named per-vertex properties into an (N, len(names)) array.
        return np.stack([vertex[name] for name in names], axis=1)

    rest_names = [n for n in vertex.data.dtype.names if n.startswith('f_rest_')]
    return {
        'positions': columns('x', 'y', 'z'),
        'opacities': vertex['opacity'][:, np.newaxis],
        'scales': columns('scale_0', 'scale_1', 'scale_2'),
        'rotations': columns('rot_0', 'rot_1', 'rot_2', 'rot_3'),
        'dc': columns('f_dc_0', 'f_dc_1', 'f_dc_2'),
        'sh_rest': columns(*rest_names) if rest_names else None,
        'plydata': plydata  # original data retained for saving later
    }
def quaternion_to_rotation_matrix(q):
    """Convert a quaternion to a 3x3 rotation matrix.

    Assumes scipy's [x, y, z, w] component order as stored.
    NOTE(review): the 3DGS rot_0..rot_3 convention is exporter-dependent
    ([w,x,y,z] in some exporters) — confirm against the file's producer.

    The previous implementation used a bare ``except:`` with a component
    re-ordering fallback; that branch was dead code, since ``from_quat``
    only raises for a zero-norm quaternion and re-ordering a zero-norm
    quaternion is still zero-norm.

    Args:
        q: array-like of 4 quaternion components (need not be normalized;
           scipy normalizes internally).

    Returns:
        (3, 3) ndarray rotation matrix.  A degenerate all-zero quaternion
        yields the identity instead of raising, so one bad splat cannot
        abort the whole merge.
    """
    try:
        return R.from_quat(q).as_matrix()
    except ValueError:
        # Zero-norm (degenerate) quaternion: treat as "no rotation".
        return np.eye(3)
def compute_covariance(rotation, scale):
    """Assemble a Gaussian's 3x3 covariance from rotation and scale.

    Implements Sigma = R * S * S^T * R^T, where S = diag(scale); since S
    is diagonal, S * S^T is simply diag(scale**2).

    Args:
        rotation: quaternion (4 components) for this Gaussian.
        scale: length-3 per-axis scale values.

    Returns:
        (3, 3) covariance matrix.
    """
    rot_mat = quaternion_to_rotation_matrix(rotation)
    scale_sq = np.diag(np.asarray(scale) ** 2)
    return rot_mat @ scale_sq @ rot_mat.T
def covariance_to_rotation_scale(cov):
    """Factor a covariance matrix back into (quaternion, per-axis scale).

    Uses the symmetric eigendecomposition Sigma = V * Lambda * V^T: the
    eigenvector basis V is the rotation and sqrt(Lambda) the axis scales.

    Args:
        cov: (3, 3) symmetric covariance matrix.

    Returns:
        (rotation, scale): rotation as an [x, y, z, w] quaternion array,
        scale as a length-3 array.
    """
    eigvals, eigvecs = np.linalg.eigh(cov)
    # Clamp tiny/negative eigenvalues so the square root stays real.
    scale = np.sqrt(np.maximum(eigvals, 1e-7))
    # eigh may return a reflection; flip one axis to keep det(V) = +1
    # (right-handed frame) so it is a valid rotation.
    if np.linalg.det(eigvecs) < 0:
        eigvecs[:, 0] = -eigvecs[:, 0]
    quat = R.from_matrix(eigvecs).as_quat()  # scipy order: [x, y, z, w]
    return quat, scale
def dc_to_rgb(dc):
    """Decode 0th-order SH (DC) coefficients into RGB clamped to [0, 1]."""
    SH_C0 = 0.28209479177387814  # Y_0^0 basis constant, 1 / (2 * sqrt(pi))
    return np.clip(SH_C0 * dc + 0.5, 0, 1)
def rgb_to_dc(rgb):
    """Encode RGB back into 0th-order SH (DC) coefficients (inverse of dc_to_rgb)."""
    SH_C0 = 0.28209479177387814  # Y_0^0 basis constant, 1 / (2 * sqrt(pi))
    return (rgb - 0.5) / SH_C0
def build_octree(positions, max_points=5000):
    """Recursively partition points into axis-aligned octree leaf cells.

    Splits the bounding box of ``positions`` until every leaf holds at most
    ``max_points`` points or a maximum depth is reached (the depth cap
    guards against unbounded recursion when many points coincide).

    Fix over the previous half-open ``[sub_min, sub_max)`` membership test:
    points are now assigned to a child by comparing each coordinate against
    the cell center (``>= center`` selects the upper half), so every point
    lands in exactly one octant.  Previously, any point lying exactly on
    the parent's max face — always including the global bbox-max point —
    matched no child and was silently dropped.

    Args:
        positions: (N, 3) array of point positions.
        max_points: maximum number of points per leaf cell.

    Returns:
        list of dicts with keys 'indices' (int array into positions),
        'bbox_min', 'bbox_max'.  The union of all 'indices' covers every
        point exactly once.
    """
    cells = []

    def subdivide(indices, bbox_min, bbox_max, depth=0):
        if len(indices) <= max_points or depth > 10:  # depth cap, see docstring
            cells.append({
                'indices': indices,
                'bbox_min': bbox_min,
                'bbox_max': bbox_max
            })
            return
        center = (bbox_min + bbox_max) / 2
        pts = positions[indices]
        # Octant id per point: bit k is set when coordinate k >= center[k].
        octant = (
            (pts[:, 0] >= center[0]).astype(int)
            | ((pts[:, 1] >= center[1]).astype(int) << 1)
            | ((pts[:, 2] >= center[2]).astype(int) << 2)
        )
        for i in range(8):
            sub_indices = indices[octant == i]
            if len(sub_indices) == 0:
                continue
            upper = [i & 1, (i >> 1) & 1, (i >> 2) & 1]  # per-axis half flags
            sub_min = np.where(upper, center, bbox_min)
            sub_max = np.where(upper, bbox_max, center)
            subdivide(sub_indices, sub_min, sub_max, depth + 1)

    bbox_min = positions.min(axis=0)
    bbox_max = positions.max(axis=0)
    subdivide(np.arange(len(positions)), bbox_min, bbox_max)
    return cells
def cluster_and_merge_cell(data, cell_indices, bbox_min, bbox_max):
    """Cluster the Gaussians of a single octree cell and merge ~4:1.

    Points are hierarchically clustered on a joint position+color feature,
    then each cluster is collapsed into one Gaussian: position/color are
    opacity*volume-weighted means, the covariance is the weighted mixture
    covariance (re-factored into rotation+scale), and opacity is chosen to
    conserve total opacity*volume mass.

    NOTE(review): this treats the stored 'opacity' and 'scale_*' values as
    linear quantities; some 3DGS exporters store them as logit/log values —
    confirm against the producer of the input file.

    Args:
        data: dict from read_ply().
        cell_indices: int array of indices into data's arrays.
        bbox_min, bbox_max: cell bounds (length-3 arrays).

    Returns:
        dict of merged arrays ('positions', 'opacities', 'scales',
        'rotations', 'dc', optionally 'sh_rest'), or None for an empty
        cell.  Cells with fewer than 4 points are returned unmerged
        (previously they returned None and those Gaussians were silently
        dropped from the output).
    """
    if len(cell_indices) == 0:
        return None
    if len(cell_indices) < 4:
        # Too small to cluster 4:1 — pass the originals through unchanged.
        return {
            'positions': data['positions'][cell_indices],
            'opacities': data['opacities'][cell_indices].reshape(-1),
            'scales': data['scales'][cell_indices],
            'rotations': data['rotations'][cell_indices],
            'dc': data['dc'][cell_indices],
            'sh_rest': (data['sh_rest'][cell_indices]
                        if data['sh_rest'] is not None else None),
        }
    # Target roughly a 4:1 reduction.
    n_clusters = max(1, len(cell_indices) // 4)
    # Slice out this cell's attributes.
    positions = data['positions'][cell_indices]
    dc = data['dc'][cell_indices]
    opacities = data['opacities'][cell_indices]
    scales = data['scales'][cell_indices]
    rotations = data['rotations'][cell_indices]
    sh_rest_cell = data['sh_rest'][cell_indices] if data['sh_rest'] is not None else None
    # Normalize positions into [0, 1] inside the cell; clamp the extent to
    # avoid division by zero for flat/degenerate cells.
    cell_size = np.maximum(bbox_max - bbox_min, 1e-6)
    norm_positions = (positions - bbox_min) / cell_size
    rgb = dc_to_rgb(dc)
    # Feature = [position @ 80% weight, color @ 20% weight]; sqrt of the
    # weights because Euclidean distance squares each feature.
    features = np.concatenate([
        norm_positions * np.sqrt(0.8),
        rgb * np.sqrt(0.2)
    ], axis=1)
    labels = AgglomerativeClustering(
        n_clusters=n_clusters,
        linkage='ward'
    ).fit_predict(features)
    merged_data = {
        'positions': [],
        'opacities': [],
        'scales': [],
        'rotations': [],
        'dc': [],
        'sh_rest': [] if sh_rest_cell is not None else None
    }
    for cluster_id in range(n_clusters):
        cluster_indices = np.where(labels == cluster_id)[0]
        if len(cluster_indices) == 0:
            continue
        # Merge weight per Gaussian: opacity * (scale product as a volume proxy).
        volumes = scales[cluster_indices].prod(axis=1, keepdims=True)
        weights = opacities[cluster_indices] * volumes
        weights_sum = weights.sum()
        if abs(weights_sum) < 1e-12:
            # Degenerate total weight would yield NaN normalized weights
            # (previously an unguarded division); fall back to uniform.
            normalized_weights = np.full_like(weights, 1.0 / len(cluster_indices))
        else:
            normalized_weights = weights / weights_sum
        # Weighted means of position and color.
        merged_position = (positions[cluster_indices] * normalized_weights).sum(axis=0)
        merged_dc = (dc[cluster_indices] * normalized_weights).sum(axis=0)
        if sh_rest_cell is not None:
            merged_sh_rest = (sh_rest_cell[cluster_indices] * normalized_weights).sum(axis=0)
        # Mixture covariance:
        # Sigma_new = sum_i w_i * (Sigma_i + (mu_i - mu_new)(mu_i - mu_new)^T)
        merged_cov = np.zeros((3, 3))
        for i, idx in enumerate(cluster_indices):
            cov = compute_covariance(rotations[idx], scales[idx])
            diff = positions[idx] - merged_position
            merged_cov += normalized_weights[i, 0] * (cov + np.outer(diff, diff))
        merged_rotation, merged_scale = covariance_to_rotation_scale(merged_cov)
        # Mass conservation: opacity_new * volume_new = sum(opacity_i * volume_i).
        merged_volume = merged_scale.prod()
        merged_opacity = (weights_sum / merged_volume
                          if merged_volume > 1e-10
                          else opacities[cluster_indices].mean())
        merged_opacity = np.clip(merged_opacity, 0, 1)
        merged_data['positions'].append(merged_position)
        merged_data['opacities'].append(merged_opacity)
        merged_data['scales'].append(merged_scale)
        merged_data['rotations'].append(merged_rotation)
        merged_data['dc'].append(merged_dc)
        if sh_rest_cell is not None:
            merged_data['sh_rest'].append(merged_sh_rest)
    # Stack per-cluster results into arrays.
    for key in merged_data:
        if merged_data[key] is not None and len(merged_data[key]) > 0:
            merged_data[key] = np.array(merged_data[key])
    return merged_data
def merge_gaussians(ply_path, output_path):
    """End-to-end pipeline: load a 3DGS PLY, partition it with an octree,
    cluster-merge each cell, and write the reduced point set back out.

    Args:
        ply_path: input .ply file path.
        output_path: destination .ply file path.
    """
    print("读取PLY文件...")
    source = read_ply(ply_path)
    original_count = len(source['positions'])
    print(f"原始高斯点数: {original_count}")

    print("构建八叉树...")
    leaves = build_octree(source['positions'], max_points=5000)
    print(f"划分为 {len(leaves)} 个cells")

    print("对每个cell进行聚类和合并...")
    collected = {key: [] for key in
                 ('positions', 'opacities', 'scales', 'rotations', 'dc')}
    collected['sh_rest'] = [] if source['sh_rest'] is not None else None
    for i, leaf in enumerate(leaves):
        if i % 100 == 0:
            print(f"处理进度: {i}/{len(leaves)}")
        merged = cluster_and_merge_cell(
            source, leaf['indices'], leaf['bbox_min'], leaf['bbox_max'])
        if merged is None:
            continue
        for key, bucket in collected.items():
            if bucket is not None and len(merged[key]) > 0:
                bucket.append(merged[key])

    # Concatenate the per-cell results into one array per attribute.
    print("合并所有cell的结果...")
    final_data = {
        key: np.concatenate(chunks, axis=0)
        for key, chunks in collected.items()
        if chunks is not None and len(chunks) > 0
    }
    merged_count = len(final_data['positions'])
    print(f"合并后高斯点数: {merged_count}")
    print(f"压缩率: {merged_count/original_count*100:.2f}%")

    print("保存PLY文件...")
    save_ply(final_data, source['plydata'], output_path)
    print(f"已保存到: {output_path}")
def save_ply(merged_data, original_plydata, output_path):
    """Write the merged Gaussian attributes out as a PLY file.

    Args:
        merged_data: dict of numpy arrays — 'positions' (N,3), 'opacities',
            'scales' (N,3), 'rotations' (N,4), 'dc' (N,3), and optionally
            'sh_rest' (N,K).  The 'sh_rest' key may be absent or None.
        original_plydata: the PlyData the input was read from; currently
            unused, kept for interface stability.
        output_path: destination .ply path.
    """
    n_points = len(merged_data['positions'])
    # Fix: use .get() — when the input file had no f_rest_* properties the
    # pipeline omits the 'sh_rest' key entirely, and the previous
    # merged_data['sh_rest'] lookup raised KeyError.
    sh_rest = merged_data.get('sh_rest')

    # Vertex layout: position, opacity, scales, rotation quaternion, DC color.
    dtype_list = [
        ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
        ('opacity', 'f4'),
        ('scale_0', 'f4'), ('scale_1', 'f4'), ('scale_2', 'f4'),
        ('rot_0', 'f4'), ('rot_1', 'f4'), ('rot_2', 'f4'), ('rot_3', 'f4'),
        ('f_dc_0', 'f4'), ('f_dc_1', 'f4'), ('f_dc_2', 'f4'),
    ]
    if sh_rest is not None:
        n_sh_rest = sh_rest.shape[1]
        dtype_list.extend((f'f_rest_{i}', 'f4') for i in range(n_sh_rest))

    vertex_data = np.empty(n_points, dtype=dtype_list)
    vertex_data['x'] = merged_data['positions'][:, 0]
    vertex_data['y'] = merged_data['positions'][:, 1]
    vertex_data['z'] = merged_data['positions'][:, 2]
    vertex_data['opacity'] = merged_data['opacities'].flatten()
    vertex_data['scale_0'] = merged_data['scales'][:, 0]
    vertex_data['scale_1'] = merged_data['scales'][:, 1]
    vertex_data['scale_2'] = merged_data['scales'][:, 2]
    vertex_data['rot_0'] = merged_data['rotations'][:, 0]
    vertex_data['rot_1'] = merged_data['rotations'][:, 1]
    vertex_data['rot_2'] = merged_data['rotations'][:, 2]
    vertex_data['rot_3'] = merged_data['rotations'][:, 3]
    vertex_data['f_dc_0'] = merged_data['dc'][:, 0]
    vertex_data['f_dc_1'] = merged_data['dc'][:, 1]
    vertex_data['f_dc_2'] = merged_data['dc'][:, 2]
    if sh_rest is not None:
        for i in range(n_sh_rest):
            vertex_data[f'f_rest_{i}'] = sh_rest[:, i]

    vertex_element = PlyElement.describe(vertex_data, 'vertex')
    PlyData([vertex_element]).write(output_path)
# Usage example
if __name__ == "__main__":
    source_path = "input.ply"           # input file path
    merged_path = "output_merged.ply"   # output file path
    merge_gaussians(source_path, merged_path)