# LLFF / merge_finetine.py
# Uploaded by SlekLi (commit 25169cb, verified)
import numpy as np
from plyfile import PlyData, PlyElement
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.transform import Rotation as R
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
import os
# ============================================================
# 以下为原始 merge 相关函数(保持不变)
# ============================================================
def read_ply(ply_path):
    """Read a 3DGS .ply file and unpack its per-Gaussian attributes.

    Returns a dict with positions (N,3), opacities (N,1), scales (N,3),
    rotations (N,4), dc (N,3), sh_rest ((N,K) or None), filter_3D (N,1),
    plus the raw PlyData object for later reference.
    """
    plydata = PlyData.read(ply_path)
    vert = plydata['vertex']

    def cols(*names):
        # Stack the named vertex properties into an (N, len(names)) array.
        return np.stack([vert[name] for name in names], axis=1)

    rest_keys = [name for name in vert.data.dtype.names if name.startswith('f_rest_')]
    sh_rest = np.stack([vert[key] for key in rest_keys], axis=1) if rest_keys else None

    return {
        'positions': cols('x', 'y', 'z'),
        'opacities': vert['opacity'][:, np.newaxis],
        'scales': cols('scale_0', 'scale_1', 'scale_2'),
        'rotations': cols('rot_0', 'rot_1', 'rot_2', 'rot_3'),
        'dc': cols('f_dc_0', 'f_dc_1', 'f_dc_2'),
        'sh_rest': sh_rest,
        'plydata': plydata,
        'filter_3D': cols('filter_3D'),
    }
def quaternion_to_rotation_matrix(q):
    """Convert a quaternion to a 3x3 rotation matrix.

    Tries scipy's [x, y, z, w] convention first; on failure falls back to
    reinterpreting q as [w, x, y, z].

    NOTE(review): scipy's from_quat only raises for zero-norm input, so the
    fallback cannot actually detect a wrong-order but valid quaternion.
    """
    try:
        rot = R.from_quat(q)
    except ValueError:
        # Fix: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; only a scipy validation error should trigger
        # the reordered retry.
        rot = R.from_quat([q[1], q[2], q[3], q[0]])
    return rot.as_matrix()
def compute_covariance(rotation, scale_log):
    """Build the 3x3 covariance R * S * S^T * R^T of one Gaussian.

    rotation is a quaternion; scale_log holds per-axis log-scales, which
    are exponentiated into the diagonal scaling matrix S.
    """
    rot_mat = quaternion_to_rotation_matrix(rotation)
    scale_diag = np.diag(np.exp(scale_log))
    return rot_mat @ scale_diag @ scale_diag.T @ rot_mat.T
def covariance_to_rotation_scale(cov):
    """Decompose a covariance into a quaternion [x,y,z,w] and per-axis scales.

    Eigenvalues are floored at 1e-7 before the square root to keep the
    scales real and positive.
    """
    eigvals, eigvecs = np.linalg.eigh(cov)
    scale = np.sqrt(np.maximum(eigvals, 1e-7))
    # eigh may hand back an improper rotation (det == -1); flip one axis
    # so scipy accepts it as a proper rotation matrix.
    if np.linalg.det(eigvecs) < 0:
        eigvecs[:, 0] = -eigvecs[:, 0]
    quat = R.from_matrix(eigvecs).as_quat()  # [x, y, z, w]
    return quat, scale
def dc_to_rgb(dc):
    """Map SH DC coefficients to RGB in [0, 1] via the 0th-order SH basis."""
    SH_C0 = 0.28209479177387814
    return np.clip(dc * SH_C0 + 0.5, 0, 1)
def rgb_to_dc(rgb):
    """Inverse of dc_to_rgb (without the clipping step)."""
    SH_C0 = 0.28209479177387814
    return (rgb - 0.5) / SH_C0
def build_octree(positions, max_points=5000):
    """Partition points into axis-aligned cells holding at most max_points
    points each (bounded by a maximum recursion depth of 10).

    Args:
        positions: (N, 3) array of point positions.
        max_points: split a cell while it holds more points than this.

    Returns:
        list of dicts {'indices', 'bbox_min', 'bbox_max'} covering all points.

    Bug fix: child membership uses a half-open interval [sub_min, sub_max),
    so points lying exactly on the global bbox_max fell outside every child
    and were silently dropped during subdivision. The root upper bound is
    now padded slightly so every point lands in some cell.
    """
    cells = []

    def subdivide(indices, bbox_min, bbox_max, depth=0):
        # Leaf: small enough, or recursion limit reached.
        if len(indices) <= max_points or depth > 10:
            cells.append({'indices': indices, 'bbox_min': bbox_min, 'bbox_max': bbox_max})
            return
        center = (bbox_min + bbox_max) / 2
        for i in range(8):
            # Bits of i select the low/high half along x, y, z.
            flags = np.array([i & 1, (i >> 1) & 1, (i >> 2) & 1], dtype=bool)
            sub_min = np.where(flags, center, bbox_min)
            sub_max = np.where(flags, bbox_max, center)
            pts = positions[indices]
            mask = np.all((pts >= sub_min) & (pts < sub_max), axis=1)
            sub_indices = indices[mask]
            if len(sub_indices) > 0:
                subdivide(sub_indices, sub_min, sub_max, depth + 1)

    bbox_min = positions.min(axis=0)
    bbox_max = positions.max(axis=0)
    # Pad the upper bound so the strict `< sub_max` test keeps points that
    # sit exactly on the global maximum along any axis.
    span = np.maximum(bbox_max - bbox_min, 1e-6)
    bbox_max = bbox_max + 1e-6 * span
    subdivide(np.arange(len(positions)), bbox_min, bbox_max)
    return cells
def build_knn_connectivity_graph(positions, k=10):
    """Build a symmetric sparse adjacency matrix linking each point to its
    k nearest neighbours.

    Args:
        positions: (N, 3) array of point positions.
        k: number of neighbours per point (capped at N-1).

    Returns:
        (N, N) scipy CSR matrix with nonzeros at (i, j) and (j, i) for
        every kNN edge; duplicate edges accumulate, as before.
    """
    n_points = len(positions)
    # +1 neighbour because each query point matches itself in column 0.
    nbrs = NearestNeighbors(n_neighbors=min(k + 1, n_points), algorithm='kd_tree').fit(positions)
    _, indices = nbrs.kneighbors(positions)
    # Perf fix: replace the Python double loop with vectorized edge
    # construction. Drop the self-match column, then emit both directions
    # of each edge so the graph stays symmetric.
    src = np.repeat(np.arange(n_points), indices.shape[1] - 1)
    dst = indices[:, 1:].ravel()
    row_indices = np.concatenate([src, dst])
    col_indices = np.concatenate([dst, src])
    data = np.ones(len(row_indices))
    connectivity_matrix = csr_matrix((data, (row_indices, col_indices)), shape=(n_points, n_points))
    return connectivity_matrix
def get_connected_clusters(labels, connectivity_matrix):
    """Split clusters that are not connected in the kNN graph.

    For every cluster, each spatially-disconnected component beyond the
    first is assigned a brand-new label; the first component keeps the
    cluster's original label.
    """
    refined = labels.copy()
    next_label = labels.max() + 1
    for cid in np.unique(labels):
        members = np.where(labels == cid)[0]
        if len(members) <= 1:
            continue
        sub = connectivity_matrix[members, :][:, members]
        n_comp, comp_labels = connected_components(sub, directed=False, return_labels=True)
        if n_comp <= 1:
            continue
        # Component 0 keeps the original label; the rest are split off.
        for comp in range(1, n_comp):
            refined[members[comp_labels == comp]] = next_label
            next_label += 1
    return refined
def cluster_and_merge_cell(data, cell_indices, bbox_min, bbox_max,
                           k_neighbors=5, spread_factor=0.01, aspect_ratio_threshold=5.0):
    """Cluster the Gaussians of one octree cell and merge each cluster into
    a single Gaussian (targeting roughly half the input point count).

    Args:
        data: dict from read_ply() holding all per-Gaussian arrays.
        cell_indices: indices (into data's arrays) of the points in this cell.
        bbox_min, bbox_max: cell bounds, used to normalize positions.
        k_neighbors: k for the kNN connectivity graph.
        spread_factor: weight of the inter-point spread term added to the
            merged covariance.
        aspect_ratio_threshold: maximum allowed max/min scale ratio of a
            merged Gaussian; larger ratios are clipped down.

    Returns:
        dict of merged arrays ('positions', 'opacities', 'scales',
        'rotations', 'dc', 'sh_rest', 'filter_3D'), or None when the cell
        has fewer than 4 points. Note the returned 'scales' are actual
        (linear) scales, not logs.
    """
    if len(cell_indices) < 4:
        return None
    # Target roughly half as many clusters as input points.
    n_clusters = max(1, len(cell_indices) // 2)
    cell_positions = data['positions'][cell_indices]
    cell_dc = data['dc'][cell_indices]
    cell_opacities = data['opacities'][cell_indices]
    cell_scales = data['scales'][cell_indices]
    cell_rotations = data['rotations'][cell_indices]
    cell_filter_3D = data['filter_3D'][cell_indices]
    connectivity_matrix = build_knn_connectivity_graph(cell_positions, k=k_neighbors)
    # Clustering features: bbox-normalized position (weight 0.8) plus RGB
    # color (weight 0.2); sqrt weights so squared distances carry the ratio.
    cell_size = np.maximum(bbox_max - bbox_min, 1e-6)
    norm_positions = (cell_positions - bbox_min) / cell_size
    rgb = dc_to_rgb(cell_dc)
    features = np.concatenate([
        norm_positions * np.sqrt(0.8),
        rgb * np.sqrt(0.2)
    ], axis=1)
    clustering = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward')
    labels = clustering.fit_predict(features)
    # Split any cluster whose members are not connected in the kNN graph.
    refined_labels = get_connected_clusters(labels, connectivity_matrix)
    final_n_clusters = len(np.unique(refined_labels))
    print(f" 原始簇数: {n_clusters}, 连通性约束后簇数: {final_n_clusters}")
    merged_data = {
        'positions': [], 'opacities': [], 'scales': [], 'rotations': [],
        'dc': [], 'sh_rest': [] if data['sh_rest'] is not None else None,
        'filter_3D': []
    }
    for cluster_id in np.unique(refined_labels):
        cluster_mask = refined_labels == cluster_id
        cluster_indices_in_cell = np.where(cluster_mask)[0]
        if len(cluster_indices_in_cell) == 0:
            continue
        # Merge weights: activated (sigmoid) opacity times an approximate
        # ellipsoid volume (product of the linear scales).
        scale_actual = np.exp(cell_scales[cluster_indices_in_cell])
        approximate_volumes = np.prod(scale_actual, axis=1, keepdims=True)
        actual_opacities = 1.0 / (1.0 + np.exp(-cell_opacities[cluster_indices_in_cell]))
        weights = actual_opacities * approximate_volumes
        weights_sum = weights.sum()
        normalized_weights = weights / weights_sum
        # Weighted means of position / color / filter / higher-order SH.
        merged_position = (cell_positions[cluster_indices_in_cell] * normalized_weights).sum(axis=0)
        merged_dc = (cell_dc[cluster_indices_in_cell] * normalized_weights).sum(axis=0)
        merged_filter_3D = (cell_filter_3D[cluster_indices_in_cell] * normalized_weights).sum(axis=0)
        if data['sh_rest'] is not None:
            sh_rest_cell = data['sh_rest'][cell_indices]
            merged_sh_rest = (sh_rest_cell[cluster_indices_in_cell] * normalized_weights).sum(axis=0)
        # Merged covariance: weighted sum of member covariances plus a
        # spread term built from each member's offset to the merged center.
        covariances = []
        for idx in cluster_indices_in_cell:
            cov = compute_covariance(cell_rotations[idx], cell_scales[idx])
            covariances.append(cov)
        covariances = np.array(covariances)
        merged_cov = np.zeros((3, 3))
        for i, idx in enumerate(cluster_indices_in_cell):
            diff = cell_positions[idx] - merged_position
            outer = np.outer(diff, diff)
            merged_cov += normalized_weights[i, 0] * (covariances[i] + spread_factor * outer)
        merged_rotation, merged_scale = covariance_to_rotation_scale(merged_cov)
        # Clip overly anisotropic merged Gaussians to the allowed ratio.
        max_scale = merged_scale.max()
        min_scale = merged_scale.min()
        current_ratio = max_scale / (min_scale + 1e-8)
        if current_ratio > aspect_ratio_threshold:
            target_max = min_scale * aspect_ratio_threshold
            merged_scale = np.clip(merged_scale, None, target_max)
        # NOTE(review): cell_opacities hold raw pre-sigmoid logits, yet their
        # weighted mean is clipped to (0, 1) and re-logit-ed below as if it
        # were an activated opacity — confirm this is intentional.
        merged_opacity_actual = (cell_opacities[cluster_indices_in_cell] * normalized_weights).sum(axis=0)
        merged_opacity_actual = np.clip(merged_opacity_actual, 1e-5, 1.0 - 1e-5)
        merged_opacity = np.log(merged_opacity_actual / (1.0 - merged_opacity_actual))
        merged_data['positions'].append(merged_position)
        merged_data['opacities'].append(merged_opacity)
        merged_data['scales'].append(merged_scale)
        merged_data['rotations'].append(merged_rotation)
        merged_data['dc'].append(merged_dc)
        if data['sh_rest'] is not None:
            merged_data['sh_rest'].append(merged_sh_rest)
        merged_data['filter_3D'].append(merged_filter_3D)
    # Convert the per-key lists into arrays.
    for key in merged_data:
        if merged_data[key] is not None and len(merged_data[key]) > 0:
            merged_data[key] = np.array(merged_data[key])
    return merged_data
def validate_data(merged_data):
    """Print a NaN/Inf report for the merged Gaussians and return the counts.

    Returns a dict {'has_nan': ..., 'has_inf': ..., 'total': ...} where the
    first two count points with at least one bad attribute value.
    """
    print("\n" + "=" * 60)
    print("数据验证报告")
    print("=" * 60)
    total_points = len(merged_data['positions'])
    print(f"\n总点数: {total_points}")

    fields = [("位置 (positions)", 'positions', True),
              ("不透明度 (opacities)", 'opacities', False),
              ("缩放 (scales)", 'scales', True),
              ("旋转 (rotations)", 'rotations', True),
              ("DC系数 (f_dc)", 'dc', True)]
    for name, key, per_row in fields:
        arr = merged_data[key]
        if per_row:
            nan_c = np.isnan(arr).any(axis=1).sum()
            inf_c = np.isinf(arr).any(axis=1).sum()
        else:
            nan_c = np.isnan(arr).sum()
            inf_c = np.isinf(arr).sum()
        print(f"\n【{name}】 NaN: {nan_c} Inf: {inf_c}")

    def bad_rows(check):
        # A point is flagged when any of its attributes fails the check.
        return (check(merged_data['positions']).any(axis=1) |
                check(merged_data['opacities']).ravel() |
                check(merged_data['scales']).any(axis=1) |
                check(merged_data['rotations']).any(axis=1) |
                check(merged_data['dc']).any(axis=1))

    has_nan = bad_rows(np.isnan)
    has_inf = bad_rows(np.isinf)
    print(f"\n包含NaN的点: {has_nan.sum()} 包含Inf的点: {has_inf.sum()}")
    print("=" * 60 + "\n")
    return {'has_nan': has_nan.sum(), 'has_inf': has_inf.sum(), 'total': total_points}
def save_ply(merged_data, original_plydata, output_path):
    """Write the merged Gaussians back out as a 3DGS-style .ply file.

    merged_data['scales'] holds actual (linear) scales and is re-logged on
    save, matching the log-scale convention read_ply loads. original_plydata
    is kept for interface compatibility but is not used.
    """
    n_points = len(merged_data['positions'])
    dtype_list = [
        ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
        ('opacity', 'f4'),
        ('scale_0', 'f4'), ('scale_1', 'f4'), ('scale_2', 'f4'),
        ('rot_0', 'f4'), ('rot_1', 'f4'), ('rot_2', 'f4'), ('rot_3', 'f4'),
        ('f_dc_0', 'f4'), ('f_dc_1', 'f4'), ('f_dc_2', 'f4'),
    ]
    if merged_data['sh_rest'] is not None:
        n_sh_rest = merged_data['sh_rest'].shape[1]
        for i in range(n_sh_rest):
            dtype_list.append((f'f_rest_{i}', 'f4'))
    if 'filter_3D' in merged_data and merged_data['filter_3D'] is not None:
        dtype_list.append(('filter_3D', 'f4'))
    vertex_data = np.empty(n_points, dtype=dtype_list)
    vertex_data['x'] = merged_data['positions'][:, 0]
    vertex_data['y'] = merged_data['positions'][:, 1]
    vertex_data['z'] = merged_data['positions'][:, 2]
    vertex_data['opacity'] = merged_data['opacities'].flatten()
    # Scales are stored in log space on disk.
    vertex_data['scale_0'] = np.log(merged_data['scales'][:, 0])
    vertex_data['scale_1'] = np.log(merged_data['scales'][:, 1])
    vertex_data['scale_2'] = np.log(merged_data['scales'][:, 2])
    vertex_data['rot_0'] = merged_data['rotations'][:, 0]
    vertex_data['rot_1'] = merged_data['rotations'][:, 1]
    vertex_data['rot_2'] = merged_data['rotations'][:, 2]
    vertex_data['rot_3'] = merged_data['rotations'][:, 3]
    vertex_data['f_dc_0'] = merged_data['dc'][:, 0]
    vertex_data['f_dc_1'] = merged_data['dc'][:, 1]
    vertex_data['f_dc_2'] = merged_data['dc'][:, 2]
    if merged_data['sh_rest'] is not None:
        for i in range(n_sh_rest):
            vertex_data[f'f_rest_{i}'] = merged_data['sh_rest'][:, i]
    if 'filter_3D' in merged_data and merged_data['filter_3D'] is not None:
        vertex_data['filter_3D'] = merged_data['filter_3D'].flatten()
    # Fix: the element was described twice and the first result discarded;
    # build it once and write it.
    element = PlyElement.describe(vertex_data, 'vertex')
    PlyData([element]).write(output_path)
# ============================================================
# 新增:Fine-tuning 阶段
# 冻结位置,用下采样图像优化其余参数 500 epoch
# ============================================================
def finetune_merged_gaussians(
    merged_ply_path,
    source_path,
    output_ply_path,
    sh_degree=3,
    num_epochs=500,
    lr_opacity=0.05,
    lr_scaling=0.005,
    lr_rotation=0.001,
    lr_features_dc=0.0025,
    lr_features_rest=0.000125,
    white_background=False,
    kernel_size=0.1,
    gpu_id=0,
    log_interval=50,
):
    """Freeze Gaussian positions and fine-tune the remaining parameters on
    downsampled training images.

    Args:
        merged_ply_path: path to the PLY produced by the merge step.
        source_path: COLMAP dataset root for the downsampled images
            (must contain sparse/ and images/, where images/ holds the
            already-downsampled training images).
        output_ply_path: where to save the fine-tuned PLY.
        sh_degree: SH degree; must match the one used during merging.
        num_epochs: number of optimization epochs (default 500).
        lr_*: per-parameter learning rates, same magnitude as vanilla 3DGS.
        white_background: background color flag.
        kernel_size: rendering kernel size (keep consistent with your
            rendering script).
        gpu_id: GPU index to use.
        log_interval: print the average loss every this many epochs.
    """
    import torch
    import torch.nn.functional as F
    from scene import Scene
    from gaussian_renderer import render, GaussianModel
    from scene.dataset_readers import sceneLoadTypeCallbacks
    from utils.camera_utils import loadCam
    from utils.loss_utils import l1_loss, ssim
    device = f'cuda:{gpu_id}'
    torch.cuda.set_device(device)
    bg_color = [1, 1, 1] if white_background else [0, 0, 0]
    background = torch.tensor(bg_color, dtype=torch.float32, device=device)
    # ---- 1. Load the merged Gaussian model ----
    print("\n[Fine-tune] 加载 merge 后的高斯模型...")
    gaussians = GaussianModel(sh_degree)
    gaussians.load_ply(merged_ply_path)
    print(f"[Fine-tune] 高斯点数: {gaussians.get_xyz.shape[0]}")
    # ---- 2. Freeze positions; keep gradients only for the other params ----
    # GaussianModel stores state in _xyz / _features_dc / _features_rest /
    # _scaling / _rotation / _opacity (all nn.Parameter).
    gaussians._xyz.requires_grad_(False)
    # Build the optimizer over the non-position parameters only.
    param_groups = [
        {'params': [gaussians._features_dc], 'lr': lr_features_dc, 'name': 'f_dc'},
        {'params': [gaussians._features_rest], 'lr': lr_features_rest, 'name': 'f_rest'},
        {'params': [gaussians._opacity], 'lr': lr_opacity, 'name': 'opacity'},
        {'params': [gaussians._scaling], 'lr': lr_scaling, 'name': 'scaling'},
        {'params': [gaussians._rotation], 'lr': lr_rotation, 'name': 'rotation'},
    ]
    optimizer = torch.optim.Adam(param_groups, eps=1e-15)
    # ---- 3. Read the camera list for the downsampled images ----
    print("[Fine-tune] 读取相机信息...")
    if os.path.exists(os.path.join(source_path, "sparse")):
        scene_info = sceneLoadTypeCallbacks["Colmap"](
            source_path, "images", eval=False, resolution=1
        )
    elif os.path.exists(os.path.join(source_path, "transforms_train.json")):
        scene_info = sceneLoadTypeCallbacks["Blender"](
            source_path, white_background, eval=False, resolution=1
        )
    else:
        raise ValueError(f"[Fine-tune] 无法识别数据集格式: {source_path}")
    cam_infos = scene_info.train_cameras
    print(f"[Fine-tune] 训练相机数量: {len(cam_infos)}")
    # Preload every camera (including its GT image) into memory.
    class _LoadArgs:
        # Minimal args shim for loadCam.
        resolution = 1
        data_device = device
    cameras = []
    for i, ci in enumerate(cam_infos):
        try:
            cam = loadCam(_LoadArgs(), i, ci, 1.0, load_image=True)
            cameras.append(cam)
        except Exception as e:
            print(f"[Fine-tune] 跳过相机 {i}: {e}")
    if len(cameras) == 0:
        raise RuntimeError("[Fine-tune] 没有可用的训练相机,请检查 source_path。")
    # Pipeline settings (keep consistent with the existing render script).
    class _Pipeline:
        convert_SHs_python = False
        compute_cov3D_python = False
        debug = False
    pipeline = _Pipeline()
    # ---- 4. Fine-tuning main loop ----
    print(f"\n[Fine-tune] 开始优化,共 {num_epochs} epochs,{len(cameras)} 张图像...")
    lambda_dssim = 0.2  # loss = L1 + 0.2 * (1 - SSIM), same as vanilla 3DGS
    import random
    for epoch in range(1, num_epochs + 1):
        # Shuffle cameras each epoch; render one view at a time and backprop.
        random.shuffle(cameras)
        epoch_loss = 0.0
        for cam in cameras:
            optimizer.zero_grad()
            # Render the current view.
            render_pkg = render(cam, gaussians, pipeline, background, kernel_size=kernel_size)
            rendered = render_pkg["render"]  # (3, H, W)
            # GT image: Camera.original_image, already a [0,1] float tensor.
            gt = cam.original_image.to(device)  # (3, H, W)
            # Sizes should already match; resize defensively if they don't.
            if rendered.shape != gt.shape:
                gt = F.interpolate(
                    gt.unsqueeze(0),
                    size=(rendered.shape[1], rendered.shape[2]),
                    mode='bilinear',
                    align_corners=False
                ).squeeze(0)
            # Loss: L1 + D-SSIM.
            Ll1 = l1_loss(rendered, gt)
            loss = (1.0 - lambda_dssim) * Ll1 + lambda_dssim * (1.0 - ssim(rendered, gt))
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        if epoch % log_interval == 0 or epoch == 1:
            avg_loss = epoch_loss / len(cameras)
            print(f"[Fine-tune] Epoch {epoch:4d}/{num_epochs} avg_loss={avg_loss:.6f}")
    # ---- 5. Save the fine-tuned PLY ----
    print(f"\n[Fine-tune] 优化完成,保存至 {output_ply_path} ...")
    os.makedirs(os.path.dirname(os.path.abspath(output_ply_path)), exist_ok=True)
    gaussians.save_ply(output_ply_path)
    print("[Fine-tune] 保存完成。")
# ============================================================
# 主流程
# ============================================================
def merge_and_finetune(
    ply_path,
    output_path,
    # merge parameters
    k_neighbors=5,
    spread_factor=0.0,
    aspect_ratio_threshold=15.0,
    # fine-tune parameters
    do_finetune=True,
    source_path=None,
    finetuned_output_path=None,
    sh_degree=3,
    num_epochs=500,
    lr_opacity=0.05,
    lr_scaling=0.005,
    lr_rotation=0.001,
    lr_features_dc=0.0025,
    lr_features_rest=0.000125,
    white_background=False,
    kernel_size=0.1,
    gpu_id=0,
    log_interval=50,
):
    """Full pipeline: merge -> (optional) fine-tune.

    Args:
        ply_path: original 3DGS PLY file.
        output_path: where to save the merged PLY.
        do_finetune: whether to run the fine-tuning stage.
        source_path: COLMAP dataset dir of the downsampled images
            (required when do_finetune=True).
        finetuned_output_path: where to save the fine-tuned PLY (defaults
            to output_path with a "_finetuned" suffix).
        Remaining keyword args are forwarded to cluster_and_merge_cell /
        finetune_merged_gaussians.
    """
    # ---------- Step 1: Merge ----------
    print("=" * 60)
    print("Step 1: Merge 高斯点")
    print("=" * 60)
    print("读取PLY文件...")
    data = read_ply(ply_path)
    n_original = len(data['positions'])
    print(f"原始高斯点数: {n_original}")
    print("构建八叉树...")
    cells = build_octree(data['positions'], max_points=5000)
    print(f"划分为 {len(cells)} 个cells")
    print("对每个cell进行聚类和合并...")
    # Accumulate per-cell merge outputs; concatenated after the loop.
    all_merged_data = {
        'positions': [], 'opacities': [], 'scales': [], 'rotations': [],
        'dc': [], 'sh_rest': [] if data['sh_rest'] is not None else None,
        'filter_3D': []
    }
    for i, cell in enumerate(cells):
        if i % 100 == 0:
            print(f"处理进度: {i}/{len(cells)}")
        merged = cluster_and_merge_cell(
            data, cell['indices'], cell['bbox_min'], cell['bbox_max'],
            k_neighbors=k_neighbors,
            spread_factor=spread_factor,
            aspect_ratio_threshold=aspect_ratio_threshold
        )
        if merged is not None:
            for key in all_merged_data:
                if all_merged_data[key] is not None and len(merged[key]) > 0:
                    all_merged_data[key].append(merged[key])
    print("合并所有cell的结果...")
    final_data = {}
    for key in all_merged_data:
        if all_merged_data[key] is not None and len(all_merged_data[key]) > 0:
            final_data[key] = np.concatenate(all_merged_data[key], axis=0)
    n_merged = len(final_data['positions'])
    print(f"合并后高斯点数: {n_merged}")
    print(f"压缩率: {n_merged / n_original * 100:.2f}%")
    # Report NaN/Inf before writing anything to disk.
    validation_result = validate_data(final_data)
    if validation_result['has_nan'] > 0 or validation_result['has_inf'] > 0:
        print(f"\n⚠️ 警告: 发现 {validation_result['has_nan']} 个NaN和 "
              f"{validation_result['has_inf']} 个Inf!")
    print("保存 merge 后的PLY文件...")
    os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True)
    save_ply(final_data, data['plydata'], output_path)
    print(f"Merge PLY 已保存到: {output_path}")
    # ---------- Step 2: Fine-tune ----------
    if not do_finetune:
        print("\ndo_finetune=False,跳过 fine-tuning 阶段。")
        return
    if source_path is None:
        raise ValueError("do_finetune=True 时必须提供 source_path(下采样图像的 COLMAP 目录)")
    if finetuned_output_path is None:
        # Default: sibling file with a "_finetuned" suffix.
        base, ext = os.path.splitext(output_path)
        finetuned_output_path = base + "_finetuned" + ext
    print("\n" + "=" * 60)
    print("Step 2: Fine-tune(冻结位置,优化其余参数)")
    print("=" * 60)
    finetune_merged_gaussians(
        merged_ply_path=output_path,
        source_path=source_path,
        output_ply_path=finetuned_output_path,
        sh_degree=sh_degree,
        num_epochs=num_epochs,
        lr_opacity=lr_opacity,
        lr_scaling=lr_scaling,
        lr_rotation=lr_rotation,
        lr_features_dc=lr_features_dc,
        lr_features_rest=lr_features_rest,
        white_background=white_background,
        kernel_size=kernel_size,
        gpu_id=gpu_id,
        log_interval=log_interval,
    )
    print("\n✅ 全流程完成!")
    print(f" Merge PLY : {output_path}")
    print(f" Fine-tuned PLY : {finetuned_output_path}")
# ============================================================
# 入口
# ============================================================
if __name__ == "__main__":
    # ---------- Path configuration ----------
    input_ply = "merge/original_3dgs.ply"
    merged_ply = "low_results/output_merged.ply"
    finetuned_ply = "low_results/output_finetuned.ply"
    # COLMAP dataset dir matching the downsampled images: must contain
    # sparse/ and images/ (images/ holds the downsampled training images).
    downsampled_source = "dataset/downsampled"
    merge_and_finetune(
        # merge parameters
        ply_path=input_ply,
        output_path=merged_ply,
        k_neighbors=5,
        spread_factor=0.0,
        aspect_ratio_threshold=15.0,
        # fine-tune switch and parameters
        do_finetune=True,
        source_path=downsampled_source,
        finetuned_output_path=finetuned_ply,
        sh_degree=3,
        num_epochs=500,
        lr_opacity=0.05,
        lr_scaling=0.005,
        lr_rotation=0.001,
        lr_features_dc=0.0025,
        lr_features_rest=0.000125,
        white_background=False,
        kernel_size=0.1,
        gpu_id=0,
        log_interval=50,
    )