[3D CV Research] 3DGS self.xyz_gradient_accum
`self.xyz_gradient_accum` is allocated with `self.get_xyz.shape[0]` rows, i.e. one entry per point: `n_points`, the number of points in the point cloud.
```python
# 3dgs/scene/gaussian_model.py
class GaussianModel:
    ...
    @property
    def get_xyz(self):
        return self._xyz
    ...
    def training_setup(self, training_args):
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
```
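For intuition, here is a toy, self-contained sketch (illustrative names and values, not repository code) of how this `(N, 1)` per-point buffer is used together with the `denom` counter that appears later in `training_setup` and `add_densification_stats`: each visible point accumulates the norm of its view-space gradient, `denom` counts how often it was seen, and densification later averages the two.

```python
# Toy illustration (not 3DGS code) of the (N, 1) per-point accumulator pattern.
import torch

n_points = 4
xyz_gradient_accum = torch.zeros(n_points, 1)
denom = torch.zeros(n_points, 1)

# pretend two iterations in which different subsets of points were visible
for visible, grad_norm in [
    (torch.tensor([True, True, False, True]), torch.tensor([[0.2], [0.1], [0.0], [0.4]])),
    (torch.tensor([True, False, False, True]), torch.tensor([[0.3], [0.0], [0.0], [0.2]])),
]:
    xyz_gradient_accum[visible] += grad_norm[visible]
    denom[visible] += 1

grads = xyz_gradient_accum / denom      # average view-space gradient per point
grads[grads.isnan()] = 0.0              # points never seen divide by zero -> 0
print(grads.squeeze())                  # tensor([0.2500, 0.1000, 0.0000, 0.3000])
```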
Pruning is performed on the `optimizable_tensors` by masking each param_group. `self.xyz_gradient_accum` can be masked with `valid_points_mask` in `prune_points()`. In `_prune_optimizer()`, each param_group's tensor (and its Adam state) is indexed with `valid_points_mask`, so the pruned `optimizable_tensors` keep only the rows where the mask is True: the point count drops from `n_points` to the number of surviving points. `prune_points()` then applies the same mask to `self.xyz_gradient_accum`, `self.denom`, and `self.max_radii2D`, so every per-point buffer stays row-aligned with `self._xyz`.

```python
# 3dgs/scene/gaussian_model.py
class GaussianModel:
    ...
    def _prune_optimizer(self, mask):
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            stored_state = self.optimizer.state.get(group['params'][0], None)
            if stored_state is not None:
                stored_state["exp_avg"] = stored_state["exp_avg"][mask]
                stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
                del self.optimizer.state[group['params'][0]]
                group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
                self.optimizer.state[group['params'][0]] = stored_state
                optimizable_tensors[group["name"]] = group["params"][0]
            else:
                group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
                optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def prune_points(self, mask):
        valid_points_mask = ~mask
        optimizable_tensors = self._prune_optimizer(valid_points_mask)
        self._xyz = optimizable_tensors["xyz"]
        self._features_dc = optimizable_tensors["f_dc"]
        self._features_rest = optimizable_tensors["f_rest"]
        self._opacity = optimizable_tensors["opacity"]
        self._scaling = optimizable_tensors["scaling"]
        self._rotation = optimizable_tensors["rotation"]
        self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
        self.denom = self.denom[valid_points_mask]
        self.max_radii2D = self.max_radii2D[valid_points_mask]
```
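A minimal, hypothetical sketch (toy sizes, not the repository's tensors) of what that boolean indexing does: `tensor[valid_points_mask]` keeps only the rows where the mask is True, and the same mask is applied to Adam's `exp_avg` / `exp_avg_sq`, so parameters and optimizer state shrink consistently. In the repo this runs inside the `torch.no_grad()` block shown in train.py below, so no explicit `detach()` is needed there.

```python
# Toy illustration of pruning by boolean mask (not the 3DGS code itself).
import torch
import torch.nn as nn

xyz = nn.Parameter(torch.randn(5, 3))
exp_avg = torch.randn(5, 3)                  # stand-in for Adam's first-moment state
prune_mask = torch.tensor([False, True, False, False, True])   # points to drop
valid_points_mask = ~prune_mask

xyz_pruned = nn.Parameter(xyz[valid_points_mask].detach().requires_grad_(True))
exp_avg_pruned = exp_avg[valid_points_mask]

print(xyz.shape, xyz_pruned.shape)           # torch.Size([5, 3]) torch.Size([3, 3])
print(exp_avg_pruned.shape)                  # torch.Size([3, 3])
```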
When new 3D Gaussian property tensors are added, they are concatenated into the existing optimizer, param_group by param_group, according to each property's `"name"`.

`self.xyz_gradient_accum` is re-created in `densification_postfix()` with as many rows as the `self._xyz` expanded by `cat_tensors_to_optimizer(d)`, i.e. `n_points` plus the number of newly added 3D Gaussians.

In `densification_postfix`:
- `new_xyz`, `new_features_dc`, `new_features_rest`, `new_opacities`, `new_scaling`, `new_rotation` are the learnable tensors to be added to training; they hold the properties of the newly created 3D Gaussians.
- They are packed into the dictionary `d`, keyed by property name, and passed to `cat_tensors_to_optimizer`.
- For each param_group in `self.optimizer.param_groups`, the matching entry of `d` becomes `extension_tensor` and is concatenated to the group whose `"name"` is `"xyz"`, `"f_dc"`, `"f_rest"`, `"opacity"`, `"scaling"`, or `"rotation"`.
- These names were assigned per param_group in the list `l` used to build the optimizer in `training_setup`, as the code below shows.
- Each `'params'` entry holds the single tensor of its param_group.
- In other words, the 3D Gaussian properties are split into param_groups so that each property can be trained with its own learning rate.
- The new property tensors `new_xyz`, ..., `new_rotation` are then concatenated into the corresponding param_groups of the Adam optimizer that was originally given `l`.
- In summary (see also the standalone sketch after the code block below):
  - `new_xyz` is appended to the param_group with `"name": "xyz"`
  - `new_features_dc` to `"name": "f_dc"`
  - `new_features_rest` to `"name": "f_rest"`
  - `new_opacities` to `"name": "opacity"`
  - `new_scaling` to `"name": "scaling"`
  - `new_rotation` to `"name": "rotation"`

```python
# 3dgs/scene/gaussian_model.py
class GaussianModel:
    ...
    def training_setup(self, training_args):
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        l = [
            {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
            {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
            {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
            {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
            {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
            {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
        ]
        self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
    ...
    def cat_tensors_to_optimizer(self, tensors_dict):
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            assert len(group["params"]) == 1
            extension_tensor = tensors_dict[group["name"]]
            stored_state = self.optimizer.state.get(group['params'][0], None)
            if stored_state is not None:
                stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
                stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)
                del self.optimizer.state[group['params'][0]]
                group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
                self.optimizer.state[group['params'][0]] = stored_state
                optimizable_tensors[group["name"]] = group["params"][0]
            else:
                group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
                optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
        d = {"xyz": new_xyz,
             "f_dc": new_features_dc,
             "f_rest": new_features_rest,
             "opacity": new_opacities,
             "scaling": new_scaling,
             "rotation": new_rotation}
        optimizable_tensors = self.cat_tensors_to_optimizer(d)
        ...
```
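For intuition, here is a small self-contained sketch (hypothetical toy code, not the repository's implementation; `cat_rows` is an invented helper) of the same pattern: each property lives in its own param_group identified by `"name"`, and adding points means concatenating new rows into that group's single parameter while padding Adam's `exp_avg` / `exp_avg_sq` with zeros.

```python
# Hypothetical toy version (not the 3DGS code) of growing a named param_group
# by concatenation, the pattern cat_tensors_to_optimizer implements.
import torch
import torch.nn as nn

xyz = nn.Parameter(torch.randn(4, 3))       # 4 points to start
opacity = nn.Parameter(torch.zeros(4, 1))
optimizer = torch.optim.Adam(
    [{"params": [xyz], "lr": 1.6e-4, "name": "xyz"},
     {"params": [opacity], "lr": 0.05, "name": "opacity"}],
    lr=0.0, eps=1e-15)

# one step so Adam has exp_avg / exp_avg_sq state that also needs extending
(xyz.sum() + opacity.sum()).backward()
optimizer.step()

def cat_rows(optimizer, name, extension):
    """Append `extension` rows to the param_group called `name`,
    padding Adam's per-row state with zeros for the new rows."""
    for group in optimizer.param_groups:
        if group["name"] != name:
            continue
        old = group["params"][0]
        state = optimizer.state.pop(old, None)
        new = nn.Parameter(torch.cat([old.detach(), extension], dim=0).requires_grad_(True))
        if state is not None:
            state["exp_avg"] = torch.cat([state["exp_avg"], torch.zeros_like(extension)], dim=0)
            state["exp_avg_sq"] = torch.cat([state["exp_avg_sq"], torch.zeros_like(extension)], dim=0)
            optimizer.state[new] = state
        group["params"][0] = new
        return new

xyz = cat_rows(optimizer, "xyz", torch.randn(2, 3))          # 4 -> 6 points
opacity = cat_rows(optimizer, "opacity", torch.zeros(2, 1))
print(xyz.shape, opacity.shape)   # torch.Size([6, 3]) torch.Size([6, 1])
```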
## Why is xyz_gradient_accum re-initialized after densification?
**The `densification_postfix` function adds the newly created points to the optimizer and then resets the gradient accumulation buffers to zero.**
`self.xyz_gradient_accum` is re-initialized after every `densification` step so that the accumulated gradients are cleared and correct gradients can be collected for the newly added points. After `densification`, **points may have been added or removed, so the old accumulation buffers are reset and the next interval accumulates gradients from scratch.**
### 1. Newly added points
Densification adds new points. These points have no existing gradient accumulation, so they must start from an initialized state: the freshly zeroed `self.xyz_gradient_accum` stores the gradients accumulated for them from this point on.
### 2. Removed points
Densification can also remove existing points. Their accumulated gradients are no longer needed, so the buffers are reset so that gradients are accumulated only for the points that remain.
### 3. Keeping the gradients accurate
If the accumulation buffers were not reset, values accumulated in earlier intervals would linger and distort the gradient statistics computed for the new set of points. Re-initializing `xyz_gradient_accum` after densification therefore lets the next optimization interval accumulate accurate gradients again.
#### `densification_postfix` is used in both `densify_and_clone` and `densify_and_split`.
**In short, after points have been added or removed by `densification`, the existing gradient accumulation buffers are zeroed so that the next stage can accumulate accurate gradients again.**
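As a tiny numeric illustration of points 1 and 3 above (hypothetical values, not from the paper or repo): after the point count changes, the old buffers no longer have one row per current point, and carrying a parent's accumulated history over to a fresh clone would inflate its average gradient, so both buffers are simply rebuilt at the new size.

```python
import torch

# Hypothetical numbers: 3 points, each seen 3 times before this densification
xyz_gradient_accum = torch.tensor([[0.9], [0.1], [0.5]])
denom = torch.tensor([[3.0], [3.0], [3.0]])

# Suppose point 0 was cloned -> 4 points now. The old (3, 1) buffers no longer
# align row-for-row with the new point set, so they are re-created at the new
# size; every point (old and new) starts the next interval from zero.
n_after = 4
xyz_gradient_accum = torch.zeros(n_after, 1)
denom = torch.zeros(n_after, 1)
```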
```python
# 3dgs/scene/gaussian_model.py
class GaussianModel:
    ...
    def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
        d = {"xyz": new_xyz,
             "f_dc": new_features_dc,
             "f_rest": new_features_rest,
             "opacity": new_opacities,
             "scaling": new_scaling,
             "rotation": new_rotation}
        optimizable_tensors = self.cat_tensors_to_optimizer(d)
        self._xyz = optimizable_tensors["xyz"]
        self._features_dc = optimizable_tensors["f_dc"]
        self._features_rest = optimizable_tensors["f_rest"]
        self._opacity = optimizable_tensors["opacity"]
        self._scaling = optimizable_tensors["scaling"]
        self._rotation = optimizable_tensors["rotation"]
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

    def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
        n_init_points = self.get_xyz.shape[0]
        # Extract points that satisfy the gradient condition
        padded_grad = torch.zeros((n_init_points), device="cuda")
        padded_grad[:grads.shape[0]] = grads.squeeze()
        selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
        selected_pts_mask = torch.logical_and(selected_pts_mask,
                                              torch.max(self.get_scaling, dim=1).values > self.percent_dense * scene_extent)

        stds = self.get_scaling[selected_pts_mask].repeat(N, 1)
        means = torch.zeros((stds.size(0), 3), device="cuda")
        samples = torch.normal(mean=means, std=stds)
        rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N, 1, 1)
        new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
        new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N))
        new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)
        new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1, 1)
        new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1, 1)
        new_opacity = self._opacity[selected_pts_mask].repeat(N, 1)

        self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation)

        prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
        self.prune_points(prune_filter)

    def densify_and_clone(self, grads, grad_threshold, scene_extent):
        # Extract points that satisfy the gradient condition
        selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
        selected_pts_mask = torch.logical_and(selected_pts_mask,
                                              torch.max(self.get_scaling, dim=1).values <= self.percent_dense * scene_extent)

        new_xyz = self._xyz[selected_pts_mask]
        new_features_dc = self._features_dc[selected_pts_mask]
        new_features_rest = self._features_rest[selected_pts_mask]
        new_opacities = self._opacity[selected_pts_mask]
        new_scaling = self._scaling[selected_pts_mask]
        new_rotation = self._rotation[selected_pts_mask]

        self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation)

    def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
        grads = self.xyz_gradient_accum / self.denom
        grads[grads.isnan()] = 0.0

        self.densify_and_clone(grads, max_grad, extent)
        self.densify_and_split(grads, max_grad, extent)
        ...
```
The view-space gradient is collected for the points that fall inside the viewing frustum of the rendered view: those points are exposed as `viewspace_point_tensor`, and `add_densification_stats` adds the norm of their screen-space gradient (`viewspace_point_tensor.grad[..., :2]`) to `self.xyz_gradient_accum` while incrementing `self.denom`.
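As a rough, hedged sketch of where that gradient comes from (a toy stand-in, not the actual rasterizer code): a screen-space points tensor created with `requires_grad=True` participates in the loss, so after `loss.backward()` its `.grad` holds the 2D position gradient that `add_densification_stats` norms and accumulates.

```python
# Hypothetical, self-contained stand-in (not the 3DGS renderer) showing how a
# screen-space points tensor ends up with a .grad that add_densification_stats
# can read and accumulate.
import torch

n = 5
viewspace_point_tensor = torch.zeros(n, 3, requires_grad=True)

# pretend "projection + rendering + loss": any scalar depending on the first
# two (screen-space) coordinates leaves a gradient there after backward()
xy = viewspace_point_tensor[:, :2] + torch.randn(n, 2)
loss = (xy ** 2).sum()
loss.backward()

update_filter = torch.tensor([True, True, False, True, False])  # visible points
xyz_gradient_accum = torch.zeros(n, 1)
denom = torch.zeros(n, 1)
xyz_gradient_accum[update_filter] += torch.norm(
    viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=True)
denom[update_filter] += 1
```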
```python
# 3dgs/scene/gaussian_model.py
class GaussianModel:
    ...
    def update_learning_rate(self, iteration):
        ''' Learning rate scheduling per step '''
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "xyz":
                lr = self.xyz_scheduler_args(iteration)
                param_group['lr'] = lr
                return lr
    ...
    def add_densification_stats(self, viewspace_point_tensor, update_filter):
        self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=True)
        self.denom[update_filter] += 1
```
```python
# 3dgs/train.py
def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from):
    ...
    for iteration in range(first_iter, opt.iterations + 1):
        ...
        # Update the learning rate
        gaussians.update_learning_rate(iteration)

        # Increase the SH degree
        if iteration % 1000 == 0:
            gaussians.oneupSHdegree()
        ...
        render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
        image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
        ...
        with torch.no_grad():
            ...
            # Densification
            if iteration < opt.densify_until_iter:
                # Keep track of max radii in image-space for pruning
                gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
                gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
                ...

            if iteration < opt.iterations:
                gaussians.optimizer.step()
                gaussians.optimizer.zero_grad(set_to_none=True)
```