
self.xyz_gradient_accum์€ self.get_xyz.shape[0]๋กœ point cloud์—์„œ point์˜ ์ˆ˜์ธ n_points ๋งŒํผ ์ •์˜๋ฉ๋‹ˆ๋‹ค.

```python
# 3dgs/scene/gaussian_model.py

class GaussianModel:

...

    @property
    def get_xyz(self):
        return self._xyz

...

    def training_setup(self, training_args):
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
```

pruning์€ optimizable_tensors์— ๋Œ€ํ•ด ๊ฐ param_group์„ maskingํ•˜์—ฌ ์ˆ˜ํ–‰ํ•ฉ๋‹ˆ๋‹ค.

  • self.xyz_gradient_accum can be masked with valid_points_mask in prune_points().
    • In _prune_optimizer(), each param_group's tensor and its Adam state (exp_avg, exp_avg_sq) are indexed with [mask] (valid_points_mask), so only the rows of the surviving points are kept and n_points shrinks accordingly; a short standalone sketch of this masking follows the code below.
        
        def _prune_optimizer(self, mask):
            optimizable_tensors = {}
            for group in self.optimizer.param_groups:
                stored_state = self.optimizer.state.get(group['params'][0], None)
                if stored_state is not None:
                    stored_state["exp_avg"] = stored_state["exp_avg"][mask]
                    stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
        
                    del self.optimizer.state[group['params'][0]]
                    group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
                    self.optimizer.state[group['params'][0]] = stored_state
        
                    optimizable_tensors[group["name"]] = group["params"][0]
                else:
                    group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
                    optimizable_tensors[group["name"]] = group["params"][0]
            return optimizable_tensors
        
        def prune_points(self, mask):
            valid_points_mask = ~mask
            optimizable_tensors = self._prune_optimizer(valid_points_mask)
        
            self._xyz = optimizable_tensors["xyz"]
            self._features_dc = optimizable_tensors["f_dc"]
            self._features_rest = optimizable_tensors["f_rest"]
            self._opacity = optimizable_tensors["opacity"]
            self._scaling = optimizable_tensors["scaling"]
            self._rotation = optimizable_tensors["rotation"]
        
            self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
        
            self.denom = self.denom[valid_points_mask]
            self.max_radii2D = self.max_radii2D[valid_points_mask]
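
The boolean indexing above is what actually removes points. A minimal, standalone sketch (toy tensors, not the 3DGS model) of the same masking:

```python
import torch

xyz = torch.randn(5, 3)                                       # 5 "points" with xyz coordinates
prune_mask = torch.tensor([False, True, False, False, True])  # points flagged for removal
valid_points_mask = ~prune_mask                               # points to keep, as in prune_points()

pruned_xyz = xyz[valid_points_mask]                           # boolean indexing drops the masked rows
print(pruned_xyz.shape)                                       # torch.Size([3, 3]) -- n_points: 5 -> 3
```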
      

When new tensors of 3D Gaussian properties are added, they are concatenated into the existing optimizer per param_group, matched by each property's "name".

  • self.xyz_gradient_accum is re-allocated in densification_postfix() with the length of self._xyz after cat_tensors_to_optimizer(d) has extended it, i.e. n_points plus the number of newly added 3D Gaussians.
  • densification_postfix receives new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling and new_rotation as the learnable tensors to add to training.
  • These correspond to the properties of the newly added 3D Gaussians.
  • They are packed as the values of the dictionary d and passed to cat_tensors_to_optimizer.
  • When the new property tensors (new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation) are added to the existing self.optimizer.param_groups as extension_tensor, each one is concatenated to the param_group whose "name" is "xyz", "f_dc", "f_rest", "opacity", "scaling" or "rotation".
  • These names were assigned when the list l was first defined, giving each param_group its "name".
  • Here, 'params' holds the tensor of each param_group.
  • In other words, splitting the 3D Gaussian properties into separate param_groups lets us train each property with its own learning rate.
  • The new 3D Gaussian property tensors new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling and new_rotation are then concatenated, per param_group, into the Adam optimizer that was originally constructed from l.
  • In summary:
    • new_xyz is appended to the param_group with "name": "xyz"
    • new_features_dc is appended to the param_group with "name": "f_dc"
    • new_features_rest is appended to the param_group with "name": "f_rest"
    • new_opacities is appended to the param_group with "name": "opacity"
    • new_scaling is appended to the param_group with "name": "scaling"
    • new_rotation is appended to the param_group with "name": "rotation"

```python
# 3dgs/scene/gaussian_model.py

class GaussianModel:

...

    def training_setup(self, training_args):
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")

        l = [
            {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
            {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
            {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
            {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
            {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
            {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
        ]

        self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)

...

    def cat_tensors_to_optimizer(self, tensors_dict):
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            assert len(group["params"]) == 1
            extension_tensor = tensors_dict[group["name"]]
            stored_state = self.optimizer.state.get(group['params'][0], None)
            if stored_state is not None:

                stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
                stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)

                del self.optimizer.state[group['params'][0]]
                group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
                self.optimizer.state[group['params'][0]] = stored_state

                optimizable_tensors[group["name"]] = group["params"][0]
            else:
                group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
                optimizable_tensors[group["name"]] = group["params"][0]

        return optimizable_tensors

    def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
        d = {"xyz": new_xyz,
        "f_dc": new_features_dc,
        "f_rest": new_features_rest,
        "opacity": new_opacities,
        "scaling" : new_scaling,
        "rotation" : new_rotation}

        optimizable_tensors = self.cat_tensors_to_optimizer(d)

...
```


## Why is xyz_gradient_accum re-initialized after densification?

**After new points have been added, the `densification_postfix` function registers them with the optimizer and resets the gradient accumulation buffers to zero.**

`self.xyz_gradient_accum`์„ ๋งค `densification` ๊ณผ์ • ํ›„์— ์žฌ์ •์˜ํ•˜์—ฌ ์ตœ์ ํ™”๋ฅผ ์ˆ˜ํ–‰ํ•˜๋Š” ์ด์œ ๋Š” ๊ทธ๋ž˜๋””์–ธํŠธ ๋ˆ„์  ๊ฐ’์„ ์ดˆ๊ธฐํ™”ํ•˜๊ณ  ์ƒˆ๋กœ ์ถ”๊ฐ€๋œ ์ ๋“ค์— ๋Œ€ํ•ด ์˜ฌ๋ฐ”๋ฅธ ๊ทธ๋ž˜๋””์–ธํŠธ๋ฅผ ๊ณ„์‚ฐํ•˜๊ธฐ ์œ„ํ•จ์ž…๋‹ˆ๋‹ค. `densification` ํ›„์— **์ƒˆ๋กœ์šด ์ ๋“ค์ด ์ถ”๊ฐ€๋˜๊ฑฐ๋‚˜ ๊ธฐ์กด ์ ๋“ค์ด ์ œ๊ฑฐ๋  ์ˆ˜ ์žˆ์œผ๋ฏ€๋กœ, ๊ธฐ์กด์˜ ๊ทธ๋ž˜๋””์–ธํŠธ ๋ˆ„์  ๊ฐ’์„ ์ดˆ๊ธฐํ™”ํ•˜์—ฌ ๋‹ค์Œ ๋‹จ๊ณ„์—์„œ ์ •ํ™•ํ•œ ๊ทธ๋ž˜๋””์–ธํŠธ๋ฅผ ๋‹ค์‹œ ๋ˆ„์ ํ•  ์ˆ˜ ์žˆ๊ฒŒ ํ•ฉ๋‹ˆ๋‹ค.**

### 1. Newly added points
New points are created during densification. They have no previous gradient history, so they must start from an initialized state. Since the newly added points have not accumulated any gradients yet, the freshly allocated self.xyz_gradient_accum stores only the gradients accumulated from this point on.

### 2. Removed points
Some points may be removed during densification. Their accumulated gradients are no longer needed, so the buffer is re-initialized and gradients are accumulated only for the remaining points.

### 3. ๊ทธ๋ž˜๋””์–ธํŠธ์˜ ์ •ํ™•์„ฑ ์œ ์ง€
๊ทธ๋ž˜๋””์–ธํŠธ ๋ˆ„์  ๊ฐ’์„ ์ดˆ๊ธฐํ™”ํ•˜์ง€ ์•Š์œผ๋ฉด, ์ด์ „ ๋‹จ๊ณ„์—์„œ ๋ˆ„์ ๋œ ๊ทธ๋ž˜๋””์–ธํŠธ ๊ฐ’์ด ๊ณ„์† ๋‚จ์•„์žˆ๊ฒŒ ๋˜์–ด ์ƒˆ๋กœ์šด ์ ๋“ค์— ๋Œ€ํ•œ ์˜ฌ๋ฐ”๋ฅธ ๊ทธ๋ž˜๋””์–ธํŠธ๋ฅผ ๊ณ„์‚ฐํ•˜๋Š” ๋ฐ ๋ฌธ์ œ๊ฐ€ ์ƒ๊ธธ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. ๋”ฐ๋ผ์„œ, densification ํ›„์— xyz_gradient_accum์„ ์ดˆ๊ธฐํ™”ํ•˜์—ฌ ๋‹ค์Œ ์ตœ์ ํ™” ๋‹จ๊ณ„์—์„œ ์ •ํ™•ํ•œ ๊ทธ๋ž˜๋””์–ธํŠธ๋ฅผ ๋‹ค์‹œ ๋ˆ„์ ํ•  ์ˆ˜ ์žˆ๋„๋ก ํ•ฉ๋‹ˆ๋‹ค.
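
A toy numeric sketch of this accumulate, average and reset cycle (made-up values, not taken from the repo):

```python
import torch

xyz_gradient_accum = torch.zeros(3, 1)   # one row per point
denom = torch.zeros(3, 1)                # how many times each point was updated

# pretend two iterations produced view-space gradient norms for 3 points
for norms in (torch.tensor([[0.1], [0.4], [0.0]]),
              torch.tensor([[0.2], [0.6], [0.0]])):
    update_filter = norms.squeeze(-1) > 0          # only "visible" points are updated
    xyz_gradient_accum[update_filter] += norms[update_filter]
    denom[update_filter] += 1

grads = xyz_gradient_accum / denom                 # average view-space gradient per point
grads[grads.isnan()] = 0.0                         # a point never updated has denom == 0
print(grads.squeeze(-1))                           # tensor([0.1500, 0.5000, 0.0000])

# after densify/prune the number of points changes, so both buffers are re-allocated
# with torch.zeros((n_points, 1)) and the next interval starts from scratch
```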

#### `densification_postfix` is used in both `densify_and_clone` and `densify_and_split`.

**In short, after `densification` has added new points or removed existing ones, the old gradient accumulation values are reset to zero so that accurate gradients can be re-accumulated in the next interval.**

```python
# 3dgs/scene/gaussian_model.py

class GaussianModel:

...

    def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
        d = {"xyz": new_xyz,
        "f_dc": new_features_dc,
        "f_rest": new_features_rest,
        "opacity": new_opacities,
        "scaling" : new_scaling,
        "rotation" : new_rotation}

        optimizable_tensors = self.cat_tensors_to_optimizer(d)
        self._xyz = optimizable_tensors["xyz"]
        self._features_dc = optimizable_tensors["f_dc"]
        self._features_rest = optimizable_tensors["f_rest"]
        self._opacity = optimizable_tensors["opacity"]
        self._scaling = optimizable_tensors["scaling"]
        self._rotation = optimizable_tensors["rotation"]

        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")


    def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
        n_init_points = self.get_xyz.shape[0]
        # Extract points that satisfy the gradient condition
        padded_grad = torch.zeros((n_init_points), device="cuda")
        padded_grad[:grads.shape[0]] = grads.squeeze()
        selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
        selected_pts_mask = torch.logical_and(selected_pts_mask,
                                              torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)

        stds = self.get_scaling[selected_pts_mask].repeat(N,1)
        means =torch.zeros((stds.size(0), 3),device="cuda")
        samples = torch.normal(mean=means, std=stds)
        rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
        new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
        new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
        new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
        new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
        new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
        new_opacity = self._opacity[selected_pts_mask].repeat(N,1)

        self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation)

        # pad the mask with False entries for the N * num_selected newly appended Gaussians,
        # so that only the original (now split) Gaussians are pruned
        prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
        self.prune_points(prune_filter)

    def densify_and_clone(self, grads, grad_threshold, scene_extent):
        # Extract points that satisfy the gradient condition
        selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
        selected_pts_mask = torch.logical_and(selected_pts_mask,
                                              torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
        
        new_xyz = self._xyz[selected_pts_mask]
        new_features_dc = self._features_dc[selected_pts_mask]
        new_features_rest = self._features_rest[selected_pts_mask]
        new_opacities = self._opacity[selected_pts_mask]
        new_scaling = self._scaling[selected_pts_mask]
        new_rotation = self._rotation[selected_pts_mask]

        self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation)

    def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
        grads = self.xyz_gradient_accum / self.denom
        grads[grads.isnan()] = 0.0

        self.densify_and_clone(grads, max_grad, extent)
        self.densify_and_split(grads, max_grad, extent)

...
```

viewspace์˜ gradient๋Š” renderํ•˜๋Š” view์—์„œ viewing frustum์— ํฌํ•จ๋˜๋Š” points๋ฅผ viewspace_point_tensor๋กœ ์ •์˜ํ•˜์—ฌ self.xyz_gradient_accum์˜ gradient ์—…๋ฐ์ดํŠธ์— ์ถ”๊ฐ€ํ•ฉ๋‹ˆ๋‹ค.

```python
# 3dgs/scene/gaussian_model.py

class GaussianModel:

...

    def update_learning_rate(self, iteration):
        ''' Learning rate scheduling per step '''
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "xyz":
                lr = self.xyz_scheduler_args(iteration)
                param_group['lr'] = lr
                return lr

...

    def add_densification_stats(self, viewspace_point_tensor, update_filter):
        self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True)
        self.denom[update_filter] += 1
```

```python
# 3dgs/train.py

def training(dataset, opt, pipe, testing_iterations, saving_iterations, checkpoint_iterations, checkpoint, debug_from):

...

    for iteration in range(first_iter, opt.iterations + 1):

...

        # update the learning rate
        gaussians.update_learning_rate(iteration)

        # increase the SH degree
        if iteration % 1000 == 0:
            gaussians.oneupSHdegree()

...

        render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
        image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]

...

        with torch.no_grad():

...

            # Densification
            if iteration < opt.densify_until_iter:
                # Keep track of max radii in image-space for pruning
                gaussians.max_radii2D[visibility_filter] = torch.max(gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
                gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
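                # (sketch, not verbatim from the official train.py) at a fixed interval, once past the
                # warm-up iterations, densify_and_prune consumes the stats accumulated above, e.g.:
                #   if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
                #       gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)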


...

            if iteration < opt.iterations:
                gaussians.optimizer.step()
                gaussians.optimizer.zero_grad(set_to_none=True)
```
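
For intuition, here is a minimal, standalone sketch (toy tensors, not the repo's renderer) of how add_densification_stats consumes the gradient of a view-space point tensor after backward():

```python
import torch

n_points = 4
viewspace_point_tensor = torch.zeros(n_points, 3, requires_grad=True)  # stand-in for screen-space positions
visibility_filter = torch.tensor([True, True, False, True])            # points inside the viewing frustum

# pretend the rendering loss depended on the visible points' screen positions
loss = (viewspace_point_tensor[visibility_filter] * torch.randn(3, 3)).sum()
loss.backward()

xyz_gradient_accum = torch.zeros(n_points, 1)
denom = torch.zeros(n_points, 1)
xyz_gradient_accum[visibility_filter] += torch.norm(
    viewspace_point_tensor.grad[visibility_filter, :2], dim=-1, keepdim=True)  # only screen-space x, y
denom[visibility_filter] += 1
print(xyz_gradient_accum.squeeze(-1))   # nonzero only for the visible points
```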

