You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I created a custom texture plugin to represent textures as coloured point clouds. The idea is that the texture is represented directly on the object's surface; therefore, no UV mapping is required. When a ray intersects a mesh with a point cloud texture I find the 3 nearest points on the point cloud and interpolate their colour. Everything works until I try to make it differentiable.
I hope you can point me in the right direction and help me understand what I am doing wrong.
I have attached a mesh with its coloured point cloud, the code of a toy example to reproduce my issue, my system info, and the current error.
Thanks in advance for your help!
Code
importdrjitasdrimportmitsubaasmimi.set_variant("cuda_ad_rgb")
importtrimeshimporttorchimporttorch_geometric.dataimporttorch_geometric.transforms# Utility functions ###############################################importnumpyasnpfromtorch_geometric.nnimportknn_interpolatedefto_torch(x: np.ndarray) ->torch.Tensor:
returntorch.tensor(x, dtype=torch.float, requires_grad=False).contiguous()
def load_mesh_with_pcltex(
    pcltex_path: str, mesh_path: str | None
) -> torch_geometric.data.Data:
    """Load a coloured point-cloud texture and its companion mesh.

    The point cloud (positions + per-point colours) becomes `pos` / `x`
    of the returned Data object; the mesh vertices/faces are attached as
    the extra attributes `verts` / `face`.
    """
    pcl = trimesh.load_mesh(pcltex_path, process=False)
    colours = (pcl.colors / 255 - 0.5) * 2  # shift colours in [-1, 1]
    colours = colours[:, :3]  # drop the alpha channel if present
    data = torch_geometric.data.Data(
        pos=to_torch(pcl.vertices), x=to_torch(colours)
    )
    mesh = trimesh.load_mesh(mesh_path, process=False)
    data.verts = to_torch(mesh.vertices)
    # NOTE(review): to_torch produces a float tensor, but `face` holds
    # vertex indices — verify downstream consumers cast back to integers
    # (mesh_with_pcltex_to_mitsuba does go through numpy/Vector3u later).
    data.face = to_torch(mesh.faces).T
    return data


def define_camera(
    camera_distance: float,
    azimuth_deg: float,
    elevation_deg: float,
    camera_type: str = "perspective",
    img_width: int = 256,
    img_height: int = 256,
    sampler_type: str = "multijitter",  # default was "independent"
    sample_count: int = 16,
    fov: float = 40,
    aperture_radius: float | None = None,
    focus_distance: float | None = None,
) -> dict:
    """Build a Mitsuba sensor dict orbiting the origin.

    The camera position is obtained by rotating a point on the z-axis by
    the elevation (around z) and azimuth (around y) angles, then aiming
    it at the origin with a y-up look-at transform.
    """
    camera_pos = mi.ScalarTransform4f.rotate([0, 0, 1], elevation_deg).rotate(
        [0, 1, 0], azimuth_deg
    ) @ mi.ScalarPoint3f([0, 0, camera_distance])
    camera = {
        "type": camera_type,
        "fov": fov,
        "near_clip": 0.01,
        "far_clip": 1000.0,
        "to_world": mi.ScalarTransform4f.look_at(
            origin=camera_pos, target=[0, 0, 0], up=[0, 1, 0]
        ),
        "film": {
            "type": "hdrfilm",
            "rfilter": {"type": "box"},
            "width": img_width,
            "height": img_height,
        },
        "sampler": {
            "type": sampler_type,
            "sample_count": sample_count,
        },
    }
    if camera_type == "thinlens":
        # NOTE(review): both values may still be None here — consider
        # validating when camera_type == "thinlens".
        camera["aperture_radius"] = aperture_radius
        camera["focus_distance"] = focus_distance
    return camera


def vec_to_tens_safe(vec):
    # A utility function that converts a Vector3f to a TensorXf safely in
    # mitsuba while keeping the gradients;
    # a regular type cast mi.TensorXf(vector) detaches the gradients
    return mi.TensorXf(
        dr.ravel(vec), shape=[dr.shape(vec)[1], dr.shape(vec)[0]]
    )
def mega_kernel(state: bool = False):
    """Enable or disable the Dr.Jit JIT flags tied to mega-kernel mode.

    Passing False disables loop/vcall recording and vcall optimisation,
    which forces smaller kernels (useful when interoperating with torch).
    """
    flags = (
        dr.JitFlag.LoopRecord,
        dr.JitFlag.VCallRecord,
        dr.JitFlag.VCallOptimize,
    )
    for flag in flags:
        dr.set_flag(flag, state)
def flush_cache():
    """Release Dr.Jit's malloc cache back to the system."""
    # Not sure why but calling it once is not enough
    remaining = 5
    while remaining:
        dr.flush_malloc_cache()
        remaining -= 1
# Core part ####################################################


class TorchTextureToOptimise(torch.nn.Module):
    """Trainable per-point colour texture.

    Most likely this could be a single tensor with grads enabled but
    keeping things in a module for compatibility with the rest of the
    code I need to use.
    """

    def __init__(self, data):
        super().__init__()
        # One learnable RGB value per point, initialised randomly
        # (same shape as the ground-truth colours in data.x).
        self.pcl_texture = torch.nn.Parameter(
            torch.randn_like(data.x), requires_grad=True
        )

    def forward(self):
        return self.pcl_texture


class PclColoursTexture(mi.Texture):
    """Mitsuba texture backed by a coloured point cloud.

    At every surface interaction the colour is obtained by k-NN
    interpolation (k=3) over the point cloud, so no UV mapping is needed.
    """

    def __init__(self, props: mi.Properties) -> None:
        mi.Texture.__init__(self, props)
        # Zero vector added to si.p in eval(); exists only so an AD edge
        # from the intersection position can be exposed via traverse().
        self._grad_activator = mi.Vector3f(0)
        self.pcl_torch_pos = None
        self.pcl_mi_cols = None  # formerly a torch tensor,
        # casted because mitsuba was asking for a mitsuba type to be updated

    def traverse(self, callback):
        # Expose the differentiable colour buffer and the (fixed) point
        # positions to mi.traverse() so they can be updated/optimised.
        callback.put_parameter(
            "grad_activator", self._grad_activator, mi.ParamFlags.Differentiable
        )
        callback.put_parameter(
            "pcltex_color", self.pcl_mi_cols, mi.ParamFlags.Differentiable
        )
        callback.put_parameter(
            "pcltex_pos", self.pcl_torch_pos, mi.ParamFlags.NonDifferentiable
        )

    def eval(self, si, active=True, dirs=None, norms=None, albedo=None):
        # Convert hit positions to a TensorXf without detaching grads.
        surface_intersection_position = vec_to_tens_safe(
            si.p + self._grad_activator
        )
        mi_out = self._eval_in_torch(
            surface_intersection_position, self.pcl_mi_cols
        )
        return dr.unravel(mi.Vector3f, mi_out)

    @dr.wrap_ad(source="drjit", target="torch")
    def _eval_in_torch(self, pts, pcl_cols):
        # Inside this function pts/pcl_cols are torch tensors (wrap_ad).
        # NOTE(review): wrap_ad requires the torch side of the graph to
        # carry grads — if pcl_cols was built from a detached mi.TensorXf
        # this is a likely source of the "does not require grad" error;
        # confirm grad flow through pcl_mi_cols.
        interpolated_cols_torch = knn_interpolate(
            pcl_cols.to(pts.device),
            self.pcl_torch_pos.to(pts.device),
            pts,
            k=3,
        )
        return interpolated_cols_torch

    def eval_1(self, si, active=True):
        # Scalar evaluation: reuse eval() and keep the first channel.
        return mi.Float(self.eval(si)[0])

    def to_string(self):
        return "PclColoursTexture"


mi.register_texture("pcl_colours_texture", lambda p: PclColoursTexture(p))
def mesh_with_pcltex_to_mitsuba(
    data: torch_geometric.data.Data,
    twosided: bool = True,
) -> mi.Mesh:
    """Build a Mitsuba mesh whose BSDF samples the point-cloud texture.

    The mesh geometry comes from data.verts / data.face; the point cloud
    (data.pos / data.x) feeds the custom pcl_colours_texture plugin.
    """
    data = data.cpu()
    verts = data.verts.squeeze().detach().cpu().numpy()
    faces = data.face.squeeze().T.detach().cpu().numpy()
    pcl_colours_texture = mi.load_dict({"type": "pcl_colours_texture"})
    pcl_colours_texture.pcl_torch_pos = data.pos.squeeze()
    pcl_colours_texture.pcl_torch_pos.requires_grad = True
    # NOTE(review): texture_model is a module-level global defined in the
    # __main__ block below — this function breaks if imported/called
    # before it exists; consider passing it as a parameter.
    pcl_colours_texture.pcl_cols_model = texture_model
    # Vertex colours were normalised in [-1, 1], bring them back to [0, 1]
    pcl_cols = ((data.x / 2) + 0.5).clamp(0, 1)
    if "cuda" in mi.variant():
        pcl_cols = pcl_cols.cuda()
    # NOTE(review): constructing a fresh mi.TensorXf from the torch tensor
    # detaches it from the torch autograd graph — likely related to the
    # "element 0 of tensors does not require grad" backward error.
    pcl_colours_texture.pcl_mi_cols = mi.TensorXf(
        dr.ravel(mi.TensorXf(pcl_cols.squeeze())),
        shape=pcl_cols.squeeze().shape,
    )
    bsdf_dict = {
        "type": "principled",
        "base_color": pcl_colours_texture,
    }
    if twosided:
        bsdf_dict = {"type": "twosided", "material": bsdf_dict}
    bsdf_prop = mi.Properties()
    bsdf_prop["mesh_bsdf"] = mi.load_dict(bsdf_dict)
    mi_mesh = mi.Mesh(
        "mesh",
        vertex_count=verts.shape[0],
        face_count=faces.shape[0],
        props=bsdf_prop,
    )
    # "Traverse" the mesh to get its updateable parameters
    mesh_params = mi.traverse(mi_mesh)
    mesh_params["vertex_positions"] = dr.ravel(mi.Point3f(verts))
    mesh_params["faces"] = dr.ravel(mi.Vector3u(faces))
    mesh_params.update()
    return mi_mesh


if __name__ == "__main__":
    import matplotlib.pyplot as plt

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data = load_mesh_with_pcltex(
        pcltex_path="/data/B07B4YXXPZ.ply",
        mesh_path="/data/B07B4YXXPZ_mesh.ply",
    )
    texture_model = TorchTextureToOptimise(data)
    texture_model = texture_model.to(device)
    texture_model.train()
    scene_dict = {
        "type": "scene",
        "integrator": {"type": "prb"},
        "camera": define_camera(3.5, 210, -50),
        "emitter": {"type": "constant"},
        "mesh": mesh_with_pcltex_to_mitsuba(data),
    }
    scene = mi.load_dict(scene_dict)
    rend_params = mi.traverse(scene)
    mega_kernel(False)
    optimizer = torch.optim.Adamax(texture_model.parameters(), lr=1e-2)

    @dr.wrap_ad(source="torch", target="drjit")
    def diff_render_textured_mesh(texture):
        # Push the current torch texture into the scene parameters and
        # render; wrap_ad bridges torch autograd <-> drjit AD.
        rend_params["mesh.bsdf.brdf_0.base_color.pcltex_color"] = texture
        rend_params.update()
        img = mi.render(scene, rend_params)
        return img

    dr.enable_grad(rend_params["mesh.bsdf.brdf_0.base_color.grad_activator"])
    dr.enable_grad(rend_params["mesh.bsdf.brdf_0.base_color.pcltex_color"])
    # Dummy optimisation (one step only)
    optimizer.zero_grad()
    tx = texture_model().squeeze()
    img = diff_render_textured_mesh(tx)
    dummy_loss = img.mean()
    dummy_loss.backward()  # line causing errors
    optimizer.step()
    plt.axis("off")
    plt.imshow(img.clone().detach().cpu().numpy())
    flush_cache()
Main issue and current error
> File "/.../lib/python3.10/site-packages/drjit/router.py", line 4541, in traverse
> dtype.traverse_(mode, flags)
> File "/.../lib/python3.10/site-packages/drjit/router.py", line 6050, in backward
> _torch.autograd.backward(flatten(self.res_torch), flatten(grad_out_torch))
> File "/.../lib/python3.10/site-packages/torch/autograd/__init__.py", line 200, in backward
> Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
> RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
> terminate called after throwing an instance of 'std::runtime_error'
> what(): drjit-autodiff: ad_scope_leave(): underflow!
I can also report the full stacktrace if it helps. Essentially, I can't figure out which tensor does not require a grad: all the torch tensors appear to have requires_grad=True, and all the drjit ones return True when passed through dr.grad_enabled().
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
Hi!
I created a custom texture plugin to represent textures as coloured point clouds. The idea is that the texture is represented directly on the object's surface; therefore, no UV mapping is required. When a ray intersects a mesh with a point cloud texture I find the 3 nearest points on the point cloud and interpolate their colour. Everything works until I try to make it differentiable.
I hope you can point me in the right direction and help me understand what I am doing wrong.
I have attached a mesh with its coloured point cloud, the code of a toy example to reproduce my issue, my system info, and the current error.
Thanks in advance for your help!
Code
Main issue and current error
I can also report the full stacktrace if it helps. Essentially, I can't figure out which tensor does not require a grad: all the torch tensors appear to have requires_grad=True, and all the drjit ones return True when passed through dr.grad_enabled().
Files needed to reproduce
meshes.zip
System Information
Beta Was this translation helpful? Give feedback.
All reactions