diff --git a/docs/tutorials/fit_textured_mesh.ipynb b/docs/tutorials/fit_textured_mesh.ipynb
index e3a3bca05..b05407b04 100644
--- a/docs/tutorials/fit_textured_mesh.ipynb
+++ b/docs/tutorials/fit_textured_mesh.ipynb
@@ -118,7 +118,7 @@
     "from pytorch3d.structures import Meshes\n",
     "from pytorch3d.renderer import (\n",
     "    look_at_view_transform,\n",
-    "    OpenGLPerspectiveCameras, \n",
+    "    FoVPerspectiveCameras, \n",
     "    PointLights, \n",
     "    DirectionalLights, \n",
     "    Materials, \n",
@@ -304,11 +304,11 @@
     "# broadcasting. So we can view the camera from a distance of dist=2.7, and \n",
     "# then specify elevation and azimuth angles for each viewpoint as tensors. \n",
     "R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)\n",
-    "cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)\n",
+    "cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n",
     "\n",
     "# We arbitrarily choose one particular view that will be used to visualize \n",
     "# results\n",
-    "camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...], \n",
+    "camera = FoVPerspectiveCameras(device=device, R=R[None, 1, ...], \n",
     "                                  T=T[None, 1, ...]) \n",
     "\n",
     "# Define the settings for rasterization and shading. Here we set the output \n",
@@ -351,7 +351,7 @@
     "# Our multi-view cow dataset will be represented by these 2 lists of tensors,\n",
     "# each of length num_views.\n",
     "target_rgb = [target_images[i, ..., :3] for i in range(num_views)]\n",
-    "target_cameras = [OpenGLPerspectiveCameras(device=device, R=R[None, i, ...], \n",
+    "target_cameras = [FoVPerspectiveCameras(device=device, R=R[None, i, ...], \n",
     "                                           T=T[None, i, ...]) for i in range(num_views)]"
    ]
   },
@@ -708,6 +708,7 @@
     "    image_size=128, \n",
     "    blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n",
     "    faces_per_pixel=50, \n",
+    "    perspective_correct=False, \n",
     ")\n",
     "\n",
     "# Differentiable soft renderer using per vertex RGB colors for texture\n",
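For reference, below is a minimal standalone sketch of how the tutorial's setup looks after this change: the renamed `FoVPerspectiveCameras` class and the new `perspective_correct=False` argument to `RasterizationSettings`. The class and parameter names come from the diff above; the `sigma` value and the single `elev`/`azim` viewpoint are illustrative assumptions (the tutorial defines its own values elsewhere in the notebook).

```python
# Minimal sketch of the updated API (assumes pytorch3d after the rename of
# OpenGLPerspectiveCameras to FoVPerspectiveCameras).
import numpy as np
import torch
from pytorch3d.renderer import (
    look_at_view_transform,
    FoVPerspectiveCameras,  # formerly OpenGLPerspectiveCameras
    RasterizationSettings,
)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Camera construction is unchanged apart from the class name.
R, T = look_at_view_transform(dist=2.7, elev=0, azim=0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

# Soft rasterization settings matching the values in the diff. The new
# perspective_correct=False flag explicitly disables perspective-correct
# barycentric interpolation in the rasterizer.
sigma = 1e-4  # assumed value; the notebook defines sigma in an earlier cell
raster_settings_soft = RasterizationSettings(
    image_size=128,
    blur_radius=np.log(1.0 / 1e-4 - 1.0) * sigma,
    faces_per_pixel=50,
    perspective_correct=False,
)
```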