From 90f6a005b09af9570fc50799a301303e49b67dfe Mon Sep 17 00:00:00 2001 From: Nikhila Ravi Date: Fri, 21 Aug 2020 19:18:49 -0700 Subject: [PATCH] Tutorials textures updates and fix bug in extending meshes with uv textures Summary: Found a bug in extending textures with vertex uv coordinates. This was due to the padded -> list conversion of vertex uv coordinates i.e. The number of vertices in the mesh and in verts_uvs can differ e.g. if a vertex is shared between 3 faces, it can have up to 3 different uv coordinates. Therefore we cannot convert directly from padded to list using _num_verts_per_mesh Reviewed By: bottler Differential Revision: D23233595 fbshipit-source-id: 0c66d15baae697ead0bdc384f74c27d4c6539fc9 --- docs/tutorials/fit_textured_mesh.ipynb | 1859 ++++++++++--------- docs/tutorials/render_textured_meshes.ipynb | 13 +- pytorch3d/renderer/mesh/textures.py | 18 +- tests/test_texturing.py | 11 +- 4 files changed, 957 insertions(+), 944 deletions(-) diff --git a/docs/tutorials/fit_textured_mesh.ipynb b/docs/tutorials/fit_textured_mesh.ipynb index 2b5add741..f57cd1aaa 100644 --- a/docs/tutorials/fit_textured_mesh.ipynb +++ b/docs/tutorials/fit_textured_mesh.ipynb @@ -1,928 +1,935 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "accelerator": "GPU", - "bento_stylesheets": { - "bento/extensions/flow/main.css": true, - "bento/extensions/kernel_selector/main.css": true, - "bento/extensions/kernel_ui/main.css": true, - "bento/extensions/new_kernel/main.css": true, - "bento/extensions/system_usage/main.css": true, - "bento/extensions/theme/main.css": true - }, - "colab": { - "name": "fit_textured_mesh.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "cells": [ - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "_Ip8kp4TfBLZ", - "colab": {} - }, - "source": [ - "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved." - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "kuXHJv44fBLe" - }, - "source": [ - "# Fit a mesh via rendering\n", - "\n", - "This tutorial shows how to:\n", - "- Load a mesh and textures from an `.obj` file. \n", - "- Create a synthetic dataset by rendering a textured mesh from multiple viewpoints\n", - "- Fit a mesh to the observed synthetic images using differential silhouette rendering\n", - "- Fit a mesh and its textures using differential textured rendering" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Bnj3THhzfBLf" - }, - "source": [ - "## 0. 
Install and Import modules" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "okLalbR_g7NS" - }, - "source": [ - "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "musUWTglgxSB", - "colab": {} - }, - "source": [ - "!pip install torch torchvision\n", - "!pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "nX99zdoffBLg", - "colab": {} - }, - "source": [ - "import os\n", - "import torch\n", - "import matplotlib.pyplot as plt\n", - "from skimage.io import imread\n", - "\n", - "from pytorch3d.utils import ico_sphere\n", - "import numpy as np\n", - "from tqdm.notebook import tqdm\n", - "\n", - "# Util function for loading meshes\n", - "from pytorch3d.io import load_objs_as_meshes, save_obj\n", - "\n", - "from pytorch3d.loss import (\n", - " chamfer_distance, \n", - " mesh_edge_loss, \n", - " mesh_laplacian_smoothing, \n", - " mesh_normal_consistency,\n", - ")\n", - "\n", - "# Data structures and functions for rendering\n", - "from pytorch3d.structures import Meshes, Textures\n", - "from pytorch3d.renderer import (\n", - " look_at_view_transform,\n", - " FoVPerspectiveCameras, \n", - " PointLights, \n", - " DirectionalLights, \n", - " Materials, \n", - " RasterizationSettings, \n", - " MeshRenderer, \n", - " MeshRasterizer, \n", - " TexturedSoftPhongShader,\n", - " SoftSilhouetteShader,\n", - " SoftPhongShader,\n", - ")\n", - "\n", - "# add path for demo utils functions \n", - "import sys\n", - "import os\n", - "sys.path.append(os.path.abspath(''))" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Lxmehq6Zhrzv" - }, - "source": [ - "If using **Google Colab**, fetch the utils file for plotting image grids:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "HZozr3Pmho-5", - "colab": {} - }, - "source": [ - "!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n", - "from plot_image_grid import image_grid" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "g4B62MzYiJUM" - }, - "source": [ - "OR if running **locally** uncomment and run the following cell:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "paJ4Im8ahl7O", - "colab": {} - }, - "source": [ - " # from utils.plot_image_grid import image_grid" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "collapsed": true, - "id": "5jGq772XfBLk" - }, - "source": [ - "### 1. Load a mesh and texture file\n", - "\n", - "Load an `.obj` file and it's associated `.mtl` file and create a **Textures** and **Meshes** object. \n", - "\n", - "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. \n", - "\n", - "**Textures** is an auxillary datastructure for storing texture information about meshes. \n", - "\n", - "**Meshes** has several class methods which are used throughout the rendering pipeline." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "a8eU4zo5jd_H" - }, - "source": [ - "If running this notebook using **Google Colab**, run the following cell to fetch the mesh obj and texture files and save it at the path `data/cow_mesh`:\n", - "If running locally, the data is already available at the correct path. " - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "tTm0cVuOjb1W", - "colab": {} - }, - "source": [ - "!mkdir -p data/cow_mesh\n", - "!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj\n", - "!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl\n", - "!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "gi5Kd0GafBLl", - "colab": {} - }, - "source": [ - "# Setup\n", - "if torch.cuda.is_available():\n", - " device = torch.device(\"cuda:0\")\n", - " torch.cuda.set_device(device)\n", - "else:\n", - " device = torch.device(\"cpu\")\n", - "\n", - "# Set paths\n", - "DATA_DIR = \"./data\"\n", - "obj_filename = os.path.join(DATA_DIR, \"cow_mesh/cow.obj\")\n", - "\n", - "# Load obj file\n", - "mesh = load_objs_as_meshes([obj_filename], device=device)\n", - "\n", - "# We scale normalize and center the target mesh to fit in a sphere of radius 1 \n", - "# centered at (0,0,0). (scale, center) will be used to bring the predicted mesh \n", - "# to its original center and scale. Note that normalizing the target mesh, \n", - "# speeds up the optimization but is not necessary!\n", - "verts = mesh.verts_packed()\n", - "N = verts.shape[0]\n", - "center = verts.mean(0)\n", - "scale = max((verts - center).abs().max(0)[0])\n", - "mesh.offset_verts_(-center.expand(N, 3))\n", - "mesh.scale_verts_((1.0 / float(scale)));" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "17c4xmtyfBMH" - }, - "source": [ - "## 2. Dataset Creation\n", - "\n", - "We sample different camera positions that encode multiple viewpoints of the cow. We create a renderer with a shader that performs texture map interpolation. We render a synthetic dataset of images of the textured cow mesh from multiple viewpoints.\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "CDQKebNNfBMI", - "colab": {} - }, - "source": [ - "# the number of different viewpoints from which we want to render the mesh.\n", - "num_views = 20\n", - "\n", - "# Get a batch of viewing angles. \n", - "elev = torch.linspace(0, 360, num_views)\n", - "azim = torch.linspace(-180, 180, num_views)\n", - "\n", - "# Place a point light in front of the object. As mentioned above, the front of \n", - "# the cow is facing the -z direction. \n", - "lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])\n", - "\n", - "# Initialize a camera that represents a batch of different \n", - "# viewing angles. All the cameras helper methods support mixed type inputs and \n", - "# broadcasting. So we can view the camera from the a distance of dist=2.7, and \n", - "# then specify elevation and azimuth angles for each viewpoint as tensors. 
\n", - "R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)\n", - "cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n", - "\n", - "# We arbitrarily choose one particular view that will be used to visualize \n", - "# results\n", - "camera = FoVPerspectiveCameras(device=device, R=R[None, 1, ...], \n", - " T=T[None, 1, ...]) \n", - "\n", - "# Define the settings for rasterization and shading. Here we set the output \n", - "# image to be of size 128X128. As we are rendering images for visualization \n", - "# purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to \n", - "# rasterize_meshes.py for explanations of these parameters. We also leave \n", - "# bin_size and max_faces_per_bin to their default values of None, which sets \n", - "# their values using huristics and ensures that the faster coarse-to-fine \n", - "# rasterization method is used. Refer to docs/notes/renderer.md for an \n", - "# explanation of the difference between naive and coarse-to-fine rasterization. \n", - "raster_settings = RasterizationSettings(\n", - " image_size=128, \n", - " blur_radius=0.0, \n", - " faces_per_pixel=1, \n", - ")\n", - "\n", - "# Create a phong renderer by composing a rasterizer and a shader. The textured \n", - "# phong shader will interpolate the texture uv coordinates for each vertex, \n", - "# sample from a texture image and apply the Phong lighting model\n", - "renderer = MeshRenderer(\n", - " rasterizer=MeshRasterizer(\n", - " cameras=camera, \n", - " raster_settings=raster_settings\n", - " ),\n", - " shader=TexturedSoftPhongShader(\n", - " device=device, \n", - " cameras=camera,\n", - " lights=lights\n", - " )\n", - ")\n", - "\n", - "# Create a batch of meshes by repeating the cow mesh and associated textures. \n", - "# Meshes has a useful `extend` method which allows us do this very easily. \n", - "# This also extends the textures. \n", - "meshes = mesh.extend(num_views)\n", - "\n", - "# Render the cow mesh from each viewing angle\n", - "target_images = renderer(meshes, cameras=cameras, lights=lights)\n", - "\n", - "# Our multi-view cow dataset will be represented by these 2 lists of tensors,\n", - "# each of length num_views.\n", - "target_rgb = [target_images[i, ..., :3] for i in range(num_views)]\n", - "target_cameras = [FoVPerspectiveCameras(device=device, R=R[None, i, ...], \n", - " T=T[None, i, ...]) for i in range(num_views)]" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "TppB4PVmR1Rc" - }, - "source": [ - "Visualize the dataset:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "HHE0CnbVR1Rd", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# RGB images\n", - "image_grid(target_images.cpu().numpy(), rows=4, cols=5, rgb=True)\n", - "plt.show()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gOb4rYx65E8z", - "colab_type": "text" - }, - "source": [ - "Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We contruct a soft silhouette shader to render this alpha channel." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "iP_g-nwX4exM", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Rasterization settings for silhouette rendering \n", - "sigma = 1e-4\n", - "raster_settings_silhouette = RasterizationSettings(\n", - " image_size=128, \n", - " blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n", - " faces_per_pixel=50, \n", - ")\n", - "\n", - "# Silhouette renderer \n", - "renderer_silhouette = MeshRenderer(\n", - " rasterizer=MeshRasterizer(\n", - " cameras=camera, \n", - " raster_settings=raster_settings_silhouette\n", - " ),\n", - " shader=SoftSilhouetteShader()\n", - ")\n", - "\n", - "# Render silhouette images. The 3rd channel of the rendering output is \n", - "# the alpha/silhouette channel\n", - "silhouette_images = renderer_silhouette(meshes, cameras=cameras, lights=lights)\n", - "target_silhouette = [silhouette_images[i, ..., 3] for i in range(num_views)]\n", - "\n", - "# Visualize silhouette images\n", - "image_grid(silhouette_images.cpu().numpy(), rows=4, cols=5, rgb=False)\n", - "plt.show()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "t3qphI1ElUb5" - }, - "source": [ - "## 3. Mesh prediction via silhouette rendering\n", - "In the previous section, we created a dataset of images of multiple viewpoints of a cow. In this section, we predict a mesh by observing those target images without any knowledge of the ground truth cow mesh. We assume we know the position of the cameras and lighting.\n", - "\n", - "We first define some helper functions to visualize the results of our mesh prediction:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "eeWYHROrR1Rh", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Show a visualization comparing the rendered predicted mesh to the ground truth \n", - "# mesh\n", - "def visualize_prediction(predicted_mesh, renderer=renderer_silhouette, \n", - " target_image=target_rgb[1], title='', \n", - " silhouette=False):\n", - " inds = 3 if silhouette else range(3)\n", - " predicted_images = renderer(predicted_mesh)\n", - " plt.figure(figsize=(20, 10))\n", - " plt.subplot(1, 2, 1)\n", - " plt.imshow(predicted_images[0, ..., inds].cpu().detach().numpy())\n", - "\n", - " plt.subplot(1, 2, 2)\n", - " plt.imshow(target_image.cpu().detach().numpy())\n", - " plt.title(title)\n", - " plt.grid(\"off\")\n", - " plt.axis(\"off\")\n", - "\n", - "# Plot losses as a function of optimization iteration\n", - "def plot_losses(losses):\n", - " fig = plt.figure(figsize=(13, 5))\n", - " ax = fig.gca()\n", - " for k, l in losses.items():\n", - " ax.plot(l['values'], label=k + \" loss\")\n", - " ax.legend(fontsize=\"16\")\n", - " ax.set_xlabel(\"Iteration\", fontsize=\"16\")\n", - " ax.set_ylabel(\"Loss\", fontsize=\"16\")\n", - " ax.set_title(\"Loss vs iterations\", fontsize=\"16\")" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "PpsvBpuMR1Ri" - }, - "source": [ - "Starting from a sphere mesh, we will learn offsets of each vertex such that the predicted mesh silhouette is more similar to the target silhouette image at each optimization step. We begin by loading our initial sphere mesh:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "i989ARH1R1Rj", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# We initialize the source shape to be a sphere of radius 1. 
\n", - "src_mesh = ico_sphere(4, device)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "f5xVtgLNDvC5", - "colab_type": "text" - }, - "source": [ - "We create a new differentiable renderer for rendering the silhouette of our predicted mesh:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "sXfjzgG4DsDJ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Rasterization settings for differentiable rendering, where the blur_radius\n", - "# initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable \n", - "# Renderer for Image-based 3D Reasoning', ICCV 2019\n", - "sigma = 1e-4\n", - "raster_settings_soft = RasterizationSettings(\n", - " image_size=128, \n", - " blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n", - " faces_per_pixel=50, \n", - ")\n", - "\n", - "# Silhouette renderer \n", - "renderer_silhouette = MeshRenderer(\n", - " rasterizer=MeshRasterizer(\n", - " cameras=camera, \n", - " raster_settings=raster_settings_soft\n", - " ),\n", - " shader=SoftSilhouetteShader()\n", - ")" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "SGJKbCB6R1Rk" - }, - "source": [ - "We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target silhouettes:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "0sLrKv_MEULh", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Number of views to optimize over in each SGD iteration\n", - "num_views_per_iteration = 2\n", - "# Number of optimization steps\n", - "Niter = 2000\n", - "# Plot period for the losses\n", - "plot_period = 250\n", - "\n", - "%matplotlib inline\n", - "\n", - "# Optimize using rendered silhouette image loss, mesh edge loss, mesh normal \n", - "# consistency, and mesh laplacian smoothing\n", - "losses = {\"silhouette\": {\"weight\": 1.0, \"values\": []},\n", - " \"edge\": {\"weight\": 1.0, \"values\": []},\n", - " \"normal\": {\"weight\": 0.01, \"values\": []},\n", - " \"laplacian\": {\"weight\": 1.0, \"values\": []},\n", - " }\n", - "\n", - "# Losses to smooth / regularize the mesh shape\n", - "def update_mesh_shape_prior_losses(mesh, loss):\n", - " # and (b) the edge length of the predicted mesh\n", - " loss[\"edge\"] = mesh_edge_loss(mesh)\n", - " \n", - " # mesh normal consistency\n", - " loss[\"normal\"] = mesh_normal_consistency(mesh)\n", - " \n", - " # mesh laplacian smoothing\n", - " loss[\"laplacian\"] = mesh_laplacian_smoothing(mesh, method=\"uniform\")\n", - "\n", - "# We will learn to deform the source mesh by offsetting its vertices\n", - "# The shape of the deform parameters is equal to the total number of vertices in\n", - "# src_mesh\n", - "verts_shape = src_mesh.verts_packed().shape\n", - "deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)\n", - "\n", - "# The optimizer\n", - "optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "QLc9zK8lEqFS" - }, - "source": [ - "We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the sillhouettes of the target images:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "gCfepfOoR1Rl", - "colab_type": "code", - "colab": {} - }, - "source": [ - "loop = tqdm(range(Niter))\n", - "\n", - "for i in loop:\n", - 
" # Initialize optimizer\n", - " optimizer.zero_grad()\n", - " \n", - " # Deform the mesh\n", - " new_src_mesh = src_mesh.offset_verts(deform_verts)\n", - " \n", - " # Losses to smooth /regularize the mesh shape\n", - " loss = {k: torch.tensor(0.0, device=device) for k in losses}\n", - " update_mesh_shape_prior_losses(new_src_mesh, loss)\n", - " \n", - " # Compute the average silhouette loss over two random views, as the average \n", - " # squared L2 distance between the predicted silhouette and the target \n", - " # silhouette from our dataset\n", - " for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:\n", - " images_predicted = renderer_silhouette(new_src_mesh, cameras=target_cameras[j], lights=lights)\n", - " predicted_silhouette = images_predicted[..., 3]\n", - " loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()\n", - " loss[\"silhouette\"] += loss_silhouette / num_views_per_iteration\n", - " \n", - " # Weighted sum of the losses\n", - " sum_loss = torch.tensor(0.0, device=device)\n", - " for k, l in loss.items():\n", - " sum_loss += l * losses[k][\"weight\"]\n", - " losses[k][\"values\"].append(l)\n", - " \n", - " # Print the losses\n", - " loop.set_description(\"total_loss = %.6f\" % sum_loss)\n", - " \n", - " # Plot mesh\n", - " if i % plot_period == 0:\n", - " visualize_prediction(new_src_mesh, title=\"iter: %d\" % i, silhouette=True,\n", - " target_image=target_silhouette[1])\n", - " \n", - " # Optimization step\n", - " sum_loss.backward()\n", - " optimizer.step()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "scrolled": true, - "id": "CX4huayKR1Rm", - "colab_type": "code", - "colab": {} - }, - "source": [ - "visualize_prediction(new_src_mesh, silhouette=True, \n", - " target_image=target_silhouette[1])\n", - "plot_losses(losses)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "XJDsJQmrR1Ro" - }, - "source": [ - "## 3. Mesh and texture prediction via textured rendering\n", - "We can predict both the mesh and its texture if we add an additional loss based on the comparing a predicted rendered RGB image to the target image. As before, we start with a sphere mesh. We learn both translational offsets and RGB texture colors for each vertex in the sphere mesh. Since our loss is based on rendered RGB pixel values instead of just the silhouette, we use a **SoftPhongShader** instead of a **SoftSilhouetteShader**. Note also that we use a **SoftPhongShader** instead of the **TexturedSoftPhongShader** used to generate our dataset, because we represent texture using per vertex RGB colors instead of a texture image." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "aZObyIt9R1Ro", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Rasterization settings for differentiable rendering, where the blur_radius\n", - "# initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable \n", - "# Renderer for Image-based 3D Reasoning', ICCV 2019\n", - "sigma = 1e-4\n", - "raster_settings_soft = RasterizationSettings(\n", - " image_size=128, \n", - " blur_radius=np.log(1. 
/ 1e-4 - 1.)*sigma, \n", - " faces_per_pixel=50, \n", - ")\n", - "\n", - "# Differentiable soft renderer using per vertex RGB colors for texture\n", - "renderer_textured = MeshRenderer(\n", - " rasterizer=MeshRasterizer(\n", - " cameras=camera, \n", - " raster_settings=raster_settings_soft\n", - " ),\n", - " shader=SoftPhongShader(device=device, \n", - " cameras=camera,\n", - " lights=lights)\n", - ")" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "NM7gJux8GMQX" - }, - "source": [ - "We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target RGB images:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "BS6LAQquF3wq", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Number of views to optimize over in each SGD iteration\n", - "num_views_per_iteration = 2\n", - "# Number of optimization steps\n", - "Niter = 2000\n", - "# Plot period for the losses\n", - "plot_period = 250\n", - "\n", - "%matplotlib inline\n", - "\n", - "# Optimize using rendered RGB image loss, rendered silhouette image loss, mesh \n", - "# edge loss, mesh normal consistency, and mesh laplacian smoothing\n", - "losses = {\"rgb\": {\"weight\": 1.0, \"values\": []},\n", - " \"silhouette\": {\"weight\": 1.0, \"values\": []},\n", - " \"edge\": {\"weight\": 1.0, \"values\": []},\n", - " \"normal\": {\"weight\": 0.01, \"values\": []},\n", - " \"laplacian\": {\"weight\": 1.0, \"values\": []},\n", - " }\n", - "\n", - "# We will learn to deform the source mesh by offsetting its vertices\n", - "# The shape of the deform parameters is equal to the total number of vertices in \n", - "# src_mesh\n", - "verts_shape = src_mesh.verts_packed().shape\n", - "deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)\n", - "\n", - "# We will also learn per vertex colors for our sphere mesh that define texture \n", - "# of the mesh\n", - "sphere_verts_rgb = torch.full([1, verts_shape[0], 3], 0.5, device=device, requires_grad=True)\n", - "\n", - "# The optimizer\n", - "optimizer = torch.optim.SGD([deform_verts, sphere_verts_rgb], lr=1.0, momentum=0.9)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "tzIAycuUR1Rq" - }, - "source": [ - "We write an optimization loop to iteratively refine our predicted mesh and its vertex colors from the sphere mesh into a mesh that matches the target images:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "EKEH2p8-R1Rr", - "colab_type": "code", - "colab": {} - }, - "source": [ - "loop = tqdm(range(Niter))\n", - "\n", - "for i in loop:\n", - " # Initialize optimizer\n", - " optimizer.zero_grad()\n", - " \n", - " # Deform the mesh\n", - " new_src_mesh = src_mesh.offset_verts(deform_verts)\n", - " \n", - " # Add per vertex colors to texture the mesh\n", - " new_src_mesh.textures = Textures(verts_rgb=sphere_verts_rgb) \n", - " \n", - " # Losses to smooth /regularize the mesh shape\n", - " loss = {k: torch.tensor(0.0, device=device) for k in losses}\n", - " update_mesh_shape_prior_losses(new_src_mesh, loss)\n", - " \n", - " # Randomly select two views to optimize over in this iteration. Compared\n", - " # to using just one view, this helps resolve ambiguities between updating\n", - " # mesh shape vs. 
updating mesh texture\n", - " for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:\n", - " images_predicted = renderer_textured(new_src_mesh, cameras=target_cameras[j], lights=lights)\n", - "\n", - " # Squared L2 distance between the predicted silhouette and the target \n", - " # silhouette from our dataset\n", - " predicted_silhouette = images_predicted[..., 3]\n", - " loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()\n", - " loss[\"silhouette\"] += loss_silhouette / num_views_per_iteration\n", - " \n", - " # Squared L2 distance between the predicted RGB image and the target \n", - " # image from our dataset\n", - " predicted_rgb = images_predicted[..., :3]\n", - " loss_rgb = ((predicted_rgb - target_rgb[j]) ** 2).mean()\n", - " loss[\"rgb\"] += loss_rgb / num_views_per_iteration\n", - " \n", - " # Weighted sum of the losses\n", - " sum_loss = torch.tensor(0.0, device=device)\n", - " for k, l in loss.items():\n", - " sum_loss += l * losses[k][\"weight\"]\n", - " losses[k][\"values\"].append(l)\n", - " \n", - " # Print the losses\n", - " loop.set_description(\"total_loss = %.6f\" % sum_loss)\n", - " \n", - " # Plot mesh\n", - " if i % plot_period == 0:\n", - " visualize_prediction(new_src_mesh, renderer=renderer_textured, title=\"iter: %d\" % i, silhouette=False)\n", - " \n", - " # Optimization step\n", - " sum_loss.backward()\n", - " optimizer.step()\n" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "scrolled": true, - "id": "2qTcHO4rR1Rs", - "colab_type": "code", - "colab": {} - }, - "source": [ - "visualize_prediction(new_src_mesh, renderer=renderer_textured, silhouette=False)\n", - "plot_losses(losses)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "akBOm_xcNUms", - "colab_type": "text" - }, - "source": [ - "Save the final predicted mesh:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "dXoIsGyhxRyK" - }, - "source": [ - "## 4. Save the final predicted mesh" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "OQGhV-psKna8", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Fetch the verts and faces of the final predicted mesh\n", - "final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)\n", - "\n", - "# Scale normalize back to the original target size\n", - "final_verts = final_verts * scale + center\n", - "\n", - "# Store the predicted mesh using save_obj\n", - "final_obj = os.path.join('./', 'final_model.obj')\n", - "save_obj(final_obj, final_verts, final_faces)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "MtKYp0B6R1Ru" - }, - "source": [ - "## 5. Conclusion\n", - "In this tutorial, we learned how to load a textured mesh from an obj file, create a synthetic dataset by rendering the mesh from multiple viewpoints. We showed how to set up an optimization loop to fit a mesh to the observed dataset images based on a rendered silhouette loss. We then augmented this optimization loop with an additional loss based on rendered RGB images, which allowed us to predict both a mesh and its texture." - ] - } - ] + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "_Ip8kp4TfBLZ" + }, + "outputs": [], + "source": [ + "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "kuXHJv44fBLe"
+   },
+   "source": [
+    "# Fit a mesh via rendering\n",
+    "\n",
+    "This tutorial shows how to:\n",
+    "- Load a mesh and textures from an `.obj` file. \n",
+    "- Create a synthetic dataset by rendering a textured mesh from multiple viewpoints\n",
+    "- Fit a mesh to the observed synthetic images using differentiable silhouette rendering\n",
+    "- Fit a mesh and its textures using differentiable textured rendering"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "Bnj3THhzfBLf"
+   },
+   "source": [
+    "## 0. Install and Import modules"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "okLalbR_g7NS"
+   },
+   "source": [
+    "If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "musUWTglgxSB"
+   },
+   "outputs": [],
+   "source": [
+    "!pip install torch torchvision\n",
+    "!pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "nX99zdoffBLg"
+   },
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import torch\n",
+    "import matplotlib.pyplot as plt\n",
+    "from skimage.io import imread\n",
+    "\n",
+    "from pytorch3d.utils import ico_sphere\n",
+    "import numpy as np\n",
+    "from tqdm.notebook import tqdm\n",
+    "\n",
+    "# Util function for loading meshes\n",
+    "from pytorch3d.io import load_objs_as_meshes, save_obj\n",
+    "\n",
+    "from pytorch3d.loss import (\n",
+    "    chamfer_distance, \n",
+    "    mesh_edge_loss, \n",
+    "    mesh_laplacian_smoothing, \n",
+    "    mesh_normal_consistency,\n",
+    ")\n",
+    "\n",
+    "# Data structures and functions for rendering\n",
+    "from pytorch3d.structures import Meshes\n",
+    "from pytorch3d.renderer import (\n",
+    "    look_at_view_transform,\n",
+    "    OpenGLPerspectiveCameras, \n",
+    "    PointLights, \n",
+    "    DirectionalLights, \n",
+    "    Materials, \n",
+    "    RasterizationSettings, \n",
+    "    MeshRenderer, \n",
+    "    MeshRasterizer, \n",
+    "    SoftPhongShader,\n",
+    "    SoftSilhouetteShader,\n",
+    "    TexturesVertex\n",
+    ")\n",
+    "\n",
+    "# add path for demo utils functions \n",
+    "import sys\n",
+    "import os\n",
+    "sys.path.append(os.path.abspath(''))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "Lxmehq6Zhrzv"
+   },
+   "source": [
+    "If using **Google Colab**, fetch the utils file for plotting image grids:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "HZozr3Pmho-5"
+   },
+   "outputs": [],
+   "source": [
+    "!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py\n",
+    "from plot_image_grid import image_grid"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "g4B62MzYiJUM"
+   },
+   "source": [
+    "OR if running **locally**, uncomment and run the following cell:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "paJ4Im8ahl7O"
+   },
+   "outputs": [],
+   "source": [
+    "# from utils.plot_image_grid import image_grid"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "collapsed": true,
+    "id": 
"5jGq772XfBLk" + }, + "source": [ + "### 1. Load a mesh and texture file\n", + "\n", + "Load an `.obj` file and it's associated `.mtl` file and create a **Textures** and **Meshes** object. \n", + "\n", + "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. \n", + "\n", + "**TexturesVertex** is an auxillary datastructure for storing vertex rgb texture information about meshes. \n", + "\n", + "**Meshes** has several class methods which are used throughout the rendering pipeline." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "a8eU4zo5jd_H" + }, + "source": [ + "If running this notebook using **Google Colab**, run the following cell to fetch the mesh obj and texture files and save it at the path `data/cow_mesh`:\n", + "If running locally, the data is already available at the correct path. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "tTm0cVuOjb1W" + }, + "outputs": [], + "source": [ + "!mkdir -p data/cow_mesh\n", + "!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj\n", + "!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl\n", + "!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "gi5Kd0GafBLl" + }, + "outputs": [], + "source": [ + "# Setup\n", + "if torch.cuda.is_available():\n", + " device = torch.device(\"cuda:0\")\n", + " torch.cuda.set_device(device)\n", + "else:\n", + " device = torch.device(\"cpu\")\n", + "\n", + "# Set paths\n", + "DATA_DIR = \"./data\"\n", + "obj_filename = os.path.join(DATA_DIR, \"cow_mesh/cow.obj\")\n", + "\n", + "# Load obj file\n", + "mesh = load_objs_as_meshes([obj_filename], device=device)\n", + "\n", + "# We scale normalize and center the target mesh to fit in a sphere of radius 1 \n", + "# centered at (0,0,0). (scale, center) will be used to bring the predicted mesh \n", + "# to its original center and scale. Note that normalizing the target mesh, \n", + "# speeds up the optimization but is not necessary!\n", + "verts = mesh.verts_packed()\n", + "N = verts.shape[0]\n", + "center = verts.mean(0)\n", + "scale = max((verts - center).abs().max(0)[0])\n", + "mesh.offset_verts_(-center.expand(N, 3))\n", + "mesh.scale_verts_((1.0 / float(scale)));" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "17c4xmtyfBMH" + }, + "source": [ + "## 2. Dataset Creation\n", + "\n", + "We sample different camera positions that encode multiple viewpoints of the cow. We create a renderer with a shader that performs texture map interpolation. We render a synthetic dataset of images of the textured cow mesh from multiple viewpoints.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CDQKebNNfBMI" + }, + "outputs": [], + "source": [ + "# the number of different viewpoints from which we want to render the mesh.\n", + "num_views = 20\n", + "\n", + "# Get a batch of viewing angles. \n", + "elev = torch.linspace(0, 360, num_views)\n", + "azim = torch.linspace(-180, 180, num_views)\n", + "\n", + "# Place a point light in front of the object. As mentioned above, the front of \n", + "# the cow is facing the -z direction. 
\n", + "lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])\n", + "\n", + "# Initialize an OpenGL perspective camera that represents a batch of different \n", + "# viewing angles. All the cameras helper methods support mixed type inputs and \n", + "# broadcasting. So we can view the camera from the a distance of dist=2.7, and \n", + "# then specify elevation and azimuth angles for each viewpoint as tensors. \n", + "R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)\n", + "cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)\n", + "\n", + "# We arbitrarily choose one particular view that will be used to visualize \n", + "# results\n", + "camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...], \n", + " T=T[None, 1, ...]) \n", + "\n", + "# Define the settings for rasterization and shading. Here we set the output \n", + "# image to be of size 128X128. As we are rendering images for visualization \n", + "# purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to \n", + "# rasterize_meshes.py for explanations of these parameters. We also leave \n", + "# bin_size and max_faces_per_bin to their default values of None, which sets \n", + "# their values using huristics and ensures that the faster coarse-to-fine \n", + "# rasterization method is used. Refer to docs/notes/renderer.md for an \n", + "# explanation of the difference between naive and coarse-to-fine rasterization. \n", + "raster_settings = RasterizationSettings(\n", + " image_size=128, \n", + " blur_radius=0.0, \n", + " faces_per_pixel=1, \n", + ")\n", + "\n", + "# Create a phong renderer by composing a rasterizer and a shader. The textured \n", + "# phong shader will interpolate the texture uv coordinates for each vertex, \n", + "# sample from a texture image and apply the Phong lighting model\n", + "renderer = MeshRenderer(\n", + " rasterizer=MeshRasterizer(\n", + " cameras=camera, \n", + " raster_settings=raster_settings\n", + " ),\n", + " shader=SoftPhongShader(\n", + " device=device, \n", + " cameras=camera,\n", + " lights=lights\n", + " )\n", + ")\n", + "\n", + "# Create a batch of meshes by repeating the cow mesh and associated textures. \n", + "# Meshes has a useful `extend` method which allows us do this very easily. \n", + "# This also extends the textures. \n", + "meshes = mesh.extend(num_views)\n", + "\n", + "# Render the cow mesh from each viewing angle\n", + "target_images = renderer(meshes, cameras=cameras, lights=lights)\n", + "\n", + "# Our multi-view cow dataset will be represented by these 2 lists of tensors,\n", + "# each of length num_views.\n", + "target_rgb = [target_images[i, ..., :3] for i in range(num_views)]\n", + "target_cameras = [OpenGLPerspectiveCameras(device=device, R=R[None, i, ...], \n", + " T=T[None, i, ...]) for i in range(num_views)]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "TppB4PVmR1Rc" + }, + "source": [ + "Visualize the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "HHE0CnbVR1Rd" + }, + "outputs": [], + "source": [ + "# RGB images\n", + "image_grid(target_images.cpu().numpy(), rows=4, cols=5, rgb=True)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "gOb4rYx65E8z" + }, + "source": [ + "Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. 
For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We construct a soft silhouette shader to render this alpha channel."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "iP_g-nwX4exM"
+   },
+   "outputs": [],
+   "source": [
+    "# Rasterization settings for silhouette rendering \n",
+    "sigma = 1e-4\n",
+    "raster_settings_silhouette = RasterizationSettings(\n",
+    "    image_size=128, \n",
+    "    blur_radius=np.log(1. / 1e-4 - 1.)*sigma, \n",
+    "    faces_per_pixel=50, \n",
+    ")\n",
+    "\n",
+    "# Silhouette renderer \n",
+    "renderer_silhouette = MeshRenderer(\n",
+    "    rasterizer=MeshRasterizer(\n",
+    "        cameras=camera, \n",
+    "        raster_settings=raster_settings_silhouette\n",
+    "    ),\n",
+    "    shader=SoftSilhouetteShader()\n",
+    ")\n",
+    "\n",
+    "# Render silhouette images. The 4th channel (index 3) of the rendering output \n",
+    "# is the alpha/silhouette channel\n",
+    "silhouette_images = renderer_silhouette(meshes, cameras=cameras, lights=lights)\n",
+    "target_silhouette = [silhouette_images[i, ..., 3] for i in range(num_views)]\n",
+    "\n",
+    "# Visualize silhouette images\n",
+    "image_grid(silhouette_images.cpu().numpy(), rows=4, cols=5, rgb=False)\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "t3qphI1ElUb5"
+   },
+   "source": [
+    "## 3. Mesh prediction via silhouette rendering\n",
+    "In the previous section, we created a dataset of images of multiple viewpoints of a cow. In this section, we predict a mesh by observing those target images without any knowledge of the ground truth cow mesh. 
We assume we know the position of the cameras and lighting.\n", + "\n", + "We first define some helper functions to visualize the results of our mesh prediction:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "eeWYHROrR1Rh" + }, + "outputs": [], + "source": [ + "# Show a visualization comparing the rendered predicted mesh to the ground truth \n", + "# mesh\n", + "def visualize_prediction(predicted_mesh, renderer=renderer_silhouette, \n", + " target_image=target_rgb[1], title='', \n", + " silhouette=False):\n", + " inds = 3 if silhouette else range(3)\n", + " predicted_images = renderer(predicted_mesh)\n", + " plt.figure(figsize=(20, 10))\n", + " plt.subplot(1, 2, 1)\n", + " plt.imshow(predicted_images[0, ..., inds].cpu().detach().numpy())\n", + "\n", + " plt.subplot(1, 2, 2)\n", + " plt.imshow(target_image.cpu().detach().numpy())\n", + " plt.title(title)\n", + " plt.grid(\"off\")\n", + " plt.axis(\"off\")\n", + "\n", + "# Plot losses as a function of optimization iteration\n", + "def plot_losses(losses):\n", + " fig = plt.figure(figsize=(13, 5))\n", + " ax = fig.gca()\n", + " for k, l in losses.items():\n", + " ax.plot(l['values'], label=k + \" loss\")\n", + " ax.legend(fontsize=\"16\")\n", + " ax.set_xlabel(\"Iteration\", fontsize=\"16\")\n", + " ax.set_ylabel(\"Loss\", fontsize=\"16\")\n", + " ax.set_title(\"Loss vs iterations\", fontsize=\"16\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "PpsvBpuMR1Ri" + }, + "source": [ + "Starting from a sphere mesh, we will learn offsets of each vertex such that the predicted mesh silhouette is more similar to the target silhouette image at each optimization step. We begin by loading our initial sphere mesh:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "i989ARH1R1Rj" + }, + "outputs": [], + "source": [ + "# We initialize the source shape to be a sphere of radius 1. \n", + "src_mesh = ico_sphere(4, device)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f5xVtgLNDvC5" + }, + "source": [ + "We create a new differentiable renderer for rendering the silhouette of our predicted mesh:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "sXfjzgG4DsDJ" + }, + "outputs": [], + "source": [ + "# Rasterization settings for differentiable rendering, where the blur_radius\n", + "# initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable \n", + "# Renderer for Image-based 3D Reasoning', ICCV 2019\n", + "sigma = 1e-4\n", + "raster_settings_soft = RasterizationSettings(\n", + " image_size=128, \n", + " blur_radius=np.log(1. 
/ 1e-4 - 1.)*sigma, \n",
+    "    faces_per_pixel=50, \n",
+    ")\n",
+    "\n",
+    "# Silhouette renderer \n",
+    "renderer_silhouette = MeshRenderer(\n",
+    "    rasterizer=MeshRasterizer(\n",
+    "        cameras=camera, \n",
+    "        raster_settings=raster_settings_soft\n",
+    "    ),\n",
+    "    shader=SoftSilhouetteShader()\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "SGJKbCB6R1Rk"
+   },
+   "source": [
+    "We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target silhouettes:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "0sLrKv_MEULh"
+   },
+   "outputs": [],
+   "source": [
+    "# Number of views to optimize over in each SGD iteration\n",
+    "num_views_per_iteration = 2\n",
+    "# Number of optimization steps\n",
+    "Niter = 2000\n",
+    "# Plot period for the losses\n",
+    "plot_period = 250\n",
+    "\n",
+    "%matplotlib inline\n",
+    "\n",
+    "# Optimize using rendered silhouette image loss, mesh edge loss, mesh normal \n",
+    "# consistency, and mesh laplacian smoothing\n",
+    "losses = {\"silhouette\": {\"weight\": 1.0, \"values\": []},\n",
+    "          \"edge\": {\"weight\": 1.0, \"values\": []},\n",
+    "          \"normal\": {\"weight\": 0.01, \"values\": []},\n",
+    "          \"laplacian\": {\"weight\": 1.0, \"values\": []},\n",
+    "         }\n",
+    "\n",
+    "# Losses to smooth / regularize the mesh shape\n",
+    "def update_mesh_shape_prior_losses(mesh, loss):\n",
+    "    # Edge length of the predicted mesh\n",
+    "    loss[\"edge\"] = mesh_edge_loss(mesh)\n",
+    "    \n",
+    "    # Mesh normal consistency\n",
+    "    loss[\"normal\"] = mesh_normal_consistency(mesh)\n",
+    "    \n",
+    "    # Mesh laplacian smoothing\n",
+    "    loss[\"laplacian\"] = mesh_laplacian_smoothing(mesh, method=\"uniform\")\n",
+    "\n",
+    "# We will learn to deform the source mesh by offsetting its vertices\n",
+    "# The shape of the deform parameters is equal to the total number of vertices in\n",
+    "# src_mesh\n",
+    "verts_shape = src_mesh.verts_packed().shape\n",
+    "deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)\n",
+    "\n",
+    "# The optimizer\n",
+    "optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "QLc9zK8lEqFS"
+   },
+   "source": [
+    "We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the silhouettes of the target images:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "gCfepfOoR1Rl"
+   },
+   "outputs": [],
+   "source": [
+    "loop = tqdm(range(Niter))\n",
+    "\n",
+    "for i in loop:\n",
+    "    # Initialize optimizer\n",
+    "    optimizer.zero_grad()\n",
+    "    \n",
+    "    # Deform the mesh\n",
+    "    new_src_mesh = src_mesh.offset_verts(deform_verts)\n",
+    "    \n",
+    "    # Losses to smooth / regularize the mesh shape\n",
+    "    loss = {k: torch.tensor(0.0, device=device) for k in losses}\n",
+    "    update_mesh_shape_prior_losses(new_src_mesh, loss)\n",
+    "    \n",
+    "    # Compute the average silhouette loss over two random views, as the average \n",
+    "    # squared L2 distance between the predicted silhouette and the target \n",
+    "    # silhouette from our dataset\n",
+    "    for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:\n",
+    "        images_predicted = renderer_silhouette(new_src_mesh, cameras=target_cameras[j], lights=lights)\n",
+    "        predicted_silhouette = 
images_predicted[..., 3]\n",
+    "        loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()\n",
+    "        loss[\"silhouette\"] += loss_silhouette / num_views_per_iteration\n",
+    "    \n",
+    "    # Weighted sum of the losses\n",
+    "    sum_loss = torch.tensor(0.0, device=device)\n",
+    "    for k, l in loss.items():\n",
+    "        sum_loss += l * losses[k][\"weight\"]\n",
+    "        losses[k][\"values\"].append(l)\n",
+    "    \n",
+    "    # Print the losses\n",
+    "    loop.set_description(\"total_loss = %.6f\" % sum_loss)\n",
+    "    \n",
+    "    # Plot mesh\n",
+    "    if i % plot_period == 0:\n",
+    "        visualize_prediction(new_src_mesh, title=\"iter: %d\" % i, silhouette=True,\n",
+    "                             target_image=target_silhouette[1])\n",
+    "    \n",
+    "    # Optimization step\n",
+    "    sum_loss.backward()\n",
+    "    optimizer.step()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "CX4huayKR1Rm",
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "visualize_prediction(new_src_mesh, silhouette=True, \n",
+    "                     target_image=target_silhouette[1])\n",
+    "plot_losses(losses)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "XJDsJQmrR1Ro"
+   },
+   "source": [
+    "## 4. Mesh and texture prediction via textured rendering\n",
+    "We can predict both the mesh and its texture if we add an additional loss based on comparing a predicted rendered RGB image to the target image. As before, we start with a sphere mesh. We learn both translational offsets and RGB texture colors for each vertex in the sphere mesh. Since our loss is based on rendered RGB pixel values instead of just the silhouette, we use a **SoftPhongShader** instead of a **SoftSilhouetteShader**."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "aZObyIt9R1Ro"
+   },
+   "outputs": [],
+   "source": [
+    "# Rasterization settings for differentiable rendering, where the blur_radius\n",
+    "# initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable \n",
+    "# Renderer for Image-based 3D Reasoning', ICCV 2019\n",
+    "sigma = 1e-4\n",
+    "raster_settings_soft = RasterizationSettings(\n",
+    "    image_size=128, \n",
+    "    blur_radius=np.log(1. 
/ 1e-4 - 1.)*sigma, \n", + " faces_per_pixel=50, \n", + ")\n", + "\n", + "# Differentiable soft renderer using per vertex RGB colors for texture\n", + "renderer_textured = MeshRenderer(\n", + " rasterizer=MeshRasterizer(\n", + " cameras=camera, \n", + " raster_settings=raster_settings_soft\n", + " ),\n", + " shader=SoftPhongShader(device=device, \n", + " cameras=camera,\n", + " lights=lights)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "NM7gJux8GMQX" + }, + "source": [ + "We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target RGB images:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "BS6LAQquF3wq" + }, + "outputs": [], + "source": [ + "# Number of views to optimize over in each SGD iteration\n", + "num_views_per_iteration = 2\n", + "# Number of optimization steps\n", + "Niter = 2000\n", + "# Plot period for the losses\n", + "plot_period = 250\n", + "\n", + "%matplotlib inline\n", + "\n", + "# Optimize using rendered RGB image loss, rendered silhouette image loss, mesh \n", + "# edge loss, mesh normal consistency, and mesh laplacian smoothing\n", + "losses = {\"rgb\": {\"weight\": 1.0, \"values\": []},\n", + " \"silhouette\": {\"weight\": 1.0, \"values\": []},\n", + " \"edge\": {\"weight\": 1.0, \"values\": []},\n", + " \"normal\": {\"weight\": 0.01, \"values\": []},\n", + " \"laplacian\": {\"weight\": 1.0, \"values\": []},\n", + " }\n", + "\n", + "# We will learn to deform the source mesh by offsetting its vertices\n", + "# The shape of the deform parameters is equal to the total number of vertices in \n", + "# src_mesh\n", + "verts_shape = src_mesh.verts_packed().shape\n", + "deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)\n", + "\n", + "# We will also learn per vertex colors for our sphere mesh that define texture \n", + "# of the mesh\n", + "sphere_verts_rgb = torch.full([1, verts_shape[0], 3], 0.5, device=device, requires_grad=True)\n", + "\n", + "# The optimizer\n", + "optimizer = torch.optim.SGD([deform_verts, sphere_verts_rgb], lr=1.0, momentum=0.9)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "tzIAycuUR1Rq" + }, + "source": [ + "We write an optimization loop to iteratively refine our predicted mesh and its vertex colors from the sphere mesh into a mesh that matches the target images:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "EKEH2p8-R1Rr" + }, + "outputs": [], + "source": [ + "loop = tqdm(range(Niter))\n", + "\n", + "for i in loop:\n", + " # Initialize optimizer\n", + " optimizer.zero_grad()\n", + " \n", + " # Deform the mesh\n", + " new_src_mesh = src_mesh.offset_verts(deform_verts)\n", + " \n", + " # Add per vertex colors to texture the mesh\n", + " new_src_mesh.textures = TexturesVertex(verts_rgb=sphere_verts_rgb) \n", + " \n", + " # Losses to smooth /regularize the mesh shape\n", + " loss = {k: torch.tensor(0.0, device=device) for k in losses}\n", + " update_mesh_shape_prior_losses(new_src_mesh, loss)\n", + " \n", + " # Randomly select two views to optimize over in this iteration. Compared\n", + " # to using just one view, this helps resolve ambiguities between updating\n", + " # mesh shape vs. 
updating mesh texture\n",
+    "    for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:\n",
+    "        images_predicted = renderer_textured(new_src_mesh, cameras=target_cameras[j], lights=lights)\n",
+    "\n",
+    "        # Squared L2 distance between the predicted silhouette and the target \n",
+    "        # silhouette from our dataset\n",
+    "        predicted_silhouette = images_predicted[..., 3]\n",
+    "        loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()\n",
+    "        loss[\"silhouette\"] += loss_silhouette / num_views_per_iteration\n",
+    "        \n",
+    "        # Squared L2 distance between the predicted RGB image and the target \n",
+    "        # image from our dataset\n",
+    "        predicted_rgb = images_predicted[..., :3]\n",
+    "        loss_rgb = ((predicted_rgb - target_rgb[j]) ** 2).mean()\n",
+    "        loss[\"rgb\"] += loss_rgb / num_views_per_iteration\n",
+    "    \n",
+    "    # Weighted sum of the losses\n",
+    "    sum_loss = torch.tensor(0.0, device=device)\n",
+    "    for k, l in loss.items():\n",
+    "        sum_loss += l * losses[k][\"weight\"]\n",
+    "        losses[k][\"values\"].append(l)\n",
+    "    \n",
+    "    # Print the losses\n",
+    "    loop.set_description(\"total_loss = %.6f\" % sum_loss)\n",
+    "    \n",
+    "    # Plot mesh\n",
+    "    if i % plot_period == 0:\n",
+    "        visualize_prediction(new_src_mesh, renderer=renderer_textured, title=\"iter: %d\" % i, silhouette=False)\n",
+    "    \n",
+    "    # Optimization step\n",
+    "    sum_loss.backward()\n",
+    "    optimizer.step()\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "2qTcHO4rR1Rs",
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "visualize_prediction(new_src_mesh, renderer=renderer_textured, silhouette=False)\n",
+    "plot_losses(losses)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "akBOm_xcNUms"
+   },
+   "source": [
+    "Save the final predicted mesh:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "dXoIsGyhxRyK"
+   },
+   "source": [
+    "## 5. Save the final predicted mesh"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "OQGhV-psKna8"
+   },
+   "outputs": [],
+   "source": [
+    "# Fetch the verts and faces of the final predicted mesh\n",
+    "final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)\n",
+    "\n",
+    "# Scale normalize back to the original target size\n",
+    "final_verts = final_verts * scale + center\n",
+    "\n",
+    "# Store the predicted mesh using save_obj\n",
+    "final_obj = os.path.join('./', 'final_model.obj')\n",
+    "save_obj(final_obj, final_verts, final_faces)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "MtKYp0B6R1Ru"
+   },
+   "source": [
+    "## 6. Conclusion\n",
+    "In this tutorial, we learned how to load a textured mesh from an obj file and create a synthetic dataset by rendering the mesh from multiple viewpoints. We showed how to set up an optimization loop to fit a mesh to the observed dataset images based on a rendered silhouette loss. We then augmented this optimization loop with an additional loss based on rendered RGB images, which allowed us to predict both a mesh and its texture." 
diff --git a/docs/tutorials/render_textured_meshes.ipynb b/docs/tutorials/render_textured_meshes.ipynb
index 24e8548db..fac102819 100644
--- a/docs/tutorials/render_textured_meshes.ipynb
+++ b/docs/tutorials/render_textured_meshes.ipynb
@@ -87,7 +87,7 @@
     "from pytorch3d.io import load_objs_as_meshes, load_obj\n",
     "\n",
     "# Data structures and functions for rendering\n",
-    "from pytorch3d.structures import Meshes, Textures\n",
+    "from pytorch3d.structures import Meshes\n",
     "from pytorch3d.renderer import (\n",
     "    look_at_view_transform,\n",
     "    FoVPerspectiveCameras, \n",
@@ -97,7 +97,8 @@
     "    RasterizationSettings, \n",
     "    MeshRenderer, \n",
     "    MeshRasterizer, \n",
-    "    SoftPhongShader\n",
+    "    SoftPhongShader,\n",
+    "    TexturesUV\n",
     ")\n",
     "\n",
     "# add path for demo utils functions \n",
@@ -170,7 +171,7 @@
     "\n",
     "**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes. \n",
     "\n",
-    "**Textures** is an auxillary datastructure for storing texture information about meshes. \n",
+    "**TexturesUV** is an auxiliary datastructure for storing vertex uv and texture maps for meshes. \n",
     "\n",
     "**Meshes** has several class methods which are used throughout the rendering pipeline."
    ]
@@ -537,7 +538,7 @@
    "source": [
     "# We can pass arbirary keyword arguments to the rasterizer/shader via the renderer\n",
     "# so the renderer does not need to be reinitialized if any of the settings change.\n",
-    "images = renderer(meshes, cameras=cameras, lights=lights)"
+    "images = renderer(mesh, cameras=cameras, lights=lights)"
    ]
   },
   {
@@ -582,9 +583,9 @@
   "backup_notebook_id": "569222367081034"
  },
  "kernelspec": {
-  "display_name": "pytorch3d (local)",
+  "display_name": "intro_to_cv",
   "language": "python",
-  "name": "pytorch3d_local"
+  "name": "bento_kernel_intro_to_cv"
  },
 "language_info": {
  "codemirror_mode": {
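For context on the import change above: `TexturesUV`, now imported from `pytorch3d.renderer`, bundles a texture map with per-face uv indices and per-vertex uv coordinates. A minimal sketch of how such a mesh can be assembled from these pieces, mirroring what `load_objs_as_meshes` does internally (`cow.obj` is a placeholder path):

```python
import torch
from pytorch3d.io import load_obj
from pytorch3d.renderer import TexturesUV
from pytorch3d.structures import Meshes

# "cow.obj" is a placeholder; any .obj with an associated texture map works.
verts, faces, aux = load_obj("cow.obj")

texture_image = list(aux.texture_images.values())[0]  # (H, W, 3) map from the .mtl
textures = TexturesUV(
    maps=texture_image[None],            # (1, H, W, 3) batch of texture maps
    faces_uvs=faces.textures_idx[None],  # (1, F, 3) uv indices per face
    verts_uvs=aux.verts_uvs[None],       # (1, V_uv, 2) uv coordinates
)
mesh = Meshes(verts=[verts], faces=[faces.verts_idx], textures=textures)
```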
diff --git a/pytorch3d/renderer/mesh/textures.py b/pytorch3d/renderer/mesh/textures.py
index d06e06191..b0a938edf 100644
--- a/pytorch3d/renderer/mesh/textures.py
+++ b/pytorch3d/renderer/mesh/textures.py
@@ -599,11 +599,6 @@ def __init__(
             if not all(v.device == self.device for v in verts_uvs):
                 raise ValueError("verts_uvs and faces_uvs must be on the same device")
 
-            # These values may be overridden when textures is
-            # passed into the Meshes constructor. For more details
-            # refer to the __init__ of Meshes.
-            self._num_verts_per_mesh = [len(v) for v in verts_uvs]
-
         elif torch.is_tensor(verts_uvs):
             if (
                 verts_uvs.ndim != 3
@@ -621,7 +616,6 @@ def __init__(
             # These values may be overridden when textures is
             # passed into the Meshes constructor.
             max_V = verts_uvs.shape[1]
-            self._num_verts_per_mesh = [max_V] * self._N
         else:
             raise ValueError("Expected verts_uvs to be a tensor or list")
 
@@ -758,9 +752,11 @@ def verts_uvs_list(self) -> List[torch.Tensor]:
                     torch.empty((0, 2), dtype=torch.float32, device=self.device)
                 ] * self._N
             else:
-                self._verts_uvs_list = padded_to_list(
-                    self._verts_uvs_padded, split_size=self._num_verts_per_mesh
-                )
+                # The number of vertices in the mesh and in verts_uvs can differ
+                # e.g. if a vertex is shared between 3 faces, it can
+                # have up to 3 different uv coordinates. Therefore we cannot
+                # convert directly from padded to list using _num_verts_per_mesh
+                self._verts_uvs_list = list(self._verts_uvs_padded.unbind(0))
         return self._verts_uvs_list
 
     # Currently only the padded maps are used.
@@ -783,7 +779,6 @@ def extend(self, N: int) -> "TexturesUV":
                 "verts_uvs_padded",
                 "faces_uvs_padded",
                 "_num_faces_per_mesh",
-                "_num_verts_per_mesh",
             ],
         )
         new_tex = TexturesUV(
@@ -791,8 +786,8 @@
             faces_uvs=new_props["faces_uvs_padded"],
             verts_uvs=new_props["verts_uvs_padded"],
         )
+
         new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
-        new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
         return new_tex
 
     def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
@@ -860,6 +855,7 @@ def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
         # right-bottom pixel of input.
         pixel_uvs = pixel_uvs * 2.0 - 1.0
 
+        texture_maps = torch.flip(texture_maps, [2])  # flip y axis of the texture map
         if texture_maps.device != pixel_uvs.device:
             texture_maps = texture_maps.to(pixel_uvs.device)
 
diff --git a/tests/test_texturing.py b/tests/test_texturing.py
index 44d03ae4f..3ed2950d4 100644
--- a/tests/test_texturing.py
+++ b/tests/test_texturing.py
@@ -588,10 +588,19 @@ def test_extend(self):
         tex_init = tex_mesh.textures
         new_tex = new_mesh.textures
 
+        new_tex_num_verts = new_mesh.num_verts_per_mesh()
         for i in range(len(tex_mesh)):
             for n in range(N):
+                tex_nv = new_tex_num_verts[i * N + n]
                 self.assertClose(
-                    tex_init.verts_uvs_list()[i], new_tex.verts_uvs_list()[i * N + n]
+                    # The original textures were initialized using
+                    # verts uvs list
+                    tex_init.verts_uvs_list()[i],
+                    # In the new textures, the verts_uvs are initialized
+                    # from padded. The verts per mesh are not used to
+                    # convert from padded to list. See TexturesUV for an
+                    # explanation.
+                    new_tex.verts_uvs_list()[i * N + n][:tex_nv, ...],
                 )
                 self.assertClose(
                     tex_init.faces_uvs_list()[i], new_tex.faces_uvs_list()[i * N + n]
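A toy illustration of the behavior the `textures.py` change addresses (shapes invented for the example): when vertices are shared between faces with different uvs, `verts_uvs` can hold more rows than the mesh has vertices, so splitting the padded tensor by the number of verts per mesh would truncate valid uv coordinates.

```python
import torch
from pytorch3d.renderer import TexturesUV

# A mesh with, say, 4 vertices whose shared vertices carry face-specific
# uvs can have 6 uv coordinates in total.
verts_uvs = torch.rand(1, 6, 2)             # (N=1, V_uv=6, 2)
faces_uvs = torch.randint(0, 6, (1, 2, 3))  # (N=1, F=2, 3) indices into the uvs
maps = torch.rand(1, 8, 8, 3)               # (N=1, H, W, 3) texture map

tex = TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)

# With the fix, the list form keeps all 6 uv rows per mesh instead of
# being truncated to the vertex count.
assert tex.verts_uvs_list()[0].shape == (6, 2)
```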