225 changes: 158 additions & 67 deletions Detailed_Expression_Capture_and_Animation.ipynb
@@ -3,11 +3,9 @@
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "Copy of Detailed Expression Capture and Animation.ipynb",
"private_outputs": true,
"provenance": [],
"collapsed_sections": [],
"toc_visible": true,
"authorship_tag": "ABX9TyOJqULOUbwW86YwxbpnWlFn",
"include_colab_link": true
},
"kernelspec": {
@@ -27,32 +25,9 @@
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/YadiraF/DECA/blob/master/Detailed_Expression_Capture_and_Animation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
"<a href=\"https://colab.research.google.com/github/mhoangvslev/DECA/blob/master/Detailed_Expression_Capture_and_Animation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"source": [
"<center>\n",
"\n",
"# DECA: Detailed Expression Capture and Animation\n",
"\n",
"Code in [![GitHub stars](https://img.shields.io/github/stars/yadiraf/DECA?style=social)](https://github.com/YadiraF/DECA)\n",
"\n",
"Page at [![](https://img.shields.io/badge/Project-Page-blue?style=flat&logo=Google%20chrome&logoColor=blue)](https://deca.is.tue.mpg.de/)\n",
"\n",
"Made by [![Yao](https://img.shields.io/twitter/follow/yaofeng1995?style=social)](https://twitter.com/yaofeng1995)\n",
"\n",
"![extreme](https://deca.is.tue.mpg.de/uploads/ckeditor/pictures/642/content_teaser.gif)\n",
"\n",
"</center>\n",
"\n",
"Thanks [mhoangvslev](https://github.com/mhoangvslev) for contributing to this Colab document. "
],
"metadata": {
"id": "LKXziS2IzAg2"
}
},
{
"cell_type": "markdown",
"source": [
Expand All @@ -62,55 +37,68 @@
"id": "j0hyU8s5f_SB"
}
},
{
"cell_type": "code",
"source": [
"%tensorflow_version 1.x"
],
"metadata": {
"id": "x8Gm4LXA3Fay"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "bB5Ii6bkBYWc"
},
"outputs": [],
"source": [
"#@title Setup dependencies\n",
"\n",
"%cd /content/\n",
"!git clone https://github.com/yadiraf/DECA\n",
"!git clone https://github.com/mhoangvslev/DECA\n",
"\n",
"%cd DECA/\n",
"!apt -q install -y zip unzip ffmpeg libsm6 libxext6\n",
"#!pip install -r requirements.txt\n",
"!pip install 'torch==1.6.0'\n",
"!pip install 'torchvision==0.7.0'\n",
"!pip install -q 'pytorch3d==0.2.5'\n",
"!pip install -q numpy scipy chumpy scikit-image opencv-python PyYAML face-alignment yacs kornia ninja fvcore\n",
"!pip install -q lucid>=0.2.3 gdown matplotlib\n",
"#!pip install 'torch==1.6.0'\n",
"#!pip install 'torchvision==0.7.0'\n",
"#!pip install -q 'pytorch3d==0.2.5'\n",
"!pip install -q numpy scipy chumpy scikit-image opencv-python PyYAML face-alignment yacs kornia ninja fvcore gdown matplotlib\n",
"!pip install -q gdown==4.5.4 --no-cache-dir\n",
"#!pip install --upgrade ipykernel"
]
},
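Note that the pinned torch/torchvision/pytorch3d installs are commented out above because the next cell pulls a prebuilt pytorch3d wheel matched to whatever torch build the Colab runtime already ships. A quick sanity check of that runtime before moving on can save a failed install later; this is a minimal sketch using only standard PyTorch calls, not something the notebook itself defines:

```python
import torch

# Report the runtime's torch build and GPU state; the pytorch3d wheel
# selected in the next cell is derived from exactly these values.
print("torch:", torch.__version__)            # e.g. "1.13.1+cu116"
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("CUDA build:", torch.version.cuda)  # e.g. "11.6"
    print("GPU:", torch.cuda.get_device_name(0))
```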
{
"cell_type": "code",
"source": [
"#@title Install pytorch3d for Colab\n",
"# SOURCE: https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md\n",
"import sys\n",
"import torch\n",
"pyt_version_str=torch.__version__.split(\"+\")[0].replace(\".\", \"\")\n",
"version_str=\"\".join([\n",
" f\"py3{sys.version_info.minor}_cu\",\n",
" torch.version.cuda.replace(\".\",\"\"),\n",
" f\"_pyt{pyt_version_str}\"\n",
"])\n",
"!pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html\n"
],
"metadata": {
"cellView": "form",
"id": "yfHNyj6KpZ4X"
},
"execution_count": null,
"outputs": []
},
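If the wheel index has no build for the runtime's exact Python/CUDA/torch combination, the `pip install` above typically fails with a "no matching distribution found" error. A plain import check (nothing DECA-specific) confirms whether the install actually succeeded:

```python
# Verify that the prebuilt pytorch3d wheel installed and is importable.
import pytorch3d
print("pytorch3d:", pytorch3d.__version__)
```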
{
"cell_type": "code",
"source": [
"#@title Download models\n",
"#@markdown By executing this cell, you agree to the [LICENSE](https://flame.is.tue.mpg.de/modellicense.html) provided by Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V\n",
"\n",
"print(\"Downloading FLAME2020 model...\")\n",
"!gdown --id 18eHkbq2W3aJJVUNTM3QwFK0dPaeNoRAz -O FLAME2020.zip\n",
"!gdown 1FRoGBmNCLM6Q0FyP_IeqRtbwTsHslss5 -O FLAME2020.zip\n",
"!unzip -o FLAME2020.zip -d data/\n",
"\n",
"print(\"Downloading deca_model...\")\n",
"!gdown --id 1rp8kdyLPvErw2dTmqtjISRVvQLj6Yzje -O data/deca_model.tar"
"!gdown 1rp8kdyLPvErw2dTmqtjISRVvQLj6Yzje -O data/deca_model.tar"
],
"metadata": {
"cellView": "form",
"id": "ZmSRqqrvCIwx"
},
"execution_count": null,
Expand All @@ -125,12 +113,35 @@
"id": "TuLFayzVf6h-"
}
},
{
"cell_type": "code",
"source": [
"!python -V"
],
"metadata": {
"id": "xm8YmZeopmci"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"!pip list | grep torch"
],
"metadata": {
"id": "BC_nEkL2i9cQ"
},
"execution_count": null,
"outputs": []
},
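The same check can be done from Python rather than shell, which is handy if you want to fail fast when a package is missing. This is a small standard-library sketch, not part of the notebook:

```python
from importlib.metadata import version, PackageNotFoundError

# Mirror the `pip list | grep torch` check above from inside Python.
for pkg in ("torch", "torchvision", "pytorch3d"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} is not installed")
```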
{
"cell_type": "code",
"source": [
"#@title Run paper demo\n",
"print(\"Setting up...\")\n",
"!pip install -q kornia==0.4.0 yacs==0.1.8 face_alignment ninja fvcore\n",
"!pip list | grep torch\n",
"#!pip install -q kornia==0.4.0 yacs==0.1.8 face_alignment ninja fvcore\n",
"\n",
"print(\"Check for NVIDIA Driver...\")\n",
"!nvidia-smi\n",
@@ -142,7 +153,8 @@
"!python demos/demo_reconstruct.py -i $input_folder -s $output_folder --saveDepth True --saveObj True\n"
],
"metadata": {
"id": "iiFP_JPZHjVf"
"id": "iiFP_JPZHjVf",
"cellView": "form"
},
"execution_count": null,
"outputs": []
@@ -152,8 +164,6 @@
"source": [
"#@title Use your own image\n",
"#@markdown Upload your images to `upload` folder under `DECA`\n",
"print(\"Setting up...\")\n",
"!pip install -q kornia yacs face_alignment ninja fvcore\n",
"\n",
"print(\"Check for NVIDIA Driver...\")\n",
"!nvidia-smi\n",
@@ -162,32 +172,113 @@
"import os\n",
"input_folder = \"/content/\" #@param {type:\"string\"}\n",
"output_folder = os.path.join(input_folder, \"results\")\n",
"!python demos/demo_reconstruct.py -i $input_folder -s $output_folder --saveDepth True --saveObj True\n",
"\n",
"## show results \n",
"print('visualize one exmaple below')\n",
"!python demos/demo_reconstruct.py -i $input_folder -s $output_folder --saveDepth True --saveObj True\n"
],
"metadata": {
"cellView": "form",
"id": "Ny9lRkxefRPz"
},
"execution_count": null,
"outputs": []
},
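The cell above expects your images in the folder you point `input_folder` at. One way to get files there from inside Colab is the stock `google.colab.files` API; the target path below is an assumption, adjust it to match whatever `input_folder` you set:

```python
import os
from google.colab import files

# Hypothetical upload folder; change it to the folder you pass as input_folder.
upload_dir = "/content/DECA/upload"
os.makedirs(upload_dir, exist_ok=True)

uploaded = files.upload()  # opens a browser file picker and saves files to the cwd
for name in uploaded:
    os.replace(name, os.path.join(upload_dir, name))
print(f"{len(uploaded)} file(s) moved to {upload_dir}")
```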
{
"cell_type": "code",
"source": [
"#@title Visualize the results\n",
"#@markdown If you get an import error here, add `import ctypes.util` to glcontext.py. Warning: this may crash your session!\n",
"#@markdown Alternatively, you can view the exported .obj files with [3dviewer.net](https://3dviewer.net/).\n",
"import matplotlib.pyplot as plt\n",
"from glob import glob\n",
"from PIL import Image\n",
"vispath_list = glob(output_folder+'/*_size.jpg')\n",
"for vispath in vispath_list:\n",
" image = Image.open(vispath)\n",
" plt.figure(figsize=(20, 20))\n",
" plt.imshow(image)\n",
"\n",
"# Util function for loading meshes\n",
"from pytorch3d.io import load_objs_as_meshes, load_obj\n",
"\n",
"# Data structures and functions for rendering\n",
"from pytorch3d.structures import Meshes\n",
"from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene\n",
"from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib\n",
"from pytorch3d.renderer import (\n",
" look_at_view_transform,\n",
" FoVPerspectiveCameras, \n",
" PointLights, \n",
" DirectionalLights, \n",
" Materials, \n",
" RasterizationSettings, \n",
" MeshRenderer, \n",
" MeshRasterizer, \n",
" SoftPhongShader,\n",
" TexturesUV,\n",
" TexturesVertex\n",
")\n",
"import os\n",
"\n",
"import tensorflow as tf\n",
"\n",
"if torch.cuda.is_available():\n",
" device = torch.device(\"cuda:0\")\n",
" torch.cuda.set_device(device)\n",
"else:\n",
" device = torch.device(\"cpu\")\n",
"\n",
"# Initialize a camera.\n",
"# With world coordinates +Y up, +X left and +Z in, the front of the cow is facing the -Z direction. \n",
"# So we move the camera by 180 in the azimuth direction so it is facing the front of the cow. \n",
"R, T = look_at_view_transform(2.7, 0, 180) \n",
"cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n",
"\n",
"# Define the settings for rasterization and shading. Here we set the output image to be of size\n",
"# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1\n",
"# and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None which ensure that \n",
"# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for \n",
"# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of \n",
"# the difference between naive and coarse-to-fine rasterization. \n",
"raster_settings = RasterizationSettings(\n",
" image_size=512, \n",
" blur_radius=0.0, \n",
" faces_per_pixel=1, \n",
")\n",
"\n",
"# Place a point light in front of the object. As mentioned above, the front of the cow is facing the \n",
"# -z direction. \n",
"lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])\n",
"\n",
"# Create a Phong renderer by composing a rasterizer and a shader. The textured Phong shader will \n",
"# interpolate the texture uv coordinates for each vertex, sample from a texture image and \n",
"# apply the Phong lighting model\n",
"renderer = MeshRenderer(\n",
" rasterizer=MeshRasterizer(\n",
" cameras=cameras, \n",
" raster_settings=raster_settings\n",
" ),\n",
" shader=SoftPhongShader(\n",
" device=device, \n",
" cameras=cameras,\n",
" lights=lights\n",
" )\n",
")\n",
"\n",
"results = next(os.walk(output_folder))[1]\n",
"for folder in results:\n",
" filename = os.path.join(output_folder, folder)\n",
" mesh = load_objs_as_meshes([os.path.join(filename, f'{folder}.obj')], device=device)\n",
" #clr_map = load(os.path.join(filename, f'{folder}.png'))\n",
" images = renderer(mesh)\n",
" plt.figure(figsize=(10, 10))\n",
" plt.imshow(images[0, ..., :3].cpu().numpy())\n",
" plt.axis(\"off\");\n",
" break\n",
"print(f'Please check all results in {output_folder}')\n"
" break\n"
],
"metadata": {
"id": "Ny9lRkxefRPz"
"id": "T_KrjopImyGQ",
"cellView": "form"
},
"execution_count": null,
"outputs": []
},
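The rendering loop above intentionally stops after the first reconstruction (`break`). A small variation, reusing the `renderer`, `device`, and `output_folder` objects defined in that cell, renders every mesh and writes each render next to its .obj file; the `_render.png` filename is just an illustrative choice:

```python
import os
import matplotlib.pyplot as plt
from pytorch3d.io import load_objs_as_meshes

# Render every reconstructed mesh instead of only the first one.
for folder in next(os.walk(output_folder))[1]:
    obj_path = os.path.join(output_folder, folder, f"{folder}.obj")
    mesh = load_objs_as_meshes([obj_path], device=device)
    image = renderer(mesh)[0, ..., :3].cpu().numpy().clip(0.0, 1.0)
    plt.imsave(os.path.join(output_folder, folder, f"{folder}_render.png"), image)
```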
{
"cell_type": "code",
"metadata": {
"id": "zuBCgeH08tdn"
"id": "zuBCgeH08tdn",
"cellView": "form"
},
"source": [
"#@title Download the result\n",
@@ -198,8 +289,8 @@
"folders = [ os.path.join(output_folder, f) for f in next(os.walk(os.path.join(input_folder, 'results')))[1] ]\n",
"\n",
"print(f'Download results...')\n",
"os.system(f'zip -r DECA_results.zip {\" \".join(folders)}')\n",
"files.download(\"DECA_results.zip\")"
"os.system(f'zip -r download.zip {\" \".join(folders)}')\n",
"files.download(\"download.zip\")"
],
"execution_count": null,
"outputs": []
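Browser downloads of large archives from Colab can be flaky. An alternative to `files.download` is to mount Google Drive and copy the zip there; the Drive paths below are standard Colab conventions, not something this notebook sets up, and the sketch assumes the working directory is still the DECA folder where download.zip was written:

```python
import shutil
from google.colab import drive

# Mount Drive and copy the archive produced by the cell above.
drive.mount("/content/drive")
shutil.copy("download.zip", "/content/drive/MyDrive/DECA_results.zip")
```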