{"Affiliation":[{"label":"Affiliation","value":"Science, Faculty of","attrs":{"lang":"en","ns":"http:\/\/vivoweb.org\/ontology\/core#departmentOrSchool","classmap":"vivo:EducationalProcess","property":"vivo:departmentOrSchool"},"iri":"http:\/\/vivoweb.org\/ontology\/core#departmentOrSchool","explain":"VIVO-ISF Ontology V1.6 Property; The department or school name within institution; Not intended to be an institution name."},{"label":"Affiliation","value":"Computer Science, Department of","attrs":{"lang":"en","ns":"http:\/\/vivoweb.org\/ontology\/core#departmentOrSchool","classmap":"vivo:EducationalProcess","property":"vivo:departmentOrSchool"},"iri":"http:\/\/vivoweb.org\/ontology\/core#departmentOrSchool","explain":"VIVO-ISF Ontology V1.6 Property; The department or school name within institution; Not intended to be an institution name."}],"AggregatedSourceRepository":[{"label":"AggregatedSourceRepository","value":"DSpace","attrs":{"lang":"en","ns":"http:\/\/www.europeana.eu\/schemas\/edm\/dataProvider","classmap":"ore:Aggregation","property":"edm:dataProvider"},"iri":"http:\/\/www.europeana.eu\/schemas\/edm\/dataProvider","explain":"A Europeana Data Model Property; The name or identifier of the organization who contributes data indirectly to an aggregation service (e.g. 
Europeana)"}],"Campus":[{"label":"Campus","value":"UBCV","attrs":{"lang":"en","ns":"https:\/\/open.library.ubc.ca\/terms#degreeCampus","classmap":"oc:ThesisDescription","property":"oc:degreeCampus"},"iri":"https:\/\/open.library.ubc.ca\/terms#degreeCampus","explain":"UBC Open Collections Metadata Components; Local Field; Identifies the name of the campus from which the graduate completed their degree."}],"Creator":[{"label":"Creator","value":"Bessmeltsev, Mikhail","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/creator","classmap":"dpla:SourceResource","property":"dcterms:creator"},"iri":"http:\/\/purl.org\/dc\/terms\/creator","explain":"A Dublin Core Terms Property; An entity primarily responsible for making the resource.; Examples of a Contributor include a person, an organization, or a service."}],"DateAvailable":[{"label":"DateAvailable","value":"2016-08-23T02:02:16","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/issued","classmap":"edm:WebResource","property":"dcterms:issued"},"iri":"http:\/\/purl.org\/dc\/terms\/issued","explain":"A Dublin Core Terms Property; Date of formal issuance (e.g., publication) of the resource."}],"DateIssued":[{"label":"DateIssued","value":"2016","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/issued","classmap":"oc:SourceResource","property":"dcterms:issued"},"iri":"http:\/\/purl.org\/dc\/terms\/issued","explain":"A Dublin Core Terms Property; Date of formal issuance (e.g., publication) of the resource."}],"Degree":[{"label":"Degree","value":"Doctor of Philosophy - PhD","attrs":{"lang":"en","ns":"http:\/\/vivoweb.org\/ontology\/core#relatedDegree","classmap":"vivo:ThesisDegree","property":"vivo:relatedDegree"},"iri":"http:\/\/vivoweb.org\/ontology\/core#relatedDegree","explain":"VIVO-ISF Ontology V1.6 Property; The thesis degree; Extended Property specified by UBC, as per 
https:\/\/wiki.duraspace.org\/display\/VIVO\/Ontology+Editor%27s+Guide"}],"DegreeGrantor":[{"label":"DegreeGrantor","value":"University of British Columbia","attrs":{"lang":"en","ns":"https:\/\/open.library.ubc.ca\/terms#degreeGrantor","classmap":"oc:ThesisDescription","property":"oc:degreeGrantor"},"iri":"https:\/\/open.library.ubc.ca\/terms#degreeGrantor","explain":"UBC Open Collections Metadata Components; Local Field; Indicates the institution where thesis was granted."}],"Description":[{"label":"Description","value":"Modern tools to create 3D models are cumbersome and time-consuming. Sketching is a natural way to communicate ideas quickly, and human observers, given a sketch, typically imagine a unique 3D shape; thus, a tool to algorithmically interpret sketches recovering the intended 3D shape would significantly simplify 3D modeling. However, developing such tool is known to be a difficult problem in computer science due to multitude of ambiguities, inaccuracies and incompleteness in the sketches. In this thesis, we introduce three novel approaches in CAD and character modeling that successfully overcome those problems, inferring artist-intended 3D shape from sketches. First, we introduce a system to infer the artist-intended surface of a CAD object from a network of closed 3D curves. Second, we propose a new system for recovering a 3D model of a character, given a single complete drawing and a correspondingly posed 3D skeleton. Finally, we introduce a novel system to pose a 3D character using a single gesture drawing. 
While developing each system, we derive our key insights from perceptual and artist literature, and confirm our algorithmic choices by various evaluations and comparisons to ground truth data.","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/description","classmap":"dpla:SourceResource","property":"dcterms:description"},"iri":"http:\/\/purl.org\/dc\/terms\/description","explain":"A Dublin Core Terms Property; An account of the resource.; Description may include but is not limited to: an abstract, a table of contents, a graphical representation, or a free-text account of the resource."}],"DigitalResourceOriginalRecord":[{"label":"DigitalResourceOriginalRecord","value":"https:\/\/circle.library.ubc.ca\/rest\/handle\/2429\/58914?expand=metadata","attrs":{"lang":"en","ns":"http:\/\/www.europeana.eu\/schemas\/edm\/aggregatedCHO","classmap":"ore:Aggregation","property":"edm:aggregatedCHO"},"iri":"http:\/\/www.europeana.eu\/schemas\/edm\/aggregatedCHO","explain":"A Europeana Data Model Property; The identifier of the source object, e.g. the Mona Lisa itself. This could be a full linked open date URI or an internal identifier"}],"FullText":[{"label":"FullText","value":"Recovering 3D Shape from Conceptand Pose DrawingsbyMikhail BessmeltsevB.Sc., Novosibirsk State University, 2008M.Sc., Novosibirsk State University, 2010A THESIS SUBMITTED IN PARTIAL FULFILLMENT OFTHE REQUIREMENTS FOR THE DEGREE OFDOCTOR OF PHILOSOPHYinThe Faculty of Graduate and Postdoctoral Studies(Computer Science)THE UNIVERSITY OF BRITISH COLUMBIA(Vancouver)August 2016\u00a9 Mikhail Bessmeltsev 2016AbstractModern tools to create 3D models are cumbersome and time-consuming.Sketching is a natural way to communicate ideas quickly, and human ob-servers, given a sketch, typically imagine a unique 3D shape; thus, a tool toalgorithmically interpret sketches recovering the intended 3D shape wouldsignificantly simplify 3D modeling. 
However, developing such tool is knownto be a difficult problem in computer science due to multitude of ambiguities,inaccuracies and incompleteness in the sketches. In this thesis, we introducethree novel approaches in CAD and character modeling that successfullyovercome those problems, inferring artist-intended 3D shape from sketches.First, we introduce a system to infer the artist-intended surface of aCAD object from a network of closed 3D curves. Second, we propose a newsystem for recovering a 3D model of a character, given a single completedrawing and a correspondingly posed 3D skeleton. Finally, we introduce anovel system to pose a 3D character using a single gesture drawing. Whiledeveloping each system, we derive our key insights from perceptual andartist literature, and confirm our algorithmic choices by various evaluationsand comparisons to ground truth data.iiPrefaceA version of Chapter 3 has been published in ACM Transactions on Graph-ics [11]. Most of the ideas originated in discussions between myself andAlla Sheffer. I carried out most of the implementation and testing, assem-bled the video, and presented the paper at SIGGRAPH ASIA 2012. CaoyuWang contributed to the ideas and implementation of the final stage ofthe algorithm (Section 3.3) and the implementation of the stable matchingalgorithm. Karan Singh mostly helped with writing, designing validationprocedures and overall framing of the story. Alla Sheffer and Karan Singhwrote most of the manuscript, I edited and wrote parts of the text, andcreated all the figures.A version of Chapter 4 has been published in ACM Transactions onGraphics, 2015 [10]. Most of the ideas originated in discussions betweenmyself and Alla Sheffer. I carried out most of the experiments, implementa-tion, testing, evaluations, and video assembly. Will Chang contributed ideasto Section 4.4.1, overall Section 4.4, and helped with its implementation,along with implementation of various rendering tasks. 
Alla Sheffer, KaranSingh, and Nicholas Vining wrote most the manuscript, I and Will Changedited and wrote parts of the paper. I also created most of the figures inthe manuscript. Nicholas Vining and Karan Singh also helped with shapingthe story, designing and conducting evaluations, and video narration.Finally, a version of Chapter 5 has been conditionally accepted to SIG-GRAPH ASIA 2016 conference. Most of the ideas originated in discussionsbetween myself and Alla Sheffer. I carried out most of the experiments,implementation, testing, evaluations, and video assembly. Nicholas Viningcontributed ideas to Section 5.6, helped with its implementation, carried outsome experiments in Section 5.7, helped with some evaluations and videonarration. I wrote the initial version of the paper; Alla Sheffer and NicholasVining wrote the final version of the paper, I edited the text, and createdall the figures.iiiTable of ContentsAbstract . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . iiPreface . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . iiiTable of Contents . . . . . . . . . . . . . . . . . . . . . . . . . . . . ivList of Tables . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . viiList of Figures . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . viii1 Introduction . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 11.1 Overview of Contributions . . . . . . . . . . . . . . . . . . . 31.2 Recovering Artist-Intended Surfaces from 3D Curves . . . . . 41.3 Recovering Character 3D Model from a Cartoon Drawing . . 51.4 Recovering 3D Character Pose from a Gesture Drawing . . . 62 Previous Work . . . . . . . . . . . . . . . . . . . . . . . . . . . 82.1 Sketch-based 3D Modeling . . . . . . . . . . . . . . . . . . . 82.1.1 Incremental Approaches . . . . . . . . . . . . . . . . 82.1.2 Shape and Pose Reconstruction from Photographs andVideo . . . . . . . . . . . . . . . . . . . . . . . . . . . 
92.1.3 Character Shape Reconstruction from Complete Draw-ings . . . . . . . . . . . . . . . . . . . . . . . . . . . . 112.1.4 Skeleton-based 3D Modeling . . . . . . . . . . . . . . 142.2 Posing Characters . . . . . . . . . . . . . . . . . . . . . . . . 142.2.1 Adding 3D Effects to 2D Drawings . . . . . . . . . . 142.2.2 3D Character Posing . . . . . . . . . . . . . . . . . . 152.3 Surface Reconstruction from 3D Curve Networks . . . . . . . 163 Design-Driven Quadrangulation of Closed 3D curves . . . 203.1 Introduction . . . . . . . . . . . . . . . . . . . . . . . . . . . 203.2 Quadrangulating a Closed 3D Curve . . . . . . . . . . . . . . 24ivTable of Contents3.2.1 Segmentation and Matching . . . . . . . . . . . . . . 253.2.2 Quadrangulation . . . . . . . . . . . . . . . . . . . . . 323.3 Processing Curve Networks . . . . . . . . . . . . . . . . . . . 363.4 Results . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 383.5 Conclusions . . . . . . . . . . . . . . . . . . . . . . . . . . . . 414 Modeling Character Canvases from Cartoon Drawings . . 434.1 Introduction . . . . . . . . . . . . . . . . . . . . . . . . . . . 434.2 Framework Overview . . . . . . . . . . . . . . . . . . . . . . 474.3 Part Segmentation . . . . . . . . . . . . . . . . . . . . . . . . 504.4 Canvas Modeling . . . . . . . . . . . . . . . . . . . . . . . . . 574.4.1 Computing a 3D Curve-Skeleton and 3D Contours . . 584.4.2 Canvas Connectivity . . . . . . . . . . . . . . . . . . 614.4.3 Canvas Surfacing . . . . . . . . . . . . . . . . . . . . 624.5 Perceptual and Design Validation . . . . . . . . . . . . . . . 664.5.1 Creating Overlaid 3D Skeletons . . . . . . . . . . . . 664.5.2 Comparison to Ground Truth and Artist Drawings . 694.5.3 Perceived Contour Segmentation . . . . . . . . . . . . 694.6 Results . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 724.7 Conclusions . . . . . . . . . . . . . . . . . . . . . . . . . . . . 775 Gesture3D: Posing 3D Character via a Gesture Drawing . 
785.1 Introduction . . . . . . . . . . . . . . . . . . . . . . . . . . . 785.2 Related Work . . . . . . . . . . . . . . . . . . . . . . . . . . 815.3 Parsing Gesture Drawings . . . . . . . . . . . . . . . . . . . . 815.4 Framework Overview . . . . . . . . . . . . . . . . . . . . . . 845.5 Character-Contour Correspondence . . . . . . . . . . . . . . 865.5.1 Solution Space . . . . . . . . . . . . . . . . . . . . . . 875.5.2 Unary Assignment Cost . . . . . . . . . . . . . . . . . 885.5.3 Assignment Compatibility . . . . . . . . . . . . . . . 895.5.4 Global Consistency . . . . . . . . . . . . . . . . . . . 915.5.5 Solver Mechanism . . . . . . . . . . . . . . . . . . . . 935.6 2D Pose Optimization . . . . . . . . . . . . . . . . . . . . . . 945.7 Full Pose Optimization . . . . . . . . . . . . . . . . . . . . . 945.8 Validation . . . . . . . . . . . . . . . . . . . . . . . . . . . . 985.9 Results . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 1005.10 Conclusions . . . . . . . . . . . . . . . . . . . . . . . . . . . . 104vTable of Contents6 Discussion and Conclusion . . . . . . . . . . . . . . . . . . . . 1056.1 Discussion . . . . . . . . . . . . . . . . . . . . . . . . . . . . 1056.2 Future Work . . . . . . . . . . . . . . . . . . . . . . . . . . . 1066.3 Conclusions . . . . . . . . . . . . . . . . . . . . . . . . . . . . 107Bibliography . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 108viList of Tables3.1 Algorithm statistics for different curve networks. . . . . . . . 41viiList of Figures1.1 Graphical interface of a commercial industry-standard 3Dmodeling package. . . . . . . . . . . . . . . . . . . . . . . . . 21.2 A brief overview of Chapter 3. . . . . . . . . . . . . . . . . . 41.3 A brief overview of Chapter 4 . . . . . . . . . . . . . . . . . . 51.4 A brief overview of Chapter 5. . . . . . . . . . . . . . . . . . 
62.1 Stick figure drawings (a), lines of action (b), and outer silhou-ettes (c) allow for multiple perceptually valid pose interpreta-tions. (d) Poor view selection results in highly foreshortenedcontours leading to loss of pose information (e.g bends onthe left arm or the curved spine). Gesture drawings, con-sciously drawn from descriptive views (e) effectively conveythe intended pose. . . . . . . . . . . . . . . . . . . . . . . . . 102.2 Character drawings do not conform to the assumptions madein previous work. (top) The contours of a surface of revo-lution whose axis is not in the view plane are typically notplanar.(bottom) The contours of a typical character includenumerous occlusions; a single contour curve can consist ofmultiple part outlines (see left arm and torso outline in (a))and as shown by the side view (b) the contour curves arefar from planar or view aligned. Our method introduced inChapter 4 (c) successfully handles such inputs generating acharacter model similar to the ground truth input (b). . . . . 122.3 (a) Lacking part information, character shape reconstructioncan at best exploit overall shape smoothness, e.g [68, 95]; (b)by using a skeleton to facilitate contour partition and partmodeling, we generate a more believable character shape. . . 132.4 Using Laplacian diffusion (b) or Thin-Plate Splines [40](c) tosurface a four-sided cycle leads to unintuitive results. (d) Incontrast the flow lines on an interpolating Coons patch, byconstruction, bridge opposite cycle sides. . . . . . . . . . . . . 19viiiList of Figures2.5 (top) Using a purely topological approach and applying mid-point subdivision (forming either four or six sides) generatesa quad mesh with poor flow line layout (left and center).Our method in Chapter 3 (right) uses geometry driven seg-mentation and matching to generate smooth flow lines and apredictable surface. 
(bottom) On a concave cycle, parameter-ization onto a convex domain (a rectangle) leads to foldovers(left), our method automatically segments the cycle into con-vex quadrilaterals leading to a fair surface (right). . . . . . . 193.1 Steps to quadrangulating a design network of closed 3D curves(a) : Closed curves are independently segmented (b) and it-eratively paired and refined to capture dominant flow-lines aswell as overall flow-line quality (c); final quadrangulation ingreen and dense quad-mesh (d); quadrangulations are alignedacross adjacent cycles to generate a single densely sampledmesh (e), suitable for design rendering and downstream ap-plications (f). . . . . . . . . . . . . . . . . . . . . . . . . . . 213.2 Closed 3D curves: ambiguous hexagonal 3D curve (top) com-pared to complex curves with a clear design intent (bottom). 223.3 Artist designed interpolating quad-meshes. . . . . . . . . . . . 233.4 After the initial segmentation (a), we alternate matching andrefinement steps to obtain a pair-based curve segmentationwhich is converted into a quadrilateral network (c) . To mini-mize T-junction count (d) we compute global interval assign-ment, and use it to sample iso-lines on discrete Coons patches. 243.5 Iterative segmentation refinement: (a) initial segmentationwhere the matching highlights correct dominant side matches.The match quality is drastically improved by segmenting thebottom curve (b), and repeating the process (c) to obtain aneven segment count. Further refinement has no real impacton matching cost. . . . . . . . . . . . . . . . . . . . . . . . . . 273.6 Estimated bridge curvature for different segment layouts mea-sured as angle (red) between bridge direction ti and p\u2212p\u2032 (at apoint p). The dashed lines visualize representative intersect-ing flow-lines (a). Shape similarity and distance cost terms(b). . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 
283.7 Initial bridge direction ti of segment i is determined by adja-cent segment flow directions fm, fn and its normal. . . . . . 30ixList of Figures3.8 A disconnected dual graph (left) does not allow for a validprimal quad mesh. Splitting the cycle into two by a tempo-rary curve segment (dashed) generates valid graphs for bothparts which combined together induce a valid primal quadmesh (right). . . . . . . . . . . . . . . . . . . . . . . . . . . . 333.9 Two intersection orders induce different quad connectivity,with the one on the right inducing a better quad shape, andconsequently a smoother flow. . . . . . . . . . . . . . . . . . . 333.10 We first position interior vertices (left) and then use the chain-long quads to position the interior curves (center). Finally,the resulting quad cycles are quad-meshed using discrete Coonspatches (right). . . . . . . . . . . . . . . . . . . . . . . . . . 343.11 Our distance based weighing (right) generates smoother flowline evolution than topology based one [94]. . . . . . . . . . 343.12 Removing interior vertices: (Left) initial match (top) andinduced quadrangulation (bottom); (Right) the final matchwith purple and green pairs flipped (top) has a slightly highercost but the induced quadrangulation (bottom) has no inte-rior vertices, leading to smoother flow-lines. . . . . . . . . . . 353.13 Separately processed cycles (a) introduce T-junctions. Wefirst resolve the T-junctions across pairs of neighbouring patchesby propagation (b), generating a well defined hierarchy ofmatching primary segments. We then use integer program-ming to compute interval assignments (c) that minimizes thenumber of T-junctions, typically leading to a watertight mesh(d). . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 363.14 Quadrangulation and meshing of closed curves. . . . . . . . . 383.15 Quad meshes of complex closed curves including interior cycles. 
393.16 Artist generated meshes (left) and ours (right) exhibit verysimilar flow-line patterns. . . . . . . . . . . . . . . . . . . . . 393.17 Quadrangulation and meshing of curve networks. The starsindicate the network locations of the highlighted complex re-gions. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 42xList of Figures4.1 Character drawings (c) are traditionally anchored around askeleton (a), surrounded by generalized surfaces of revolu-tion (b). We use the drawn character contours (d) and acorresponding 3D skeleton (red-to-blue coloring reflects near-to-far skeleton depth variation), to automatically compute a3D canvas, employed to freely manipulate the character in3D (e). . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 434.2 Character contours alone (left) frequently do not provide suf-ficient information to conclusively determine 3D shape bothon occlusion free (top) and partially occluded (bottom) in-puts. A 3D skeleton, shown in the insets, resolves much ofthe ambiguity present in contours alone facilitating plausibleshape interpretation. . . . . . . . . . . . . . . . . . . . . . . . 444.3 The canvas (center) of the catwoman in Fig. 4.1: (left) thickblack line shows reconstructed 3D contour curves, (right) in-sets visualize representative trajectories and profiles. . . . . . 454.4 (a) Lacking part information, character shape reconstructioncan at best exploit overall shape smoothness, e.g [68, 95]; (b)by using a skeleton to facilitate contour partition and partmodeling, we generate a more believable character shape. . . 
474.5 (a) Perceptual studies indicate that viewers group curves thatcan be smoothly joined together ((a), right), seeing those asa continuation of one another; while treating those with non-smooth transition ((a), left) as distinct; viewers tend to pre-fer interpretations that balance part simplicity (b) againstcontour persistence, preferring interpretations that preservecontour shape under small view changes (c). . . . . . . . . . 484.6 Canvas construction: Given a sketch and a skeleton (shownin side view) we first segment the input contours into sectionsassociated with skeletal bones (a), correspondences shownby matching color), correctly resolving occlusions; we usethe segmentation to replace the straight-line skeleton by acurved-skeleton optimized for symmetry (b); and finally gen-erate maximally simple body part geometries around this newskeleton while maintaining contour persistence with respectto the input drawing (c). . . . . . . . . . . . . . . . . . . . . . 49xiList of Figures4.7 Skeleton-driven segmentation of a simple contour (a) mustmatch skeletal topology (b) and reflect bone proximity. Prox-imity alone does not guarantee skeleton matching segmenttopology (c). A more topologically consistent segmentation(d) may need to be refined by bisector rotation to avoid seg-ment overlap (e). Boundaries are then adjusted to best alignwith negative curvature extrema (f). . . . . . . . . . . . . . . 514.8 A character drawing with inter-part occlusions contains mul-tiple contour curves and the left and right outlines of a bodypart may now contain multiple Gestalt continuous segments(a); thus 2D proximity based segmentation is no longer ade-quate (b). Taking into account skeletal depth as well as 2Dproximity but neglecting Gestalt continuity leads to better,but still perceptually wrong results (c,d). Our frameworkaccounts for both considerations resulting in the desired seg-mentation (e). . . . . . . . . . . . . . . . . . . . . . . . . . . 
524.9 Segmentation algorithm: iterating between a z-ordering basedpass and consistency validation. . . . . . . . . . . . . . . . . . 534.10 Possible scenarios of contour intersections (filled circles) forrays bounding a mini-bone. Empty circle means the ray hasno associated contour intersection. . . . . . . . . . . . . . . . 564.11 Curve skeleton computation: (a) user posed straight-line skele-ton with the initial trajectory centers and their correspondingtrajectory contour points marked; (b,c) front and side viewsof curve skeleton and 3D contours; (d) final surface with con-tours highlighted. . . . . . . . . . . . . . . . . . . . . . . . . . 594.12 Canvas connectivity (a) with close-ups of quad strips betweentrajectories (b) and triangulated terminal trajectories (c). . . 614.13 Connectivity across joints: (a) visually continuous parts; (b)Discontinuous parts; (c) the top part is deemed continuouswith both lower ones, while the two bottom parts are deemeddiscontinuous since their shared contour curve has a cusp be-tween them. . . . . . . . . . . . . . . . . . . . . . . . . . . . . 614.14 Given the input sketch (a), contour persistence indicates thatside view contours (b,c) significantly differing from front-viewones are undesirable. Viewers similarly do not anticipate ex-treme foreshortening (d). Our result (e) is persistent with thefront view contours. . . . . . . . . . . . . . . . . . . . . . . . 624.15 We constrain the profile angle to the range between the idealprofile slope given by the two ring radii and the axis direction. 64xiiList of Figures4.16 Ground truth (green) and 3D skeletons created by 3 anima-tors overlaid on two ground truth 2D character drawings (a),(b), also shown from an alternate view overlaid on the groundtruth 3D canvas. The skeletons in (b) shown individually (c).The purple and maroon skeletons, created by manipulatingan overlaid 2D skeleton have differences in 3D limb lengthbetween symmetric limbs. 
The maximum difference for eachskeleton, 14% and 33%, is marked on the longer limb. Thebrown skeleton was created by animator #2 mimicking theworkflow of animator #3. The angular deviation between thecorresponding bones on the ground truth and artist skeletonsis dominated by control bones (hips and shoulders) whichhave no impact on the result geometry. The maximal devia-tions without (and with) control bones are: 24\u25e6 (31\u25e6) for thepurple skeleton, 24\u25e6 (46\u25e6) maroon, 32\u25e6 (44\u25e6) brown , and 30\u25e6(40\u25e6) blue. Average angle differences are 13\u25e6, 15\u25e6, 15\u25e6, and18\u25e6 respectively. . . . . . . . . . . . . . . . . . . . . . . . . . 674.17 Comparing our results to ground truth data: Left to right:contours and skeletons of ground truth (GT) models; GT(blue) and our (yellow) models rendered from alternate views. 684.18 Left: Given the same input sketch, small variations in skele-ton posing (green and purple Figure in 4.16) lead to minorchanges in character shape. Right: significant change in boneslope and location for a symmetric contour leads to largershape difference. . . . . . . . . . . . . . . . . . . . . . . . . . 684.19 Comparison of our results (b) to sketches produced by artists(a) for the same view and pose. . . . . . . . . . . . . . . . . . 704.20 Overlaid user segmentations (left) for both the elephant andthe scientist are qualitatively similar to the algorithmic results(right). . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 704.21 Canvases and alternate view renders generated using our sys-tem from the inputs on the right. . . . . . . . . . . . . . . . . 714.22 A variety of re-posed renders generated automatically fromthe inputs on the right. . . . . . . . . . . . . . . . . . . . . . 714.23 The explicit cylindrical parameterization of our canvases al-lows for a range of advanced texturing effects, hard to achievewithout a an underlying 3D structure. . . . . . . . . . . . . . 
72xiiiList of Figures4.24 Given a single drawing and a posed skeleton we generatequalitatively similar results (b,d) to those created by multi-drawing systems which employ manually specified curve cor-respondences between drawn curves: [106] (a) and [74] (c). . . 744.25 An example of the qualitative evaluation questionnaire. . . . 754.26 Our ability to plausibly recover character shape is limited bythe descriptive power of the inputs. Without cues to the con-trary we generate round bird wings, instead of anatomicallycorrect ones (a). Since we use a standard mesh representa-tion, the canvas can be easily edited to correct the wings oradd extra features (beak) using standard tools (a, right). Ge-ometries not-well represented by generalized surfaces of revo-lution, such as loose clothing (b, pink cape) must be modeledby other means. While some fine details can be captured byusing skeleton refinement (c), alternate editing tools are likelyto achieve this goal faster. . . . . . . . . . . . . . . . . . . . . 765.1 Gesture3D: gesture drawings (b,e) of an input character model(a); estimated 2D skeleton projections (c,f) and new poses au-tomatically computed from the drawings (d,g). . . . . . . . . 785.2 Stick figure drawings (a), lines of action (b), and outer silhou-ettes (c) allow for multiple perceptually valid pose interpreta-tions. (d) Poor view selection results in highly foreshortenedcontours leading to loss of pose information (e.g bends onthe left arm or the curved spine). Gesture drawings, con-sciously drawn from descriptive views (e) effectively conveythe intended pose. . . . . . . . . . . . . . . . . . . . . . . . . 815.3 Portion of a gesture drawing with annotated joint (blue) andpart (red) contours. . . . . . . . . . . . . . . . . . . . . . . . 825.4 Contour-skeleton correspondences, with Gestalt continuouscontours connected by dashed lines. . . . . . . . . . . . . . . 
825.5 Implausible bone locations that violate (a) adjacency, (b) ori-entation, or (c) crossing cues; consistent placements (d). . . 835.6 Depth ambiguity . . . . . . . . . . . . . . . . . . . . . . . . . 845.7 Occlusion types. . . . . . . . . . . . . . . . . . . . . . . . . . 845.8 Less natural (b) and more natural (c) interpretations of adrawn pose (a) (leg bent sideways vs forward). . . . . . . . . 855.9 Overview: (a) algorithm input; (b) discrete 2D joint embed-ding; (c) optimized 2D embedding; (d) 3D skeleton (colorvisualizes depth) and posed model. . . . . . . . . . . . . . . . 86xivList of Figures5.10 Joint cost visualization. Here the color shows the matchingcost on a scale from red (poor match) to blue (good). . . . . 875.11 Full solutions: (a) contains overlaps; (b) poor coverage; (c)preferred. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 925.12 Comparing our results to GT data and artist modeled poses.We use as input the projected contours of the posed GT mod-els combined with their bind posed originals (Figure 5.14) toautomatically create poses qualitatively similar to both GTand artist results. . . . . . . . . . . . . . . . . . . . . . . . . . 995.13 Overlays of viewer created skeleton embeddings (lines removedfor clarity) and our results on same inputs. . . . . . . . . . . 995.14 Typical two-stage processing results. Left to right: inputmodel, drawing, 2D skeleton fitting, output model. . . . . . 1015.15 (center) 3D posing using only drawing conformity, (right) full3D solution. . . . . . . . . . . . . . . . . . . . . . . . . . . . . 1025.16 Impact of different bind poses. . . . . . . . . . . . . . . . . . 1025.17 (right) Davis et al.[33] trace stick figures over gesture draw-ings and then pose characters semi-automatically. (left), Weuse the original drawings to automatically pose characters. . 
1035.18 Extreme mismatch in proportions between model and draw-ing (a) can lead to poor depth reconstruction (b); correctingthe proportions in the drawing (c) corrects the reconstruc-tion. (d) Ambiguous drawings using highly oblique views cancause our 2D pose estimation to fail. . . . . . . . . . . . . . . 103xvChapter 1Introduction3D modeling is used ubiquitously in manufacturing, entertainment, con-struction, and its influence is rapidly expanding to health care, education,and other industries. It is next to impossible to find a new building, a newcar, or a top-selling movie made without 3D models. At the same time,creating 3D models in modern software packages is time consuming, cum-bersome, and requires expert 3D modeling skills. Typically 3D modelingsoftware relies on the use of specialized complex 3D modeling tools (Figure1.1), which to regular artists are far from natural [83].Sketching is a natural way to communicate ideas quickly [98]. Sketchingis often a first step in making fine art, creating illustrations or cartoons, 3Dmodeling, etc. The expressive power of sketching along with its accessibilitymake it a method of choice to visually convey an idea.A tool for automatically interpreting sketches and creating 3D models,envisioned by the designer of the sketch, could be the Holy Grail of mod-eling [98]. Such a tool may benefit numerous applications, enabling rapidprototyping, creating animations or models quickly and, perhaps even moreimportantly, enabling more people to use 3D modeling.To embark on a quest for the tool, one may look at the core componentsof a sketch, which are its curves. They are the first elements to be drawnbefore shading or color, and are essential to conveying geometric information[49]. 
Interpretation of a sketch is largely through interpretation of its curves.Unfortunately, while curve drawings are a natural and well understoodrepresentation for humans, they are hard to interpret algorithmically [80].The complexity of an algorithmic interpretation is rooted in the complexity ofthe human perception of such drawings, which we are yet to understand.The fundamental problem of algorithmic interpretation is that mathemati-cally, each 2D curve has an infinite number of 3D interpretations [86]. Evenif curves are embedded in 3D, there is an infinite number of surfaces passingthrough the curves. Moreover, sketches typically contain occlusions, ambi-guities, and distorted proportions that complicate the matter even further.Nevertheless, as we discuss in Chapters 3-5, for artist-drawn 2D sketches,human observers tend to imagine unique 3D curves, successfully overcoming1Chapter 1. IntroductionFigure 1.1: Graphical interface of a commercial industry-standard 3D mod-eling package.ambiguities and inaccuracies. Furthermore, given such imagined or artistmodeled 3D curves, human observers tend to imagine unique 3D shape.The key to understanding why, we believe, is the intent of sketches: theyare drawn to convey shape. This intent drives artists to draw a sufficientnumber of significant curves, unambiguously depicting the object from anon-accidental view. Such an intent is the crucial trait for modeling and per-ception research with the purpose of discovering the mathematical nature ofsketch curves, and therefore gives hope of algorithmic sketch interpretation.Particularly,\u2022 Significant curves. Depending on the context, 2D sketches and 3Dcurve networks typically include ridge\/valley lines [26], curves depict-ing sharp features, and the lines of principal curvature [42, 119]. 2Dsketches also include occlusion contours in the selected view.\u2022 Sufficient number of curves. 
While arbitrary 2D curve drawingsor 3D curve networks may have vastly different interpretations, artistcreated sketches typically have enough curves to uniquely convey theimagined 3D object.\u2022 Non-accidental view. For 2D sketches, designers tend to choosenon-accidental views with few occlusions and least foreshortening [93].This suggests that observers interpret 2D geometric properties as stronglycorrelated with 3D geometry rather than being caused by a particularchoice of viewpoint [138].Such observations are the cornerstones of our research, serving as a21.1. Overview of Contributionsbase for our algorithms. Additionally, we derive cues on sketch interpre-tation from well-studied features of human perception. Those features, by and large introduced by Gestalt psychologists, include anticipation of symmetryand simplicity (Good Gestalt Principle), alignment and regularity, et cetera[132]. In our algorithms, we aim to mimic such behavior.Apart from information in the sketch itself, human perception of curvesalso depends on context and prior knowledge [102, 121]. Correctly interpret-ing sketches requires knowledge of the context and the relevant priors. Inour research, each project\u2019s scope of applicability defines such context andprovides the necessary priors for interpreting curve shape. For instance,an observation that body parts of most characters can be represented assurfaces of revolution, provides missing necessary information when inter-preting a character sketch (Section 4). In other words, prior information ofthis kind allows us to approach problems that otherwise would be ill-defined.To summarize, this thesis is focused on using specific prior knowledge,findings in perception research, and insights from artist literature to algo-rithmically interpret sketched objects, in the areas of direct practical appli-cations. 
To validate our results, we compare them with the ones manuallycreated by professional artists, and, whenever possible, compare our algo-rithmic choices with human ones.1.1 Overview of ContributionsThe contributions of the dissertation can be split into two main categories.First, in each chapter we distill the artistic knowledge of the area, along withcues drawn from perceptual evaluations, into a set of principles that guidethe interpretation of curve drawings. And second, we use those principles tobuild systems allowing us to algorithmically interpret curve drawings withina particular domain. We then confirm our analysis and intuition with variousevaluations and direct comparison with artists\u2019 results.Thus, we introduce three novel systems for algorithmically interpretingsketches and recovering the depicted 3D shape. Our work encompasses twoseparate shape domains: CAD objects (Chapter 3) and characters (Chapters4 and 5).\u2022 In Section 1.2 and Chapter 3, we introduce our first contribution, anapproach to automatically generate 3D surfaces of CAD objects fromartist-drawn 3D curve networks. Such an approach complements 3Dcurve sketching interfaces such as ILoveSketch [7] or systems for lifting2D sketches into 3D [138] to rapidly create CAD models.31.2. Recovering Artist-Intended Surfaces from 3D Curvesinput curve network shape-aware quad-meshFigure 1.2: A brief overview of Chapter 3.\u2022 In Section 1.3 and Chapter 4, we introduce a new technique to recover3D character shape from a 2D cartoon drawing and a correspondinglyposed 3D skeleton. We demonstrate that this system can be used tocreate believable 3D models from a single drawing and a 3D skeleton,sidestepping the tedious 3D modeling step.\u2022 In Section 1.4 and Chapter 5, we introduce a system to pose a 3D char-acter directly via a gesture drawing. 
This system is intended to replacethe tedious and cumbersome rig-based posing process in a traditional3D modeling package.1.2 Recovering Artist-Intended Surfaces from 3DCurvesAdvances in sketching interfaces enable artists to directly draw early conceptsketches in 3D while following their pen-and-paper drawing style, creating3D curve networks [7]. Such curve networks are known to effectively conveycomplex 3D shape [87], and, if drawn by an artist, typically convey the shapeunambiguously. In Chapter 3 we introduce the first solution to construct-ing the imaginary surface interpolating a general 3D design curve network,consistent with artist intent.To approach the informal notion of artist intent when defining a surface,we derive our insights from ideas in 3D modeling and perception literature.Namely, we observe, based on design literature, that the artist-drawn 3D41.3. Recovering Character 3D Model from a Cartoon Drawing+ canvas surfacingpartsegmentationFigure 1.3: A brief overview of Chapter 4curves can be treated as representative flow-lines, an input-derived sparseset of curves, correlated with lines of curvature. We further observe thatviewers complete the intended shape by envisioning a dense network ofsmooth, gradually changing, flow-lines that interpolates the input curves.Components of the network bridge pairs of input curve segments with sim-ilar orientation and shape.Consequently, we introduce the novel algorithm that mimics this behav-ior by iteratively segmenting and matching the input curves, and then usesthe matching to effectively construct an interpolating surface consistent withartist intent (Fig. 1.2).1.3 Recovering Character 3D Model from aCartoon DrawingTraditional 2D cartoon characters are a mainstay of computer animation.Viewers appreciate the feel of hand-drawn art, while animators enjoy theflexibility and explicit control offered by this medium. 
This flexibility, un-fortunately, comes with the tedium of drawing numerous individual frames,and the cumbersome burden of managing view and temporal coherence.Recent research [112] and practice [104] advocate the use of an underlying3D model to enable easy 3D control over the view, pose, deformation andpainterly rendering effects of cartoon characters. In current animation prac-tice, such models are manually constructed using 2D cartoon drawings as avisual reference, and are then manually rigged to suitably designed skeletonsfor posing and animation. In chapter 4 we introduce a novel technique forthe construction of a 3D character model directly from a 2D cartoon draw-ing and a user-provided correspondingly posed 3D skeleton, enabling artiststo directly articulate the drawn character in 3D.We observe that traditional cartoon characters are well approximatedby a union of generalized surface of revolution body parts, anchored by a51.4. Recovering 3D Character Pose from a Gesture DrawingFigure 1.4: A brief overview of Chapter 5.skeletal structure, which motivates our choice of input. We also observe thatwhile typical 2D character contour drawings allow ambiguities in 3D inter-pretation, our use of a 3D skeleton eliminates such ambiguities and enablesthe construction of believable character models from complex drawings. 
Weanalyze and distill the insights on the nature of character sketches fromperception and art literature, namely, we explore the principles of Gestaltcontinuity, simplicity, and contour persistence.Beyond this analysis, our contribution is the method based on those in-sights that enables generating 3D character models from curve drawings.The core of the method consists of two algorithms: first, the algorithm ofbody part delineation that segments the input 2D contours into sections out-lining individual body parts and resolves inter-part occlusions; and second,the algorithm that imbues the outlined contours for each body part withdepth and creates the 3D model, balancing perception cues, image fidelity,and shape priors (Fig. 1.3).1.4 Recovering 3D Character Pose from aGesture DrawingWhile posing 3D characters is a common task in digital media production,performing it using traditional 3D interfaces is time consuming and requiresexpert 3D knowledge. Alternative approaches which use stick figures or linesof action as a posing reference are problematic, since these representationsare inherently ambiguous even to human observers.In contrast to these representations, gesture drawings - rough yet ex-pressive sketches of a character\u2019s pose - are designed by artists to facilitatea single perceptually consistent pose interpretation by viewers. Artists are61.4. Recovering 3D Character Pose from a Gesture Drawingskilled at quickly and effectively conveying poses using such drawings, anduse them ubiquitously while storyboarding. Actual animation, however, istypically done by manual manipulation of the skeleton, and those draw-ings are often used only as a reference [83]. 
In Chapter 5 we introduce thefirst method to pose a 3D character directly via a single vectorized gesturedrawing as the only input.The contribution of the chapter is two-fold: we formulate the propertiesof effective gesture drawings, bringing together insights from various fields,such as psychology, art, and computer graphics, highlighting key perceptualcues which enable viewers to perceive the artist intended character poses;we then use these observations to introduce the first gesture drawing basedalgorithm for posing 3D characters. Our method enables artists to directlyconvert their ideated posed character drawings into 3D character poses, andsupports complex drawings with occlusions, variable body part foreshorten-ing, and drawing inaccuracies (Fig. 1.4).7Chapter 2Previous Work2.1 Sketch-based 3D ModelingAlgorithmic sketch interpretation has a long history rooted in computervision and artificial intelligence research [25, 59]. Those early works focusedon providing semantic descriptions of elements in the drawing, rather thana complete 3D model [79, 129]. For instance, Huffman [59] and Clowes[25] studied the problem of line-labeling of polyhedra drawings. Waltz [129]extended the line labels idea to include shadows, grouping lines into bodies.Mackworth\u2019s work [79] deals with \u2019naturalistic\u2019 drawings of sketch maps,classifying lines into shorelines, rivers, roads, and mountains.The research question of how to infer 3D model from a line drawingemerged not long after 3D modeling itself [86]. The review literature [98]often name 3D Paint by L. Williams [135], Teddy by Igarashi et al. [60],and SKETCH by Zeleznik et al. [140] the first sketch-based 3D modelingsystems.2.1.1 Incremental ApproachesThose early methods were incremental, i.e. complex 3D shapes were mod-eled via a sequence of simple operations. 
They did not aim to interpretnatural drawings; instead, they provided interfaces with a set of sketch-based modeling operations to create a limited variety of shapes: geometricprimitives in SKETCH [140] or \u2019inflation\u2019 surfaces in Teddy [60].As incremental approaches developed and matured, the range of sur-faces they could model expanded [90], and their input method graduallytransitioned closer to the pen-and-paper drawing process [99].We can categorize the more recent methods into single- and multi-viewincremental approaches. A more extensive review of related literature canbe found in the survey by Olsen et al. [98]; here we focus on the works mostrelevant to our thesis.82.1. Sketch-based 3D ModelingMulti-view. Many systems employ a multi-view approach: users buildmodels by drawing contour strokes in views where they are expected tobe parallel to the screen, or to project with little foreshortening onto theevolving geometry [95, 123]. Frequent view changes and incremental draw-ing order are critical when modeling characters using such approaches, asit is next to impossible for all contours of a 3D model to be entirely flat(Fig. 2.2). Borosan et al. [17] proposed simultaneously creating and rigging3D characters using an interface where body parts are added incrementally,one at a time, and the associated skeleton is generated on the fly. Our sys-tems introduced in Chapters 4 and 5 are independent of drawing order; they allow artists to freely sketch the characters they envision and to interpretlegacy sketches without need for oversketching.The general issue with incremental approaches, however, is that the re-sult strongly depends on the order in which the artists draw strokes, whichmakes interpretation of complete natural drawings highly unlikely. Similarly,choice of views to draw may not be an easy task for a user. Moreover, theyoften require additional user input, such as annotations, correct drawingorder, etc.Single-view. 
Single-view incremental modeling approaches, such as theones by Cherlin et al., Gingold et al., Shtof et al., and Chen et al. [21,23, 46, 118] rely on additional information to facilitate modeling of complexshapes. Namely, some methods [99], for the purpose of modeling smoothshapes from existing drawings and photographs, leverage contour drawingorder and user annotation; others, such as Shtof et al. [118] and Chen etal. [21] snap parameterized primitives to input contours via a combinationof optimization and user annotation, and rely on user assistance to generate3D models from annotated sketches and photographs respectively. Gingoldet al. [46] interactively place tubular and elliptical 3D primitives to matchartist drawn contours; as they note, their system does not directly use the2D image. Cherlin et al. [23] treat each new pair of contour strokes as2D profiles defining a new generalized surface of revolution part, whosetrajectory is either circular or manually defined.2.1.2 Shape and Pose Reconstruction from Photographsand VideoIn the meantime, in computer vision, a very relevant problem was beingsolved \u2013 shape and pose reconstruction from monocular images and video(for historical review see [3]). Compared to sketch-based modeling, much92.1. Sketch-based 3D Modeling(a) (b) (c) (d) (e)Figure 2.1: Stick figure drawings (a), lines of action (b), and outer silhou-ettes (c) allow for multiple perceptually valid pose interpretations. (d) Poorview selection results in highly foreshortened contours leading to loss ofpose information (e.g bends on the left arm or the curved spine). Gesturedrawings, consciously drawn from descriptive views (e) effectively conveythe intended pose.more data is accessible here, though the range of shapes, poses, and motionsis severely limited compared to non-realistic animation demands.A variety of recent multi-view reconstruction methods model humansubjects from silhouettes, potentially aided by skeletons [110, 139]. 
Theywork with large collections of silhouettes captured from a range of viewsand poses [92]. The main drawback of this class of methods is that a largenumber of input drawings is very unlikely to be produced by hand; instead,those methods aim at reconstructing from a series of photographs or videosequences.Reconstruction from video aims to capture a continuous motion, wherethe pose in each frame is very close to a previously reconstructed pose in thepreceding frame, and heavily relies both on this existing previous pose andon fine image-level correspondences between frames (e.g. [34, 44, 125]). Ourthesis has more in common with pose estimation from a single frame, or poseinitialization, where no such priors are available (e.g. [22, 43, 61, 111]). Aswe show in Chapter 5, both outlines and incidental-view occlusion contours(Figure 2.1c,d) are insufficient to deduce a pose; single-frame pose estimationmethods therefore frequently combine this information with textural andshading cues which are unavailable in our setup.Recent posing approaches (e.g. [61]) predict the most likely 3D pose bylearning from large databases of real and synthetic human motion data. Suchdatabases bias the results toward more frequent poses and can be difficult102.1. Sketch-based 3D Modelingto obtain for non-humanoid or non-realistic characters, or for extreme\/non-physical poses. The frameworks introduced in the current thesis overcomethe lack of extensive anatomic pose priors, and allow recovery of atypicalcharacter shapes and poses by leveraging the descriptive cues artists providewhen creating gesture and concept drawings.For a more complete recent review of this area please refer to [125] andthe references therein.2.1.3 Character Shape Reconstruction from CompleteDrawingsIncremental approaches allow users to provide guidance and control overthe modeling, and, in general, allow for more varied user input. 
However,in an incremental framework, the input method is often not natural, andreusing existing drawings is hard, if even possible. In contrast, methods forrecovering 3D shape from complete drawings are aimed at using existingnatural art as input. These methods allow us to preserve the standard 3Dmodel development process that often starts with sketches.These methods can use either a single drawing to recover 3D shape, ormultiple drawings. Using multiple drawings, artists have the freedom tospecify some details or shape features invisible from a single view. At thesame time, using these methods typically requires more user input to spec-ify correspondences between curves on different drawings. Single-drawingmethods are aimed at quick modeling and so typically don\u2019t require extrauser input; instead, they often rely on simplifying assumptions about thedepicted 3D shape.Using Multiple Complete Drawings. A number of methods, such asthe ones by Fiore et al., Rivers et al., Jain et al., and Levi and Gotsman[41, 65, 74, 106] use collections of vector character drawings taken from dif-ferent views to create a 3D shape proxy or enable direct rendering fromin-between views. The biggest problem this approach has is finding the cor-rect correspondences between curves in different drawings. Those methodsrely either on user-annotated dense curve correspondences in between thedrawings [41, 106], or manually specified correspondence between each in-dividual drawing and a user-positioned 2D [65] or 3D [74] skeleton. Themethods require at least three strategically posed drawings to achieve ac-ceptable results. In contrast, our method in Chapter 4 generates reposedcharacter renders that are qualitatively comparable to renders produced by112.1. Sketch-based 3D Modelingfront front frontalternate alternate alternate(a) (b) (c)Figure 2.2: Character drawings do not conform to the assumptions madein previous work. 
(top) The contours of a surface of revolution whose axisis not in the view plane are typically not planar. (bottom) The contours ofa typical character include numerous occlusions; a single contour curve canconsist of multiple part outlines (see left arm and torso outline in (a)) andas shown by the side view (b) the contour curves are far from planar or viewaligned. Our method introduced in Chapter 4 (c) successfully handles suchinputs generating a character model similar to the ground truth input (b).these methods from a single, descriptive drawing and a matching skeletonwith no additional annotation (Fig. 4.24).A related line of work is in the area of inbetweening [134], where thetask is to interpolate the motion of a character between given frames. Thegeometry of a character is assumed to stay roughly the same throughoutthe animation, and only the view and the pose might change. For complexcases, these approaches also rely on user-annotated curve correspondencesbetween different frames.Using a Single Complete Drawing. A range of methods attempt torecover character models from single view sketches with no extra input [20,28, 37, 68]. However, in doing so they by necessity enforce a range of strongsimplifying assumptions. In the domain of character models, Buchanan etal. [20] lift an occlusion-free 2D contour into 3D by placing circular arcsalong a 2D geometric skeleton; they assume the entire contour to be pla-nar and near-perpendicular to the view direction. Cordier et al. [28] liftcontour drawings of reflectively symmetric and symmetrically posed char-acters into 3D. They expect every part contour to be planar, and expecteach part to be represented as a separate curve in the drawing. Karpenko122.1. 
Sketch-based 3D Modeling(a) (b)Figure 2.3: (a) Lacking part information, character shape reconstructioncan at best exploit overall shape smoothness, e.g. [68, 95]; (b) by using askeleton to facilitate contour partition and part modeling, we generate amore believable character shape.and Hughes [68] successfully process character drawings containing partialocclusions and asymmetric poses, but assume that each contour curve isplanar and perpendicular to the view direction. Lacking part structure,they cannot leverage geometric priors on individual body-part shape anduse surface inflation (Fig. 2.3) to generate the outputs. Entem et al. [37]model animals from a canonical side-view sketch and rely on T-junctions tosegment the contours into separate part outlines. They assume all contourcurves to be planar and perpendicular to the view direction, and only handlelocal T-junction type occlusions between immediately adjacent body parts.The assumptions listed above do not hold for the vast majority of artic-ulated character drawings: these drawings frequently contain general inter-part occlusions, individual contour curves frequently extend across multiplebody parts, these parts are rarely perfectly symmetric, and part contoursare rarely planar (Fig. 2.2).Research in this category is related to our work presented in Chapters 4and 5, however, we do not impose such simplifying assumptions on the 3Dshape.By leveraging the additional information provided by the 3D skeleton,our method in Chapter 4 successfully relaxes all of these assumptions and isable to handle inputs such as \u2019sneaky\u2019 (Fig. 2.2 (bottom)) or the catwoman(Fig. 4.1, 4.3) which repeatedly violate them.S\u00fdkora et al. [122] use user annotation to recover a bas-relief with ap-proximate depth from a single sketch which they use to illuminate a 2D draw-ing. 
Their method makes no effort to create a 3D canvas that is plausiblefrom all views; as they note, their proxy meshes \u201cexpose their approximatenature when rendered from sideviews using a perspective camera\u201d.In the CAD domain, a variety of methods, e.g. one of Xu et al. [138] or one ofLipson and Shpitalni [78] (see [138] for full references) infer 3D curves from132.2. Posing Charactersa 2D sketch of a CAD model. Our research in Chapter 3 complements theseworks by inferring the artist-intended surfaces, thus allowing the creationof a complete 3D model.2.1.4 Skeleton-based 3D ModelingAnother alternative approach to modeling 3D shapes is skeleton-based mod-eling. Organic 3D forms created using implicit functions defined aroundinteractively manipulated skeletal primitives have existed for at least twodecades [13]. Recently, [8, 17] proposed to simultaneously create 3D shapesand corresponding skeletons as a means to integrate skeletal deformation andinteractive shape sculpting. While those methods do not require a skeletonas an input, neither framework can incorporate a complete 2D drawing intothe modeling process.2.2 Posing CharactersPosing characters can be done either with explicit 3D model, or via 3D-likeeffects on 2D sketches.2.2.1 Adding 3D Effects to 2D DrawingsExisting 2D animation tools support a limited range of 3D effects. They en-able occlusions via explicit layering [2, 4, 58] and approximate out-of-planedeformation using non-uniform scaling that mimics foreshortening [64]. Theseapproaches use a fixed 2D contour topology and are inherently unsuitablefor generic 3D manipulation which requires topological changes in charactercontour and reveals a priori occluded geometry (see Fig. 4.1(e)).Recent industry-driven research (e.g. [104]) aims to enhance hand-drawnanimation with 3D effects such as volumetric textures [9, 113], or clothsimulation [65], by utilizing separately created 3D models or proxies in thebackground. 
In Chapter 4, we produce the underlying 3D proxy requiredby these techniques using a single 2D cartoon frame and an appropriatelyposed 3D skeleton as input. Our problem formulation is a novel intersectionof skeleton-driven 3D modeling, sketch-based single-view modeling, and 3Dcharacter construction.142.2. Posing Characters2.2.2 3D Character PosingSketch Based Articulation Rather than creating a model from scratch,methods such as [71] deform a 3D character template to fit a contour draw-ing. They either expect the template and drawn poses to be aligned, orexpect users to manually specify coarse template-drawing correspondences.They then use local shape compatibility between the input outlines and thecorresponding 3D geometry to obtain dense correspondences. Since contoursin gesture drawings are approximate and highly abstracted, local shape com-patibility cannot be used as a reliable criterion in Chapter 5. Despite thisextra challenge, our method does not require manual correspondences norexpects the drawn pose to resemble the input bind one.Character Posing Interfaces In most industry setups, characters areposed via 3D skeleton manipulation. Users either manually adjust jointangles, or use Inverse Kinematics (IK) based tools to place bone end-pointsat specific locations [141]. While IK-based frameworks relieve some of thetedium of adjusting individual joints, they still require experience with 3Dmodeling systems and non-trivial posing time.Recent research demonstrates effectiveness of alternative posing approaches,such as handles [63], selected regions and exterior cages [66, 130], or anima-tion devices [47]. Handle and cage based approaches focus on expanding thespace of possible deformations, while animation devices focus on reducingamount of work needed. However, when artists ideate their desired posesthey prefer to use pen and paper. Using these ideation drawings as-is tocreate 3D poses saves artists time and effort.Hahn et al. [51] and Guay et al. 
[48] propose incremental, multi-view,sketch-based posing interfaces. Lines of action, imaginary lines runningdown a character\u2019s spine or other major bone chains (Figure 5.2b) are usedby artists for coarse pose communication [48]. Guay et al. use line-of-action strokes to pose characters by placing user-specified correspondingbone-chains along these strokes.This input allows multiple pose interpretations for body parts not di-rectly present on the line of action or its continuation, and requires anincremental multi-view interface to pose non-coplanar bone-chains. Hahn etal. [51] propose an interface where a user poses characters one limb at a time,by first drawing a stroke along a limb in the current pose and then draw-ing a corresponding stroke depicting its new pose. The system then posesthe limbs by aligning them to the strokes. It assumes uniform foreshort-ening along the posed limbs, and requires multiple stroke pairs and view152.3. Surface Reconstruction from 3D Curve Networkschanges to generate complex poses. Our work in Chapter 5 complementsthese approaches by providing a single-view drawing-based posing mecha-nism, allowing artists to directly use their gesture and keyframe drawingsfor character posing.A number of recent methods use stick-figures [33, 54, 77, 82] - 2D pro-jections of the desired 3D skeleton of the posed character (Figure 5.2a) -to compute a corresponding 3D skeletal pose. As the authors acknowledge,stick-figures are inherently ambiguous and allow for multiple geometricallyvalid and perceptually plausible 3D interpretations. Hecker and Perlin [54]and Mao et al. [82] propose that users encode the relative depth of bones andjoints via pen pressure or stroke width. Such interfaces become unwieldyfor typical characters (e.g. Figure 5.1) which have dozens of bones. Daviset al. [33] resolve ambiguities through user annotation, followed by usersselecting the desired character pose from multiple plausible solutions. Linet al. 
[77] use stick-figures to pose characters sitting in a chair, and reduceambiguities by using specific priors relevant only for sitting characters. Weiet al. [131] and Choi et al. [24] use drawn stick-figures to query a databaseof human poses. Such databases can be difficult to obtain for custom skele-tons, especially of non-humanoid or non-realistic characters. Reliance ondatabases inherently biases the reconstructed poses toward more frequentdatabase instances. In contrast to stick figures, gesture drawings are unam-biguous to human observers, motivating our approach. At the same timewhile matching 2D stick figures to 3D skeletons is straightforward up to in-herent ambiguity between symmetric limbs, matching characters to gesturedrawings is an open and challenging problem we successfully address for thefirst time. Small inaccuracies in 2D stick-figures can lead to large changesin the recovered 3D pose [33]. To improve accuracy, Davis et al. [33] adviseartists to first draw a gesture or bubble sketch of the target posed charac-ter, and then use it to assist in positioning the stick-figure (Figure 5.17).Our work in Chapter 5 operates directly on gesture drawings and robustlyovercomes artist inaccuracies by balancing image conformity against otherperceptual cues (Figure 5.15).2.3 Surface Reconstruction from 3D CurveNetworksOne can create 3D curve models of CAD objects via a variety of tools, e.g.via a sketch-based interface [7], or by lifting a 2D sketch into 3D [138]. Thosetools, however, are capable of creating curves only. The problem is therefore162.3. Surface Reconstruction from 3D Curve Networksto find the interpolating surface envisioned by the designer, given such curvenetworks.Early approaches aimed to reconstruct polyhedra, given 2D or 3D straight line drawings [85, 133]. 
In contrast, inferring a smooth surface from a set of2D\/3D freeform curves is a more ambiguous and challenging task.There is a large body of work on interpolating closed curves, or cycles,with smooth surfaces, much of it in the context of hole filling [81]. While asmall portion of the methods [1, 32, 40, 75, 95, 107] can operate on arbitrarilyshaped curves, the majority assume that the curve is pre-segmented inton sub-curves and can be mapped to a planar n-sided polygon with littledistortion, e.g. [27, 45, 128].Fitting to n-sided curve cycles A variety of popular techniques areavailable for interpolating and approximating networks of regular quad ortriangular patches [38], see [100] for a recent sketching motivated approach.These methods, including the well-known Coons patches [27], and their dis-crete extension [39] provide an effective solution. We show in Chapter 3that unlike other fitting approaches, these are widely used by modelers anddesigners as the resulting surfaces closely reflect designer intent.For cycles with n > 4 existing approaches can be classified into singlesurface fitting, e.g. [45, 128], or subdivision into quad or triangular cycles,e.g. [94, 112]. The first category of methods interpolate the cycles witha single surface patch by utilizing suitable n-sided convex 2D polygons asparameter domains. As acknowledged by Varady [128] the fitted surfacequality is strongly dependent on the quality of the 2D parameterization.Subdivision approaches, e.g. [94, 112], quadrangulate the input cycles,and then use available techniques to interpolate or approximate the resultingquad network. In the basic midpoint scheme a single vertex is placed in thecenter of a patch and then connected to the middle of each side. To generatea watertight surface across heterogeneous networks, Schaefer et al. [112] andNasri et al. 
[94] introduce more sophisticated quadrangulation schemes thatmaintain a fixed number of intervals, or sub-segments along each side whileaiming to control both the number and valence of the added extraordinaryvertices [94]. Note that all these approaches require n to be specified by theuser, which may not be evident from the topology of the network.A variety of techniques are available for interpolating and approximatingnetworks of regular quad or triangular patches [38], see [100] for a recentsketching motivated approach. These methods, including the well-knownCoons patches [27], and their discrete extension [39] (Figure 2.4, (d)) pro-172.3. Surface Reconstruction from 3D Curve Networksvide an effective solution which naturally aligns the surface iso-lines withthe flow-line sequences indicated by the boundary curves. Design and per-ception literature indicate that designers expect the curve cycle boundariesto correspond to representative flow-lines implying surface curvature direc-tions, a behavior captured by Coons interpolation (Figure 2.4, (d)), but notthe other fitting approaches. These methods are widely used by modelersand designers as the resulting surfaces closely reflect designer intent.Surface Fitting to Arbitrary Curve Cycles These more generic ap-proaches typically utilize a diffusion process that optimizes surface fairness.As we show in Chapter 3, those traditional approaches fail to capture de-signer intent on structured inputs, even if supplied with pre-defined normalsalong the input curves[75, 89]. Moreover, such normals are not part of atypical curve-based modeler output [7, 95, 115]. Other approaches imposevery strict constraints on the inferred surface, e.g. developability, often in-compatible with artist intent [107]. 
This approach is too restrictive for a general modeling setup, where many inputs, including the cycle in Figure 2.4 (a), aim to convey non-developable surfaces. Quad Meshing: Our work draws on ideas from coarse-to-fine planar meshing approaches, such as sub-mapping [101, 108]. In contrast to those, it supports irregular quad connectivity, automatically introducing irregular interior vertices when warranted by the boundary shape (e.g. Figure 3.10). More significantly, it operates on 3D curves, without the benefit of a well-defined planar domain. While planar meshing methods focus on element quality or shape, our goal is to recover and quadrangulate a surface enclosed by designer-drawn curves. Many recent publications address quad meshing of existing 3D surfaces [14, 15, 30, 67, 76, 84, 127]. These methods aim to align the output quad meshes with the principal curvature directions in anisotropic regions, generating smooth orthogonal families of flow-lines. In our setup no underlying surface is available. Instead we aim to align the output meshes with the flow-line directions conveyed by the input designer curves, which as noted above strongly correlate to curvature lines. As shown by Figure 2.5 (top), using the actual shape of the curves to determine the end-point locations and induced topology, as done by our method in Chapter 3, can significantly improve both the flow line layout and the resulting surface shape. Contrary to all the approaches above, our method can operate on curve cycles with large concavities (Figure 2.5 (bottom)). Figure 2.4: Using Laplacian diffusion (b) or Thin-Plate Splines [40] (c) to surface a four-sided cycle leads to unintuitive results. 
(d) In contrast theflow lines on an interpolating Coons patch, by construction, bridge oppositecycle sides.Figure 2.5: (top) Using a purely topological approach and applying mid-point subdivision (forming either four or six sides) generates a quad meshwith poor flow line layout (left and center). Our method in Chapter 3 (right)uses geometry driven segmentation and matching to generate smooth flowlines and a predictable surface. (bottom) On a concave cycle, parameteriza-tion onto a convex domain (a rectangle) leads to foldovers (left), our methodautomatically segments the cycle into convex quadrilaterals leading to a fairsurface (right).19Chapter 3Design-DrivenQuadrangulation of Closed3D curves3.1 IntroductionIn this chapter we present our first contribution, Design-Driven Quadran-gulation from Closed 3D Curves. Here we introduce a new approach tocreating surfaces interpolating closed 3D curves created by sketch-based orother curve modeling systems. The project has since been published in ACMTransactions on Graphics [11].Sparse networks of closed 3D curves are the foundation of shape in bothtraditional CAD modeling [39] and increasingly popular sketch-based mod-eling interfaces [7, 95, 115]. As we mentioned in the introduction of thedissertation, recent research affirms that such 3D curve networks do effec-tively convey complex 3D shape [35, 88, 89] (Figure 3.1 (a)). We aim torecover and compactly represent this conveyed shape (Figure 3.1 (f)), fordesigner-drawn curve networks,such as those generated by Abbasinejad etal. [1] from sketched 3D curves [7].While arbitrary 3D curve cycles have highly ambiguous interpolatingsurfaces (Figure 3.2 (top)), designer created curve cycles, even when highlycomplex, typically convey a uniquely imagined surface (Figure 3.2 (bot-tom)). These curves are designed to serve as a visual proxy of the 3D object,with the expectation that every element of surface detail is explicitly cap-tured by the network [42]. 
To this end, design texts repeatedly emphasizethe significance of using representative flow-lines of the object [16, 42], ascurve network elements. While design literature provides no precise math-ematical definition of flow-lines, design and modeling references [42, 119]suggest that flow-lines are strongly correlated to sharp features and lines ofcurvature but allow for artistic license at surface discontinuities, over finedetails and in umbilic regions.203.1. Introduction(e) design-driven quadrangulation(a) input curve network(b) initial segmentation(c) pairing and iterative refinement(d) final quadrangulation and quad-mesh(f) design renderingFigure 3.1: Steps to quadrangulating a design network of closed 3D curves(a) : Closed curves are independently segmented (b) and iteratively pairedand refined to capture dominant flow-lines as well as overall flow-line quality(c); final quadrangulation in green and dense quad-mesh (d); quadrangula-tions are aligned across adjacent cycles to generate a single densely sampledmesh (e), suitable for design rendering and downstream applications (f).These observations, confirmed by perception studies [120], suggest thatany additional flow-line on the surface must be expressible as a blend of theexplicitly defined flow lines on the designer-created curve cycles. Viewerscomplete the intended shape by envisioning a dense network of such blended,gradually changing flow-lines. An examination of artist-drawn dense net-works (e.g. Figure 3.3) confirms this observation; moreover, artists takeadvantage of this property by implicitly pairing opposite representative flow-lines, and constructing curve sequences that smoothly evolve from one inputflow-line to its mate along the interior surface. 
The resulting surface is de-scribed by the union of these sequences, and forms a quad-dominant mesh.Consistent with this examination, popular CAD tools capture the geometryof four-sided curve cycles using Coons patches [27] (Figure 2.4 (d)), whoseiso-lines implicitly define a sequence of flow-lines that bridge the oppositesides of each cycle.Our surface-fitting algorithm aims to replicate this behavior. Beforeformally describing the algorithm, we introduce some terminology. A 3Dcurve network is a graph of connected 3D curves, where one or more curvecycles have been marked for surfacing by the designer. A quadrangulatedcurve network (left, black) requires that all curve cycles marked for surfacingbe four-sided. A single quad-mesh can be created from quadrangulatedcurve network cycles by sampling parametric four-sided patches. The dualof a quad network is a graph whose vertices correspond to quad cycles, andwhose edges correspond to shared cycle sides. Each dual poly-chord, drawn213.1. IntroductionFigure 3.2: Closed 3D curves: ambiguous hexagonal 3D curve (top) com-pared to complex curves with a clear design intent (bottom).on the left in a different color, is a sequence of dual edges that correspondsto a chain of quadrilaterals sharing opposite sides [31].If we can extract the flow-line pairings that artistsuse, we can then reconstruct the surface in a natu-ral manner using a dual quadrangulation approach, de-scribed below. The grand challenge, therefore, is in ob-taining a suitable segmentation of a curve cycle intopairs of matching opposite flow-lines. As we expect in-ternal flow lines to change smoothly and gradually, thesebridged segment pairs should have similar orientation and shape. When ex-amining artist generated flow-networks, we observe that the preference forpairing segments becomes more pronounced as the degree of compatibilitybetween them increases, often at the expense of sub-optimal pairing of othersegments. 
This effect is evident in the highlighted regions in Figure 3.3. Onthe left, the strong preference for the blue pair enforces the far less obviousred one. On the right, the dominant yellow and blue matches enforce the farless attractive purple one. Such dominant preference order can be formallydescribed as a stable matching, where a matching is considered stable when223.1. IntroductionFigure 3.3: Artist designed interpolating quad-meshes.there are no two elements that prefer each other to their current match [62].We simultaneously compute the segmentation and its corresponding sta-ble pairing using a tailored discrete optimization strategy which interleavesmatching and segmentation steps.Given the computed segmentation and pairing (Figure 3.1 (c)), we con-struct a network of quadrilateral cycles (Figure 3.1 (d)) whose dual poly-chords connect the matched flow-line curve segments and interpolate thosewith tensor-product surface patches. Using this construction, the iso-lines ofthe patches naturally align with the matched curves, forming a dense flow-line network conveying the intended surface. An arbitrarily dense quad-meshdescribing the target shape is then created by tracing patch iso-lines (Figure3.1 (f)).We demonstrate the quad meshes created by our method on a varietyof challenging inputs, including both synthetic models and curve networkscreated by different modeling softwares, comparing our outputs against thosemanually created by design professionals (Section 3.4).Contribution:The main contribution of the chapter is the first solution to constructingthe imaginary surface interpolating a general 3D design curve network. Werepresent this surface using a quad mesh whose iso-lines capture the designflow inherent in the network. Lacking a mathematical model of human per-233.2. 
Quadrangulating a Closed 3D CurveFigure 3.4: After the initial segmentation (a), we alternate matching andrefinement steps to obtain a pair-based curve segmentation which is con-verted into a quadrilateral network (c) . To minimize T-junction count (d)we compute global interval assignment, and use it to sample iso-lines ondiscrete Coons patches.ception, we distill perception studies and guidelines from design literatureinto a mathematical formulation of flow-line matching and segmentation.We evaluate this formalism by showing results that match both viewer ex-pectation and artist created surfaces.Our key technical innovation is a simultaneous segmentation and pairingalgorithm that locates suitable end segments for the dual poly-chords of theinterpolating quad mesh based on analysis of the input curve geometry.Quad-remeshing techniques often strive to generate rectangular quad el-ements. We note that our primary objective is to capture flow-lines; sincethese lines are often related to lines of curvature, we will typically generatewell-shaped quads. However, when flow-lines conflict with quad orthogonal-ity, we focus on capturing the flow at the expense of irregularly shaped quads(see Figure 3.1). This ensures that our output is consistent with designerexpectations (Figure 3.3).3.2 Quadrangulating a Closed 3D CurveThis section describes our approach for quadrangulating the interior of aclosed curve such that the iso-lines induced by the 4-sided curve cycles cap-ture designer intended flow-lines. The extension of this method to networksof curves is discussed in Section 3.3. We use a dual based quadrangula-tion approach, where we first compute the dual graph of the quadrangu-lation (Section 3.1), and then use it to induce the primal quad connectiv-ity and geometry (Section 3.2). 
This workflow is illustrated in Figure 3.4.ijflowbridgesegment pair (i,j)To assemble the dual, we segment the input curve into asmall number of matching segment pairs that serve as op-posite ends of dual graph poly-chords and corresponding243.2. Quadrangulating a Closed 3D Curveprimal quad-chains. In this respect, paired segments areanalogous to river banks that both bound and define theflow between them; the poly-chord represents a bridge across the flow, con-necting the paired segments.Simultaneously computing this segmentation and pairing is an ambitiousproblem; we want to explicitly minimize the average matching cost, whileavoiding outlier matches with very high cost. We consider the average,rather than the sum, so that the cost is not affected by the number ofsegments. To render this problem tractable, we use a discrete iterativeoptimization strategy that interleaves matching and segmentation. Givenan existing segmentation and an appropriate cost metric, the right pairingstrategy is not simply one that minimizes an overall cost, but instead onethat prioritizes strongly compatible segment pairs that define dominant flow-lines. As noted in the Introduction, this can be mathematically formulatedusing the concept of a stable matching; we can find such a stable matchingusing the method of Irving [62].Once we have obtained such a pairing, we can then refine our segmen-tation by looking for a subdivision that maximally decreases our averagematching cost without increasing the worst match cost (Section 3.1.5). Tofind the optimal splitting point(s), we examine the pairings in the currentstable matching and consider strategies that improve the current high-costmatches. This new segmentation can then be fed back into the matchingstage. To generate the desired segmentation and pairing, we start from aninitial segmentation and interleave segmentation and matching steps. 
Sincewe aim for a compact quadrangulation, we use a coarse to fine segmentationupdate strategy, starting with the minimal segmentation for which the no-tion of opposite segments, or bridging directions, is well defined. To avoidover-segmentation we stop the refinement process once the improvement tothe average match cost becomes insignificant.The final segmentation induces a poly-chord graph, which we use togenerate quad network connectivity. The generated interior curves are posi-tioned using an extension of the quadrangulation scheme of Nasri et al. [94](Figure 3.4 (c)). A mesh of the entire network is then computed as discussedin Sections 3.2 and 4 (Figure 3.4,(d,e)).3.2.1 Segmentation and MatchingThe pseudocode below describes the flow of our iterative segmentation andmatching process. Every iteration, we subdivide one or more segments tomaximally reduce the average matching cost, without increasing the worst-253.2. Quadrangulating a Closed 3D Curvematch cost (Section 3.1.4). While the number of curve segments, at in-termediate steps of the algorithm may be odd, each iterative refinementincrements the number of segments, typically by one, admitting a perfectsegment matching after one or two iterations. We continue to iterate untilthere is no significant drop in the average matching cost, rolling back to thelast even segmentation when significant improvement is no longer possible(Figure 3.5). While this algorithm does not guarantee a globally minimalaverage match cost, it captures our design goals admirably in that it findsand preserves dominant segment pairs early and then refines segments asnecessary to reduce the matching cost of poorly paired segments.Notation: The above steps are described succinctly using notation andpseudo-code as follows: Given a curve segmentation \u03c3 = 1, .., n, we referto (i, j) as a distinct segment pair with a matching cost ci,j ((i, j) and ci,jare symmetric). 
c_{i,j} captures the compatibility of any two curve segments to form opposite sides of a dual poly-chord in our target quadrangulation. M(σ) is a perfect matching of σ, where each segment is uniquely paired, barring a solitary unmatched segment when the number of segments ‖σ‖ is odd. We define the average cost of a matching M(σ) as cost(M, σ) = (∑_{(i,j)∈M(σ)} c_{i,j}²) / (2 · ⌊‖σ‖/2⌋). A constant drop = 1.25 captures the factor of average cost reduction below which the iterative algorithm terminates.

σ = initial segmentation (Sec. 3.1.1);
M(σ) = stable matching of segment pairs (i, j) using match cost c_{i,j} (Sec. 3.1.3);
U∗_b = ∞; cost∗ = ∞;
repeat
    if ‖σ‖ is even then
        σ∗ = σ; M∗ = M; cost∗ = cost(M, σ); U∗_b = max_M c_{i,j};
    end if
    σ′ = refine σ (Sec. 3.1.4);
    M′(σ′) = stable matching of σ′; σ = σ′;
until (‖σ′‖ is even) and (drop · cost(M′, σ′) > cost∗ or U∗_b < max_{M′} c_{i,j});
create internal quadrangulation curves from poly-chord graph of M∗(σ∗);

We now elaborate on the rationale and details of each step. Initial Segmentation: As described in the Introduction we expect the flow-lines induced between any pair of segments to be smooth. Motivated by this continuity property of flow-lines, we can use any robust corner finding technique, such as computing discontinuities of discrete curvature along the curve [87], for our initial segmentation. Figure 3.5: Iterative segmentation refinement: (a) initial segmentation where the matching highlights correct dominant side matches. The match quality is drastically improved by segmenting the bottom curve (b), and repeating the process (c) to obtain an even segment count. Further refinement has no real impact on matching cost. 
We further refine this segmentation to ensure that theline segments connecting curve end-points are near linear using a techniquesimilar to [88]. This property helps define coherent bridge directions formatching cost evaluation, described next.Segment Pairing CostPaired segments have a two-fold impact on the final flow-line network. Theyexplicitly define the sequence of flow-lines evolving from one segment to itsmate. They also impact the family of flow-lines intersecting this sequence.Since the pairing defines a chain of quadrilaterals in the final quad network,these intersecting flow lines connect the two segments by evolving from onepair of end-points to another (see Figure 3.6 (a)). To generate the designer-expected flow-line network, the matching cost must satisfy the followingcriteria. First, to minimize the variation of flow-lines that evolve from onesegment to the next we aim for the segments to be similar. Matching impactsthe shape of the intersecting family of curves, or bridge, which in general wewant to be as straight as possible, minimizing its curvature. Internal flow-lines should reflect input curve geometry, thus we would like the bridge tobe aligned with intersecting flow lines evolving from input curve chains con-necting the two segments, or, since these chains can be very complex, to atleast align with intersecting sequences evolving from neighboring segments.Lastly, to best capture the general correlation between flow-lines and linesof curvature of the imagined surface, we expect intersecting sequences offlow lines to be orthogonal. We capture the last two requirements through a273.2. 
Quadrangulating a Closed 3D Curveijtip\u2019p(b) distance di,j, similarity si,j ijTi,jdi,jL2i,j(a) bridge curvature bi,j using bridge directions ti Figure 3.6: Estimated bridge curvature for different segment layouts mea-sured as angle (red) between bridge direction ti and p \u2212 p\u2032 (at a point p).The dashed lines visualize representative intersecting flow-lines (a). Shapesimilarity and distance cost terms (b).per-segment preferred bridge direction, which depends on the segment andits two neighbors. We use these directions to define bridge curvature bi,j .Our matching cost combines bridge curvature, a term measuring similaritybetween the segments si,j , and a weak distance term di,j used to prioritizemore close-by matchesci,j = wbbi,j + wssi,j + wddi,j .As the segments typically have fairly similar shape, bridge curvature domi-nates the cost with wb = 0.8 and ws = wd = 0.1.Bridge Curvature: To estimate the curvature of the anticipated inter-secting flow lines, or bridge, between segments i and j, we use the predictedbridge directions ti and tj for both ends of the bridge. As illustrated inFigure 3.6, the flow-line shape depends both on these directions and the rel-ative location of the segments. As start and end positions, plus directions,allow for fitting of multiple flow-line curves, explicitly evaluating flow-linecurvature is problematic. Instead we use an angle based curvature predictordefined as follows. Let p be a point on the segment i, and let p\u2032 be thepoint where the angle between the vectors ti and p \u2212 p\u2032 is minimal on thesegment j, i.e. p\u2032 = argminx\u2208j |\u2220(ti, x\u2212 p)|. Then, for a given point, the an-gle \u2220(ti, p\u2032 \u2212 p) measures the angular difference between the shortest bridgebetween the segments and the one taken when using the estimated bridgedirection ti. To compute deviation across the segment i, ai\u2192j , we averagethe angular difference over all points. 
Finally, we set the bridge curvature tothe maximum of the per-segment deviations, namely bi,j = max(ai\u2192j , aj\u2192i).283.2. Quadrangulating a Closed 3D CurveBridge directions: The bridge direction ti is the predicted optimal tan-gent direction for the flow-lines intersecting the segment i.As such, it depends both on the segment orientation, and onthe bridge directions at neighboring segments.The initial bridge direction ti, for any segment i, is es-timated from the initial segmentation (Figure 3.7(a-c)) andthen refined in every subsequent algorithmic iteration (Fig-ure 3.7(d)). The initial bridge direction ti = ni, is set tocapture a direction orthogonal to the segment and lying onthe imaginary surface emanating from it. Specifically, we define ni as theperpendicular to the straight line fi connecting its end-points, in the best-fit plane of the segments i and its neighbors. Neighboring segments canalso strongly influence bridge direction. An adjacent segment m is consid-ered to influence the bridge direction of i if it is of reasonable arc-length l(1.5 \u2217 lm > li), and if its general flow direction fm is likely to form flow-lines intersecting those emanating from i (\u2220(fm, fi) \u2264 135 \u25e6). The bridgedirection ti is refined to be the average f of its influential neighbours (Fig-ure 3.7(a)(b)), or left as ni if none exist (Figure 3.7(c)).Then, at every algorithmic iteration, we update bridge directions (Fig-ure 3.7(d)), using dominant pairs, i.e. pairs (i, j) such that cij < dom, wheredom = 0.15. First, we refine the bridge direction of the dominant pairs. Weupdate ti and tj of all dominant pairs (i, j) to their current average (thusimplicitly lowering their bridge curvature estimate bi,j). Next, for any seg-ment i that is not dominantly matched but has a neighbor m that is partof a dominant pair, we use tm to update ti. Specifically, we attempt to setti to either align, or to be orthogonal to, tm. 
if the angle between ti andtm is less than 135\u25e6, we set ti to be orthogonal to tm in the plane definedby ni. If 135\u25e6 \u2264 \u2220(ti, tm) \u2264 225 \u25e6, we set ti = tm. If ti has two dominantneighbors, we use the one with lower matching cost for the update. Theremaining bridge directions are left unchanged in this iteration.Distance and Similarity: The distance di,j is simply the Euclidean dis-tance between the segment centers (Figure 3.6b). Given two curve segmentsi, j, we measure their similarity in terms of shape and scale. We measurescale as the difference in curve length \u2016li \u2212 lj\u2016.To compare shape, we firstcompute a best-fit affine transform Ti,j from i to j. We do this by resamplingthe curves by arc-length using the same number of points, 50 for all our ex-periments. We then use a linear least squares formulation to find the affinetransformation which minimizes the L2 distance between the two point-sets.293.2. Quadrangulating a Closed 3D Curve(d) updating ti (magenta): average ti\u2019s for dominant pairs (green), then set ti\u2019s of their adjacent segments (orange) to be || or .fifnfmti(a) ti=fm+fn (b) ti=fn (c) ti= to fi in best-fit plane to fi ,fm ,fnfifnfmtififnfmtiFigure 3.7: Initial bridge direction ti of segment i is determined by adjacentsegment flow directions fm, fn and its normal.We use a generic affine transform instead of a rigid one to allow for non-uniform scale and shear. We then measure similarity as the L2 closest-pointdistance between the transformed curve and its mate Li,j . All distancesare normalized by the diameter of the processed curve, i.e. by the maximaldistance between two points on the curve. Similarity between curves is thenset to si,j = 0.5\u2016li \u2212 lj\u2016 + 0.5(1 \u2212 e\u2212L2i,j\/\u03c32). The second term measuresthe affine invariant shape difference of two curve segments. 
Specifically, wedefine a function that is zero if the curves are identical and 1 if they are max-imally different. We achieve this mapping using a Gaussian fall-off functionapplied to the L2 distance between the curves segments. Normalizing thisdistance by the diameter of the curve loop and setting \u03c3 = 1\/3, set usingthe three-sigma rule, results in the desired shape difference function.Stable matching of segment pairsGiven a curve segmentation and a cost of pairing any two curve segmentsto form opposite sides of a poly-chord, this step aims to match segmentpairs in a manner that maximally satisfies the dominant pairing preferencesproducing a stable matching.The standard algorithm for computing a stable matching [62] consists oftwo phases. First, each segment \u201cproposes\u201d to all other segments in order ofpairing preference, continuing to the next segment if and when its currentproposal is rejected. A segment rejects a proposal if it already holds, orsubsequently receives, a proposal from a segment it prefers. In our setup,since matching costs are symmetric, if the number of segments is even thisstep ends with each segment holding a proposal from another segment. Ifthe number of segments is odd, one segment is left out by the process andis ignored by the subsequent step.303.2. Quadrangulating a Closed 3D CurveHeld proposals form a set S of ordered segment pairs (i, j), where i holdsa proposal from j (j is i\u2019s current favorite). S is a stable matching if (j, i) \u2208 Swhenever (i, j) \u2208 S. A second phase of repeated co-rotations, described be-low, transforms S into a stable matching. Suppose that (i, j) \u2208 S, but not(j, i). For each such i we identify the current second favorite to be the firstsuccessor of j in i\u2019s preference list who would reject their held proposal in fa-vor of i. 
A rotation relative to S is a sequence (i0, j0), (i1, j1), ..., (ik\u22121, jk\u22121)such that (im, jm) \u2208 S for each m, and jm+1 is im\u2019s current second fa-vorite (all indices are modulo k). A co-rotation replaces pairs (im, jm), with(im, jm+1)in S.The standard method [62] is proven to provide a stable match for an evennumber of participants, unless an odd party is found [124], i.e. a rotationsuch that k is odd, and pi = qi+(k+1)\/2 for all i. In that case no stablematching exists. In the rare case of an odd party, we have an odd-lengthcycle of segments with equal pairwise costs, e.g. an equilateral triangleor three perfectly symmetric curves (Figure 3.10). This case can be seenas a generalization of the standard midpoint splitting, and is resolved bysplitting each segment in the cycle into two. Once the split is performed, aclear difference in cost emerges and the matching is repeated.Segmentation RefinementThe refinement process looks for a segment, or segments, to subdivide so asto maximally decrease the average matching cost. Our refinement examinestwo segmentation strategies, first searching for a single edge refinement andthen a global mid-edge split. Since the number of segments is typically verysmall, a stable matching computation is practically instantaneous. Using thefirst approach, we quickly iterate over all segments, segmenting each one andevaluating the cost of the match computed with the refined segmentation.We then select the segmentation that maximally lowers the cost. Using thisstrategy, the one question we need to address is where to place the split, asthe location can impact the subsequent segmentation cost.The basic strategy of splitting the segment in half is tested first, then amore targeted strategy that leverages the computed matching is applied tothe currently matched segments. 
Given a current segment i which is matchedto j we search for all segments k that are either unmatched, or that preferto be matched to i rather than their current mate l, i.e. ck,i < ck,l. Insuch situations, for instance the bottom curve on the basket (Figure 3.5),splitting the curve strategically into i1 and i2 can often satisfy this preferenceby generating matches (i1, j) and (i2, k). To minimize the cost of (i1, j) and313.2. Quadrangulating a Closed 3D Curve(i2, k) we break i into two possible subdivisions i1, i2 based on arc-length (l)ratio, where li1\/li2 = lj\/lk or li1\/li2 = lk\/lj , and li1 + li2 = li, and test thematches induced by these segmentations.While theoretically more comprehensive or global segmentation refine-ment strategies may exist, we found our approach to work well in practice.It preserves dominant pairs and improves poor matches as intended by oursubdivision heuristic.3.2.2 QuadrangulationOnce we have an acceptable perfect stable match whose cost cannot be re-duced by further segmentation, we use this segmentation and matching andits induced poly-chord graph (see Figure 3.4), to construct a quadrangula-tion.Extracting Quad Connectivity: Using standard dual notations [31] wesay that two poly-chords (i, j) and (k, l) intersect in the graph theoretic senseif and only if their corresponding curve segments are interleaved on the closedcurve. For instance, the purple and red segments on Fig. 10 are interleaved,resulting in intersecting poly-chords. To generate a valid quadrangulation werequire that the poly-chord graph be connected . This is easily accomplishedby adding curve segments connecting end-points of common segments ofcomponents of the poly-chord graph and turning each graph componentinto a smaller closed curve, for which our algorithm can be re-run (Figure3.8). To avoid T -junctions we disallow the newly added segments from beingfurther refined. 
To make the quad layout more compact, we merge adjacentpoly-chord (i, j) and (i+1, j\u22121) when the transition between the consecutivesegments is smooth.An intersection between two poly-chords corresponds to a quadrilateralin the final network. Connectivity between these quads is determined by theintersection order, e.g. determining the top-down order of the intersectionsof the green poly-chord with the blue and red ones in Figure 3.9. We de-fine the quad connectivity by incrementally embedding poly-chords into thelayout of cells, or regions, bounded by input boundary segments and pre-viously added poly-chords. Given the graph whose vertices are these cellsand whose edges connect adjacent cells, we embed a poly-chord by comput-ing the shortest path in this graph between the two vertices or cells, corre-sponding to the boundary curve segments connected by the poly-chord. Thispath minimizes the number of intersections between the new poly-chord andthose already embedded. This choice minimizes the number of dual graph323.2. Quadrangulating a Closed 3D CurveFigure 3.8: A disconnected dual graph (left) does not allow for a valid primalquad mesh. Splitting the cycle into two by a temporary curve segment(dashed) generates valid graphs for both parts which combined togetherinduce a valid primal quad mesh (right).Figure 3.9: Two intersection orders induce different quad connectivity, withthe one on the right inducing a better quad shape, and consequently asmoother flow.cycles. Such cycles correspond to interior primal quadrangulation verticesadding which, as discussed below, can reduce flow smoothness. Given twoequal length choices, we prefer one that induces better shaped quadrilaterals,where quality is measured as the scaled Jacobian [19] (Figure 3.9).Extracting quad geometry: The dual graph defines the connectivity ofour quadrangulation. 
To position the interior vertices and curves we use a two step process which leverages the quad topology to generate interior curves best reflecting the flow directions. Specifically we note that each chain of quads can be seen as a four-sided uv patch interpolating two flow-line end segments. Associating the v coordinate with the end segments, we expect the patch u-isolines to smoothly interpolate them. Our geometry computation builds on the geometry construction in [94] which shares the same goal. We first compute the interior vertex positions that best satisfy our requirements, using a global optimization of a per-vertex formulation [94], that sets each vertex G to a weighted sum of vertex positions in neighboring quads:

Figure 3.10: We first position interior vertices (left) and then use the chain-long quads to position the interior curves (center). Finally, the resulting quad cycles are quad-meshed using discrete Coons patches (right).

Figure 3.11: Our distance based weighting (right) generates smoother flow line evolution than the topology based one [94].

G = ( \sum_{i=1}^{n} (E_i + E_{i-1} - C_i)/a_i ) / ( \sum_{i=1}^{n} 1/a_i )    (3.1)

where E_i are quad network vertices that share side curves with G, C_i are the diagonal quad corners between E_{i+1} and E_i, and a_i = \|E_i - C_i\| \|C_i - E_{i+1}\| is an estimate of the area of the corresponding quad (see inset). We then generate straight-line edges connecting these and boundary vertices as an intermediate approximation of the quadrangulation. Using this initial network each interior curve is now computed as a u-isoline on the quadrilateral patch containing two bounding flow-lines and the curve paths connecting them, using a discrete Coons formulation [38] (Figure 3.10). 
This formulation takes into account the distance of the new curve from the bounding flow lines, improving on the original formulation of Nasri et al. [94] (Figure 3.11).

Meshing: To fit a surface in the interior of each quad-patch we can use any number of methods. The examples shown in this chapter use a quad mesh sampled on a discrete bicubic Coons surface [109]. This construction provides continuity across shared boundaries when the cross tangents are continuous. More sophisticated fitting tools which provide better cross-patch continuity can be used as well.

Figure 3.12: Removing interior vertices: (Left) initial match (top) and induced quadrangulation (bottom); (Right) the final match with purple and green pairs flipped (top) has a slightly higher cost but the induced quadrangulation (bottom) has no interior vertices, leading to smoother flow-lines.

Minimizing Flow Dislocation: The segmentation and pairing algorithm optimizes the cost of the individual flow-line matches, but does not explicitly consider the impact of the quad patch connectivity on the final flow. Specifically, at the matching stage it is hard to predict the impact of the introduction of interior patch vertices on the smoothness of the flow lines. In some cases these vertices are essential to forming a good surface such as on the top of the espresso machine (Figure 3.17), but in other cases removing them can improve the flow (Figure 3.12). Thus, given a quadrangulation, we test if removing any of the interior vertices can improve the surface quality. Recall that each such vertex corresponds to a cycle in the dual graph. We thus attempt to break cycles in the dual graph if the quadrangulation quality improves and the increase in the overall matching cost is acceptable. Specifically, for each edge \u3008(i, j), (k, l)\u3009 of a cycle in the poly-chord graph we evaluate the consequence of swapping segment pairs to (i, l) and (k, j), or (i, k) and (l, j). 
A swap is valid if the following three criteria are satisfied:the quad quality, measured using the scaled Jacobian, is improved, no newcycles are introduced into the graph and the cost of the matches after theswap is no greater than the worst match cost before it. We thus perform avalid swap for the poly-chord edge of the cycle with the minimum increasein matching cost (Figure 3.12).353.3. Processing Curve Networks(a) (b)(c) (d)Figure 3.13: Separately processed cycles (a) introduce T-junctions. We firstresolve the T-junctions across pairs of neighbouring patches by propaga-tion (b), generating a well defined hierarchy of matching primary segments.We then use integer programming to compute interval assignments (c) thatminimizes the number of T-junctions, typically leading to a watertight mesh(d).3.3 Processing Curve NetworksUp until now, we have only considered the meshing of a single curve cy-cle. The reason for this is that in curve networks, the majority of verticesadjacent to two or more cycles define corners that induce our initial segmen-tation. The remaining vertices form T-junctions that should not bias theflow-lines within cycles where the incident curves are continuous. Once theindividual cycles have been quadrangulated however, we must ensure thatthe geometry is watertight across the common boundary of adjacent cycles.For a quad-mesh fitting this requires the sampling, or interval count, alongshared boundaries to be the same on both sides. This goal is easy to achievefor a conforming quad-patch layout, such as those generated inside each in-put cycle, using a fixed number of intervals per boundary curve. Special careis needed though, when meshing curve networks where cycle segmentationcreates T-junctions.We optimize interval assignment using two modifications to the basic cy-cle quadrangulation algorithm described above. 
The first stage, performedafter segmentation and matching process, described above, for each cycle, re-solves the initial, primary, T-junctions between pairs of neighbouring cycles.A T-junction occurs when one curve has a segment end-point, or vertex, ata boundary point and another curve does not. Given a T-junction, we first363.3. Processing Curve Networksattempt to resolve it by merging adjacent vertices based on a threshold dis-tance, while keeping in place both sharp corners and T-junctions present inthe original artist input. Throughout our experiments, we set our thresholdto 5\u03b4, where \u03b4 is the minimum Euclidean distance between adjacent samplesof the input polylines. Intuitively, the finer the initial sampling, the moreprecise the algorithm is, the smaller the merging threshold we need.For any T-junctions that we cannot resolve in this manner, we split theadjacent segment and its matching segment in the corresponding cycle. Wethen refine the matching accordingly. This process resolves all the primaryT-junctions, but in turn introduces secondary T-junctions where the match-ing segments are split (Figure 3.13, (b)). These T-junctions are furtherreduced using another iteration of threshold based merging.Contrary to primary T-junctions, the secondary T-junctions are guaran-teed to be contained in primary segments that share clearly defined primaryvertices (Figure 3.13, (b)), a property we take advantage of in the final inter-val assignment stage. At this point, the network is converted to quad-patchtopology using the method of Section 3.2. In the final step, when generat-ing the per-patch meshes, we need to assign a consistent interval count toeach segment. For a given primary segment, we require that the number ofintervals on both sides of the segment are equal. We further require thateach secondary segment (one bounded by primary or secondary vertices) andits matching segment have the same number of intervals. 
Finally, we wish to minimize the total interval count while enforcing a minimum number of intervals per edge based on its length.

If we formulate all of these requirements as a wishlist, as shown by Mitchell [91], there may exist configurations where no valid assignment exists. We therefore relax our watertightness requirement, which allows us to reformulate this problem in terms of a minimization. Consider a pair of adjacent primary segments L and R. By virtue of the first step, we know that L and R share common endpoints; however, they may each contain a differing number of secondary segments. If l is a secondary segment on L and r is a secondary segment on R, let n_{l,L} and n_{r,R} represent the number of intervals that the secondary segments l and r are divided into, respectively. We can then express our minimization condition as the following function:

min f(x) = w \sum_{(L,R)} ( \sum_{l \in L} n_{l,L} - \sum_{r \in R} n_{r,R} )^2 + \sum_{L,l} n_{l,L}

The first term in this equation seeks to minimize the number of mismatched interval counts along a given pair of adjacent primary segments. The second term seeks to minimize the total number of intervals for the entire mesh.

Figure 3.14: Quadrangulation and meshing of closed curves.

We use w = 1000 to minimize the number of mismatches as much as possible. This minimization is subject to a number of constraints. We require that opposite segments of each quad patch have the same number of intervals. Additionally, we require that the number of intervals on a given secondary segment does not fall below a specified minimum. This minimum is determined by dividing the secondary segment length by a user-specified desired (local or global) interval length. Together, the minimization function and constraints form a quadratic, mixed-integer programming problem, which we solve using Tomlab/CPLEX. This approach led to valid assignments for all the inputs we tested. 
The assigned intervals are used to optimize the positions of the secondary T-junctions and generate the final meshes.

3.4 Results

Closed Curves: We generated a number of synthetic test inputs to evaluate the behavior of our method on a variety of closed curves with different side configurations demonstrated in Figures 3.14 and 3.15. These included a variety of convex regions (Figure 3.14 (a-f)) with different degrees of planarity and different number of boundary discontinuities.

Figure 3.15: Quad meshes of complex closed curves including interior cycles.

Figure 3.16: Artist generated meshes (left) and ours (right) exhibit very similar flow-line patterns.

For some of the inputs the expected surface shape is best captured by introducing an extraordinary interior vertex (c,d). For other regions with n > 4 sides such as (e,f) a regular connectivity better captures the intended shape. Our method makes the appropriate choice based on analyzing the relationships between the input curve segments, and the degree of parallelism between them. This is in contrast to purely connectivity methods, e.g. [94], where the choice is strictly based on the number of segments. Figure 3.14 (b) shows an atypical two-sided region, nevertheless reasonably fitted by our method, while (g,h) show non-convex regions, where the optimal pairing is found automatically through refinement of initial segments. The letters in Figure 3.15 show the robustness of the method in the presence of complex non-convex curves as well as processing of faces with interior loops. To handle such models, we first locate a pair of matching segments on different loops with minimal matching cost and introduce the shortest straight segment connecting those. The method then proceeds as usual on the resulting single cycle.

Curve Networks: We tested our method on a variety of input curve networks (Figure 3.1, 3.4, 3.17) generated by different modeling systems 
[7, 107, 115]. As demonstrated by the figures these networks contain a variety of complex, non-convex cycles. Our method successfully captures the designer intent conveyed by the networks, generating predictable and smoothly flowing quad-meshes interpolating the input curves. While the airplane (Figure 3.17) was created using a classical CAD modeling system, many of the other inputs (car, espresso maker, submarine, starcraft) (Figure 3.17) were generated using sketching tools, which easily introduce noise and inaccuracies that hamper traditional surfacing. Our method is robust to such artifacts.

We compare our outputs on the boat and starcraft to those generated by an artist (Figures 3.3, 3.16). The flow-line structure of our meshes is largely identical to the artist generated one, with only minor differences, such as flow on the side of the boat cabin, where both our and artist interpretations are feasible (our outputs contain a few extra cycles not present in the artist models).

Quantitative Evaluation: On an Intel i7 CPU 870 2.93GHz machine our method takes on average two seconds to quadrangulate a single curve cycle (most of the time spent on matching), making it amenable for interactive surfacing in a sketch based modeler like ILoveSketch [7]. The most time consuming regions are the front of the car (166s) and the top of the speaker (66s) (Figure 3.17). Interval assignment is practically instantaneous, taking 0.1s for an average network and taking 2s to process the largest model (plane). The quad statistics for the models we tested are summarized in Table 3.1 and include numbers of input cycles, number of output quad cycles, mesh size(s), and the number of added extraordinary vertices. All the generated meshes are watertight.

Limitations: Our approach has three broad limitations which can be addressed by future research.

Global context: The biggest limitation of our method is lack of global context. Our flow-line analysis for each input cycle in a network is independent. 
In practice however, most adjacent cycles meet at sharp corners, typically resulting in a similar segmentation and flow across shared curve segments. The context of adjacent cycles could be useful in enforcing flow line continuity across cycles and predicting the flow within an individually ambiguous cycle.

Failure cases: While our algorithm works well on design curve inputs from a variety of sources, it may not provide meaningful results for arbitrarily shaped curve cycles with no perceptible flow-lines.

input       | input cycles | output cycles | quad mesh size | interior vertices
Sphere Bag  | 3   | 9   | 987   | 0
Boat        | 30  | 82  | 4464  | 9
Spaceship   | 41  | 94  | 5008  | 6
Car         | 26  | 70  | 5020  | 13
Espresso    | 54  | 75  | 6904  | 5
Speaker     | 13  | 42  | 8548  | 1
Plane       | 140 | 192 | 10705 | 10
Submarine   | 39  | 103 | 16600 | 31

Table 3.1: Algorithm statistics for different curve networks.

The absence of corners on a completely smooth curve cycle will not provide us a meaningful initial segmentation to refine. In such cases we can impose an initial segmentation based on curvature maxima and arc-length of the input curve.

Algorithmic complexity: While our central idea of flow-line segmentation and matching is conceptually clear, various aspects of our implementation could be streamlined. For example, while most of the parameters used by the method were derived based on clear algorithmic goals, a few, such as drop in Section 3.2.1, are based on trial-and-error, and could be learned from designer quadrangulated examples.

3.5 Conclusions

We presented the first, to our knowledge, method for quadrangulating general designer specified closed 3D curves and curve networks. Our results show the approach to robustly process complex curve networks, generating interpolating quad meshes consistent with designer intent. Our key insight is an interleaved segmentation and matching algorithm, that pairs dominant flow-lines and uses poor matches to guide segmentation refinement, computing a poly-chord graph that captures user-intended bridging directions across a closed curve. 
We advocate the use of stable matching as theprincipled way to formulate our quadrangulation goals and anticipate it tobe well-suited to other problems relating to shape matching or coherence,where both dominant components and their correspondence is sought.Our work points to a number of future directions. Rather than restrictour input to a constrained geometric definition of a design curve network,we attempted to quadrangulate any 3D curve network as a designer would,413.5. ConclusionsFigure 3.17: Quadrangulation and meshing of curve networks. The starsindicate the network locations of the highlighted complex regions.using the principle of flow-line segmentation and matching. A formal per-ceptual study of the precise difference between ambiguous and design curves(Figure 3.2) is thus an ambitious but worthy goal. While our segmentationrefinement strategy works well in general, approaches with theoretical guar-antees of match quality are also worth exploring. Our method focuses onquad-only meshing, however in some cases designer intent is better served byallowing a small number of triangular elements (e.g. Figure 3.14 (b)), mo-tivating a technique for mixed but predominantly-quad meshes. We wouldalso like to apply our technique as-is to the finite-element meshing of closedplanar domains, balancing flow-line alignment against mesh quality.42Chapter 4Modeling CharacterCanvases from CartoonDrawings4.1 Introduction(a) (b) (c)(d)(e)Figure 4.1: Character drawings (c) are traditionally anchored around a skele-ton (a), surrounded by generalized surfaces of revolution (b). 
We use thedrawn character contours (d) and a corresponding 3D skeleton (red-to-bluecoloring reflects near-to-far skeleton depth variation), to automatically com-pute a 3D canvas, employed to freely manipulate the character in 3D (e).In this chapter we describe our second main contribution, a novel ap-proach for automatically constructing a rigged 3D character proxy, or can-vas, directly from a single 2D cartoon drawing and a correspondingly posed,434.1. Introduction(a) (b) (c)(d) (e) (f )Figure 4.2: Character contours alone (left) frequently do not provide suffi-cient information to conclusively determine 3D shape both on occlusion free(top) and partially occluded (bottom) inputs. A 3D skeleton, shown in theinsets, resolves much of the ambiguity present in contours alone facilitatingplausible shape interpretation.user-supplied, 3D skeleton. Such approach allows users to sidestep the time-consuming manual modeling and rigging steps (Fig. 4.1(d,e)). The projecthas since been published in ACM Transactions on Graphics [10].Our 3D canvases allow artists to directly articulate the drawn characters,generate convincing cartoon style character renders from alternate views(Fig. 4.1(e)), and provide support for various 3D effects created by draw-ing on and around the canvas (Fig. 4.23). Using a skeleton as an aid, ourframework infers complex, complete character shapes from individual 2Ddrawings with significant contour depth variation, foreshortening, and mul-tiple inter-part occlusions (Fig. 4.3 (left)) - a significant deviation from priorart, which assumes drawn contours that are largely occlusion free, flat, andnearly perpendicular to the view direction (Section 2.2).Our choice of input and subsequent construction methods are motivatedby the observation [57, 136] that cartoon character anatomy is well describedby a union of body parts supported by a skeletal system, where each partis approximately a generalized surface of revolution (Fig. 4.1(a,b)). 
Artist-drawn character contours are inherently ambiguous (Fig. 4.2) and humanobservers frequently rely on either explicit familiarity with the drawn ob-jects, or on semantic information encoded by additional drawing elements,such as facial features, to consistently interpret the 3D character shape.Such extra information is hard to enumerate or formalize algorithmically;our input skeleton, posed to reflect the character\u2019s structure, helps resolvethese shape ambiguities.444.1. IntroductionTrajectoriesPro\u001fleFigure 4.3: The canvas (center) of the catwoman in Fig. 4.1: (left) thickblack line shows reconstructed 3D contour curves, (right) insets visualizerepresentative trajectories and profiles.Overview Our guiding premise is that when artists create descriptivecharacter drawings, they inherently rely upon and exploit the same percep-tual principles that viewers use to lift drawings off paper and into 3D [116].Following previous work, we rely on viewer preferences for conformity andsimplicity (Section 4.2, Fig. 4.5) in reconstructing individual part geometry.Conformity is the unstated belief that the drawing is an accurate repre-sentation of the 3D character, and that the projected contours of the 3Dcharacters will conform to the drawn contours in the input view and pose.Simplicity (or the law of Pragnanz [69]) states that viewers rely on sym-metry assumptions as strong cues for image understanding [56, 105]. Givenviewer familiarity with character anatomy expected to resemble partwisesurfaces of revolution, this principle suggests a strong viewer preference forenvisioning body-parts with maximal rotational symmetry around the boneaxis (Fig. 4.5 (b)).We augment these two principles with observations about Gestalt contin-uation and shape persistence which help us parse complete, complex draw-ings and reconstruct coherent overall character shapes. 
To handle inter-partocclusions in the drawings, we exploit Gestalt continuation by noting thatviewers resolve occlusions in line drawings by grouping together disjointcurves whose end-points can be smoothly connected [69] (e.g. the outlinesof the tights of the catwoman in Figure 4.1). In reconstructing the completecharacter geometry from a single view drawing, we rely on the notion ofshape, or contour, persistence. Contour persistence or the non-accidentalview assumption [93, 138] indicates that viewers perceive the artist-selectedview and pose as non-accidental and expect the drawn contours to be in-454.1. Introductiondicative of contour shape in alternate, and especially nearby, views.We begin the modeling process by segmenting the input 2D contours intosections outlining individual body parts corresponding to the bones of theinput skeleton. We resolve inter-part occlusions and group disjoint outlinesegments by leveraging skeletal depth and Gestalt continuation. We use thecomputed contour segmentation to generate the 3D canvas geometry, mod-eling body parts using generalized surfaces of revolution. While a canonicalsurface of revolution is defined by rotating a fixed planar profile curve alonga circular trajectory around an axis, we account for a range of body shapesby supporting both more complex closed planar trajectory curves, and byallowing the profile shape to vary smoothly as the profile rotates aroundthe part\u2019s bone or axis (Fig. 4.3). Supporting profile variation is criticalfor processing asymmetric part contours, such as those on the catwoman\u2019shoofs. To balance conformity against simplicity we first refine the artistgiven straight-line skeleton to a geometric curve-skeleton [29], and symmet-rically locate it with respect to the artist-drawn contours. 
The surfacesof the different body parts are then optimized to form a unified 3D canvascentered around this curve-skeleton by enforcing conformity while balancingindividual part simplicity against contour persistence across the canvas. Ourfinal canvases are represented as quad-dominant meshes (Fig. 4.3 (center))with explicit angular and axial parameterization which supports a range oftexturing effects (Fig. 4.23).Contribution Our overall contribution is a framework for computing abelievable 3D character canvas from two pieces of user input: a vectorized,single-view, descriptive, 2D contour drawing and a correspondingly createdand posed 3D skeleton. Our key technical contributions are two algorithmsderived from perception principles. First, we present a novel algorithm forcorrectly segmenting artist-drawn contours into body part outlines asso-ciated with individual skeletal bones, which can robustly handle multipleinter-part occlusions (Section 4.3). Second, we show how to use this seg-mentation to generate believable 3D character canvases which balance sim-plicity and persistence, allowing for variable contour depth and overcominginaccuracies in skeleton posing (Section 4.4). Our resulting 3D charactercanvases are, as the name suggests, an ideal support structure for painterlystrokes and cartoon rendering; however, they are not designed to capturethe complex detail of realistic 3D character models.464.2. Framework Overview(a) (b)Figure 4.4: (a) Lacking part information, character shape reconstructioncan at best exploit overall shape smoothness, e.g [68, 95]; (b) by using askeleton to facilitate contour partition and part modeling, we generate amore believable character shape.Evaluation We evaluate our approach in a number of ways (Sections 4.5, 4.6).We show that the task of positioning a 3D skeleton to match a 2D cartoondrawing is well-defined and intuitive, taking most artists less than ten min-utes for typical cartoon drawings. 
We validate our segmentation algorithmvia an informal evaluation, verifying that viewers consistently segment andassociate character contours to skeletal bones and that this segmentationmatches our algorithmic output. We reproduce ground truth 3D charactershapes from a contour rendering and 3D skeleton, and compare our resultsto both ground truth and artist drawings created from the same input and inthe same views, validating that our results are visually similar to both. Weshow a variety of character canvases created from diversely sourced contourdrawings and 3D skeletons, demonstrating our approach to be resilient tocomplex views, and poses with multiple occlusions and significant foreshort-ening. These canvases are illustrated using cartoon shading and other formsof non-photorealistic rendering, and are confirmed by artists to show plau-sible alternate-view renders of the drawn inputs. Finally, we compare ourmethod to prior work, producing similar output quality with significantlyless user-input.4.2 Framework OverviewWe now describe the three key components of our canvas computation frame-work, and the observations that motivate them (Fig. 4.6).Algorithm Input The input to our system is a 2D vectorized cartoondrawing and a correspondingly posed 3D skeleton with no extra annotation.Like other research in articulated figure modeling [8, 17, 123] our approachis based on the proposition from cartoon drawing literature [57, 136] that474.2. 
Framework Overview(a) continuation(c) persistence(b) simplicityFigure 4.5: (a) Perceptual studies indicate that viewers group curves thatcan be smoothly joined together ((a), right), seeing those as a continuationof one another; while treating those with non-smooth transition ((a), left)as distinct; viewers tend to prefer interpretations that balance part simplic-ity (b) against contour persistence, preferring interpretations that preservecontour shape under small view changes (c).character shape is well approximated by a union of body parts representedby generalized surfaces of revolution around a skeletal structure. As theshape of a surface of revolution is driven by the choice of an axis, leveragingthis observation for modeling requires a skeletal structure (Fig. 4.4). Whilecurvature extrema and discontinuities in character contours hint at the un-derlying skeletal structure, automatic skeleton extraction [17, 20] may notreflect the artist-intended shape as it always aligns the skeleton with thedominant axis in elliptical regions. This is illustrated by Fig. 4.2 (top),where using a geometric skeleton would lead to the \u201dsnake swallowing anelephant\u201d reconstruction on the right. This bias is confounded by ambiguousskeleton topology in the presence of occlusions (Fig. 4.2, bottom). Fortu-nately, artists can consistently and efficiently pose a 3D skeleton to matcha 2D contour drawing (Section 4.5.1), motivating our choice of input.Skeleton-Driven Contour Segmentation To successfully capture bodyparts with surfaces of revolution, we must first identify which portions of theinput contour belong to the same body part (Fig. 4.6 (a)). Our algorithmtherefore begins by segmenting the input contours into sections associated484.2. 
Framework Overview(b) (c)+(a)canvas surfacingpartsegmentationcomputingcurve-skeletonFigure 4.6: Canvas construction: Given a sketch and a skeleton (shownin side view) we first segment the input contours into sections associatedwith skeletal bones (a), correspondences shown by matching color), cor-rectly resolving occlusions; we use the segmentation to replace the straight-line skeleton by a curved-skeleton optimized for symmetry (b); and finallygenerate maximally simple body part geometries around this new skeletonwhile maintaining contour persistence with respect to the input drawing (c).with each bone. This segmentation is guided by the principles employed in3D skeleton-driven surface segmentation algorithms, e.g [6, 29]. These meth-ods construct surface charts whose connectivity reflects skeletal adjacencies,associating charts with proximal bones, and aligning chart boundaries withcurvature extrema. We apply these principles of surface segmentation to2D contour drawings. Since, in the presence of occlusions 2D proximity isnot a reliable proxy for 3D proximity (Fig. 4.8), we leverage skeletal depthinformation to facilitate correct proximal bone-to-contour association anduse Gestalt continuity [69] to correctly group disjoint contour segments (seeSection 4.3, Fig. 4.5 (a)).Canvas Modeling We construct a 3D canvas from our segmentation byexploiting the perceptual cues of sketch conformity, simplicity, and contourpersistence (Fig. 4.6 (c)). In our context, conformity requires that the con-tours of the created 3D canvas project onto the 2D character contours in theinput drawing with reasonable accuracy, and simplicity implies a preferencefor maximally symmetric surface-of-revolution part geometries (Fig. 4.5 (b)).Maximizing symmetry when recovering 3D part geometry requires an opti-mal local axis of revolution. 
However, while the artist-posed straight-lineskeletons adequately describe the character structure, they are not detailedor accurate enough to capture a geometrically centered curve skeleton [29]of the target character surface. We therefore generate the desired curveskeleton by leveraging a correspondence between the straight skeleton andthe segmented 2D contours (Fig. 4.6 (b)), before computing the final canvassurface. We position the curve skeleton to maximize the symmetry of body494.3. Part Segmentationparts. (Section 4.4.1). Using only conformity and simplicity to compute thecanvas geometry around this curve skeleton leads to plausible individual partgeometries, but ignores the shape correlation between adjacent body partsoutlined with a single contour. Contour persistence (Fig. 4.5 (c)) argues forthese joint contours to retain their shape when the viewpoint changes, andespecially to avoid introducing sharp discontinuities [138]. Accounting forsimplicity alone can introduce such undesirable artifacts (see Fig. 4.5 (c))and the accompanying video). We therefore enforce persistence across thecharacter model by restricting the change in local profile slope with respectto its corresponding axis, allowing trajectory shape to deviate from a perfectcircle to accommodate this constraint (Section 4.4, Fig. 4.3).4.3 Part SegmentationExisting research on skeleton-assisted part segmentation of 3D shapes [6, 29]employs a number of perception-driven segmentation criteria, variants ofwhich apply to the segmentation of 2D contours (Fig. 4.7). The primarycriterion is topological - in 3D each bone corresponds to a single segment, andsegments are adjacent only if the corresponding bones are. The secondarycriterion is bone proximity - segments are computed so as to be closest totheir associated bones. 
Lastly, while the placement of segment boundaries isdominated by proximity to the corresponding bones, boundary locations arealigned with local curvature extrema on the surface to better match bendsat skeletal joints. In describing how to apply these criteria for 2D contoursegmentation we first address the simpler, occlusion-free setup, and thendescribe the extension to the general case.Bisector-Based, Occlusion-Free Contour Segmentation Absent oc-clusions, the contour of a drawn character is a single closed curve. In thisscenario (Fig. 4.7) each terminal bone corresponds to a single segment andeach interior bone (purple in the Figure) corresponds to two segments, oneon each side. A circular \u201dhalf-edge\u201d traversal of the contour uniquely de-fines the connectivity between the segments (Fig. 4.7(b)). We can thereforegenerate a segmentation by appropriately positioning the boundary pointsbetween these topological segments. While we can optimize for proxim-ity by segmenting the contours using the Voronoi diagram of the bones(Fig. 4.7(c)), as-is this segmentation results in a different, undesirable, seg-ment connectivity; note in particular the green and blue segments at thebottom. However, using a subset of the diagram intersections - specifically,504.3. Part Segmentation(a) (c) (d)(e) (f)(b)zoom for (e)zoom for (f)zoom for (e)zoom for (d)Figure 4.7: Skeleton-driven segmentation of a simple contour (a) must matchskeletal topology (b) and reflect bone proximity. Proximity alone does notguarantee skeleton matching segment topology (c). A more topologicallyconsistent segmentation (d) may need to be refined by bisector rotation toavoid segment overlap (e). Boundaries are then adjusted to best align withnegative curvature extrema (f).the first intersection between the contour and a ray emanating from eachskeletal joint along its angular bisector - to define boundaries of contoursegments associated with the participating bones (Fig. 
4.7(d)) - results in asolution largely consistent with the circular ordering. Inconsistencies showup only at locations where the contours veer far from the skeleton; at theselocations bisector rays starting at adjacent joints can cross prior to intersect-ing the contour, resulting in ill-defined, overlapping, segments. Such interiorintersections can be trivially detected and fixed by rotating the offending bi-sectors in opposite directions to move the intersection onto or outside thecontour (Fig. 4.7(e)). The resulting segmentation has the desired connec-tivity and each segment is adjacent to its associated bone. As a last step,we adjust boundary locations to align them with bends at skeletal joints bymoving them to nearby curvature extrema (Fig. 4.7(f)).Contour Segmentation with Occlusions While the algorithm aboveworks extremely well for occlusion-free closed contours, real-world charactercontours contain inter-part occlusions which pose two further challenges(Fig. 4.8). First, in the presence of occlusions, 2D distances are not a reliableproxy for 3D distance; in Figure 4.8(b), for example, the contour between thepinkie and ring finger bones is closer, in 3D, to the ring finger bone despitebeing closer in 2D to the pinkie bone. Second, occlusions fragment the514.3. Part Segmentation(a) (c) (e)(b) (d)Figure 4.8: A character drawing with inter-part occlusions contains multiplecontour curves and the left and right outlines of a body part may nowcontain multiple Gestalt continuous segments (a); thus 2D proximity basedsegmentation is no longer adequate (b). Taking into account skeletal depthas well as 2D proximity but neglecting Gestalt continuity leads to better,but still perceptually wrong results (c,d). Our framework accounts for bothconsiderations resulting in the desired segmentation (e).single closed contour into multiple disjoint contours, complicating the use oftopological criteria for segmentation. 
When contours are fragmented a bonecan be associated with any number of disjoint segments; e.g. in Figure 4.8,the terminal bone of the partially occluded ring finger should be associatedwith two disjoint contour segments. Furthermore, adjacent skeletal bonesmay correspond to segments on different contour strokes. Nevertheless, aswe discuss below, the overall bisector-based segmentation strategy remainsapplicable, but requires modifications that leverage the depth informationprovided by the input 3D skeleton to better estimate proximity, and useGestalt continuation to analyze disjoint contours.2D to 3D Proximity We first note that 2D proximity is still a good proxyfor 3D proximity; a bone can be associated with a farther away contourusing the bisector based approach only if the body part associated withthis bone is partially occluded and the contour in question belongs to theoccluder. For typical 3D character geometry, the depth ordering betweenbones reflects depth ordering between their corresponding body parts, aswell as their contours. Thus in general, a contour closest to a bone in 2D,should be associated with a different bone only if that bone is nearer to theviewer than the original one. While it is conceivable to create geometry andposes that violate this assumption, drawings of such shapes are inherentlyambiguous even to human observers and are thus beyond the scope of this524.3. Part Segmentation(a) (b) (c) (d) (e)Figure 4.9: Segmentation algorithm: iterating between a z-ordering basedpass and consistency validation.chapter. Consequently for closest to the viewer bones we can still use 2Dproximity as a reliable proxy for its 3D counterpart. 
Similarly, for bonesfarther away we can still continue to rely on 2D proximity as long as weignore, or skip, contours associated with nearer to the viewer bones.While a total depth ordering of bones may not exist, a total ordering ofmini-bones is readily created by precisely subdividing bones that overlap indepth (a la the painter\u2019s algorithm) or approximately by simply subdividingall bones into mini-bones of some small maximum length (one tenth of theshortest bone in our implementation). As discussed below, the latter ap-proach helps address the one-to-many bone to segment matching problem,as we can plausibly assume that each mini-bone has at most one visible con-tour segment on each side. Mini-bones resulting from subdividing a skeletalbone are seen as meeting at unarticulated valence two joints.Topological Consistency The bisector-based segmentation algorithm forocclusion-free inputs ensures topological consistency along the closed inputcontour - that is, adjacent bones are mapped to adjacent, continuous, con-tour segments. When occlusions are present, adjacent mini-bones can beassociated with different, disjoint, contour segments (Fig. 4.8(c,d)) or al-ternatively with hidden, or imaginary, segments. Unlike the occlusion-freecase, a traversal of a single input contour curve in a circular fashion doesnot induce a traversal of the skeleton and vice versa; at most, we can hopethat, as we traverse along mini-bone half-edges on the skeleton using thesame counter-clockwise traversal, the associated contour portions should ei-ther be continuous, or plausibly connected by an obscured contour portion.We argue that humans employ the Gestalt continuity principle to evaluateassociation probability in such cases, and ignore associations inconsistent534.3. Part Segmentationwith this principle (Fig. 4.8(c,d)).Rather than directly assigning contour segments to bones so that everyassignment is Gestalt continuous, we employ a restart mechanism with ataboo list. 
After assigning mini-bones to contours, we evaluate all assign-ments of adjacent mini-bones for Gestalt continuation. When assignmentsare inconsistent, as is the case in Figures 4.8(c,d), the proximity criterionargues for keeping the correspondence for the segment closer to the bone in2D, while disassociating the segment further away from the bone. If andwhen an assignment is deemed inconsistent, we restart the near-to-far pro-cessing algorithm as the disassociated segment needs to be associated witha different bone.Final Algorithm Our final segmentation algorithm that accounts forboth proximity and topological consistency proceeds as follows (Fig. 4.9):\u2022 We traverse all mini-bones in near-to-far depth order (Fig. 4.9(a)).The rationale for the ordering is that shallower bones, closer to theviewer, have priority over deeper bones in associating with visible con-tours as a consequence of the 2D to 3D proximity linkage.\u2022 For each joint of a mini-bone we compute two joint bisector rays in 2D,one on each side of the joint as described in Section 4.3, and associateeach ray with the first intersecting contour segment that has not yetbeen mapped to a shallower bone. The ray intersections (from a singlejoint for a terminal mini-bone, or from two joints on the same sideof internal mini-bones) demarcate contour segments that are mappedto the mini-bone. Joint bisector ray intersections for deeper bonessegment and associate with the closest intersecting contour segmentthat has not already been mapped to a shallower bone. 
The orangebone for example, does not associate with the tip of thumb since thistip is already mapped to the shallower blue bone in Figure 4.9(b).\u2022 Once all the mini-bones for a sequence of bones connected via valencetwo joints have been traversed (or an individual bone if it has no va-lence two joints), we evaluate the contour segments associated withthese mini-bones for Gestalt continuity (Section 4.3).\u2022 If erroneously mapped contour segments are detected, we disassociatethem from their current bones, forbid them from being associated tothese bones in the future, and restart the algorithm. In Figure 4.9(b),544.3. Part Segmentationonce the incorrectly associated segment on the right side of the handis found, we restart the algorithm and prohibit the ring finger fromassociating with that segment. In the next iteration we generate theconfiguration in Figure 4.9(c). Similarly, a new incorrect segment forthe pinkie bone is found and the algorithm is restarted (Fig. 4.9(d)).Finally, we finish with the correct assignment in Figure 4.9(e).Mini-joint bisector raysStrictly speaking the joint bisectors for internal mini-bones are simply thetwo opposing directions orthogonal to the bone in 2D (blue in the inset).For mini-bones close to the end-joints of an original bone, such orthog-onal internal bisectors are likely to intersect the joint bisectors emanat-ing from these end-joints before reaching the contours resulting in over-lapping segments which would need to be fixed later on (Section 4.3).1\/31\/3To reduce the number of subsequent fixes we preemptivelyrotate the internal bisectors. 
Specifically, we split the boneinto thirds; the joint bisectors of the mini-bones in the mid-dle third are left orthogonal to the bone, while at both endsof the bone we set the internal bisector angle to smoothlychange from orthogonal to aligned with the end-joint bisec-tor (see inset).Evaluating Segment ContinuityFor each pair of rays bounding a mini-bone, or sequence of mini-bones,we evaluate whether the mini-bone joint assignments are consistent withthe Gestalt continuity principle by testing if their associated contours areperceived as a continuation of one another. We consider all the possiblescenarios enumerated in Fig. 4.10:A. In the most common scenario where both rays intersect the same con-tour segment (Fig. 4.10(a)) this contour is clearly continuous.B. If neither ray is associated with a contour intersection (Fig. 4.10(b))we similarly deem the assignments as consistent; this case suggests thatthe contour segment associated with the mini-bone chain between them isoccluded.C. In more rare cases, the two rays intersect different contour segments im-mediately next to a shared T-junction (Fig. 4.10(c)).554.3. Part Segmentation(a) (b) (c) (d) (e)Figure 4.10: Possible scenarios of contour intersections (filled circles) forrays bounding a mini-bone. Empty circle means the ray has no associatedcontour intersection.behindin frontThis scenario is consistent with a local occlu-sion (see inset). To associate each mini-bonewith a single contour, we move the intersectionpoint on the occluded contour (see inset) to theT-junction. The current mini-bone is now asso-ciated only with the occluding contour.D. In the fourth scenario, the two rays may intersect different contoursegments while not next to a common T-junction (Fig. 4.10(d)).This is the first scenario where Gestalt continuity needs tobe taken into account to decide if the assignment is topologicallyconsistent. 
According to perception studies [55], more than 90%of viewers visually connect disconnected curve segments into asingle contour if the angles between the segments and a straightline connecting their end-points (see inset) are less than 18\u25e6. Weemploy this test as-is to evaluate Gestalt continuity for pairs of ray-contourintersections along different contours. If the two contours are deemed dis-continuous, we assume that the ray intersection, or contour assignment, thatis closest to the bone in 2D is more likely to be correct, and disassociate thefarther away contour segment.E. One ray intersects a contour segment and the other ray has no associ-ated intersection (Fig. 4.10(e)). Here we test whether Gestalt continuity issatisfied across a sequence of mini-bones that have no associated intersec-tions due to occlusion using the same test as above.564.4. Canvas ModelingRotating intersecting raysSimilar to the occlusion free scenario, if two rays intersect prior to intersect-ing the same contour curve, they conceptually create overlapping segments.Thus, to preserve consistency we rotate them to flip intersection order. Weapply the same rule to rays intersecting disjoint but Gestalt continuouscurves, using the criterion above to determine continuity.Once the distance and continuity driven segmentation is complete, welocally slide the boundary points on their associated curves towards localcurvature extrema. Whenever a section of a contour remains unmapped,we split it between the closest adjacent mapped segments. In our exper-iments the resulting contour segmentations agreed with viewer intent (seeSection 4.5.3), and we never observed an entire curve left unassigned.4.4 Canvas ModelingA canonical surface of revolution can be computed analytically from its 3Daxis of revolution and its 2D contours (Fig. 
2.2 (top)) by first positioning thecontours in 3D by leveraging rotational symmetry at all contour points, andthen defining the surface by setting the radius of revolution at each point onthe axis to the orthogonal distance from the axis to the 3D contours [137].In this scenario, the part segmentation computed in Section 4.3 would besufficient to precisely define a 3D canvas for contour drawings that depictcanonical surfaces of revolution around corresponding bones of the input 3Dskeleton. Unfortunately, character body parts are rarely perfectly symmet-ric. Furthermore, our input, artist-provided 3D skeletons are typically onlya coarse piece-wise linear approximation of a geometrically centered exactcurve-skeleton [29] of the target character surface (Fig. 4.11).To recover a plausible canvas surface despite inexact skeleton posing andimperfect part symmetry we use a three-step process. We first compute a3D curve-skeleton which is close to the artist defined straight-line one, butwell-centered with respect to the drawn contours (Section 5.1). We then usecontinuity along contour curves to determine the canvas connectivity acrossinput skeleton joints, and construct a quad dominant mesh to represent thecanvas (Section 5.2). Finally, we compute the optimal 3D vertex positionsacross the canvas (Section 5.3), balancing rotational part symmetry withrespect to the curve skeleton against contour conformity and persistence.574.4. Canvas Modeling4.4.1 Computing a 3D Curve-Skeleton and 3D ContoursWe define the curve skeleton to have the same topology as its straight-line counterpart, and aim to position each branch so that it is maximallycentered with respect to the contours of its corresponding body part. 
We initialize the curve skeleton by evenly sampling the straight-line skeleton, adding samples along the continuation of terminal bones until the point where that continuation's projection into 2D space intersects with a drawn contour, to support surface formation in these areas. Each curve skeleton vertex $o_t$ is associated with a planar trajectory $t$ with the vertex serving as its origin. We simultaneously compute the positions of both the curve skeleton vertices and the right and left contour points on their trajectories, balancing contour symmetry with respect to the 3D curve-skeleton, similarity between the curve- and straight-line skeletons, and 3D contour smoothness subject to input conformity (Fig. 4.11).

Symmetry. In our computation we seek three-fold symmetry. First, we aim for left and right contour curves to be maximally mirror symmetric around the curve skeleton. Given a planar trajectory with center $o_t$ and normal $n_t$ that intersects the 3D contours at points $c_t^L$ and $c_t^R$, mirror symmetry is satisfied if the contour points are symmetric around the plane (with plane normal $i_t$) containing the axis of revolution $(o_t, n_t)$ and the view direction ($z$-axis), as shown in the inset. We also seek local front-back symmetry at each contour point expressed as an expectation for the surface normal along the contour to be inside the plane spanned by the local axis of revolution and the contour point. Finally, to optimize the rotational symmetry of the surface profiles connecting adjacent trajectories we expect the lines connecting adjacent trajectory origins to be aligned with their respective normals. The combined symmetric energy is formulated as,

$$E_s = \sum_t \left\| (c_t^L - o_t) \cdot i_t + (c_t^R - o_t) \cdot i_t \right\|^2 + \left( n_t^L \cdot \left( n_t \times (c_t^L - o_t) \right) \right)^2 + \left( n_t^R \cdot \left( n_t \times (c_t^R - o_t) \right) \right)^2 + \sum_{(t',t)} \left\| (o_{t'} - o_t) \times (n_t + n_{t'})/2 \right\|^2 \quad (4.1)$$
Canvas Modeling(b) (c)(a) (d)yxyzFigure 4.11: Curve skeleton computation: (a) user posed straight-line skele-ton with the initial trajectory centers and their corresponding trajectorycontour points marked; (b,c) front and side views of curve skeleton and 3Dcontours; (d) final surface with contours highlighted.where it = nt \u00d7 (0, 0, 1), and the last term\u2019s summation index (t\u2032, t) repre-sents all adjacent pairs of trajectories. The first term expresses the mirrorsymmetry between contours; the next two express the local front\/back sym-metry at each contour; and the last term encodes origin alignment. Sincethis term is direction invariant, we explicitly constrain the lines connectingpairs of adjacent trajectory origins to have the same orientation as the nor-mals (ot\u2032 \u2212 ot) \u00b7 (nt + nt\u2032) > 0 with consistently oriented nt and nt\u2032 . Lastlyto ensure trajectory planarity we enforce(cLt \u2212 ot) \u00b7 nt = (cRt \u2212 ot) \u00b7 nt = 0. (4.2)Skeleton similarity Since we expect the artist skeleton to approximatethe target curve skeleton shape, we minimize the distance between the jointsjc and jl on the two skeletons,Ec =\u2211j\u2016jc \u2212 jl\u20162. (4.3)Contour depth Finally, we minimize depth change along contours,Ez =\u2211(cst ,cst\u2032 )(cst .z \u2212 cst\u2032 .z)2 (4.4)where cst and cst\u2032 , s \u2208 {L,R} are consecutive points on the same contourcurve. This term is most important at joints, where it communicates depthinformation between adjacent body parts.In the combined energy functional, symmetry and skeleton similarity areassigned unit weights, while contour depth is assigned a smaller weight of0.1:E = Es + Ec + 0.1Ez. (4.5)594.4. Canvas ModelingTrajectory Normal Computation Simply including the trajectory nor-mals nt as unknowns in Equation 4.5 results in a highly nonlinear for-mula that is challenging to optimize efficiently. 
We therefore reduce theenergy complexity to a simple quadratic formulation by independently pre-computing these normals. In general we expect trajectory normals to beclose to the directions of the straight-skeleton bones that the trajectoriesare originally associated with. On a curved skeleton, however, we expectthese directions to change smoothly at valence 2 joints. We use the segmen-tation to determine the best transition angle by considering whether the jointhas visible segment boundaries associated with it (see in-set). If so, we rotate the axis at the curve-skeleton verticesclosest to the joint so that the plane will intersect the con-tour just next to the boundary point. If both boundariesare visible we use an average rotation to best fit both,while if no boundary is visible we rotate the axis to therelevant joint bisector. We then smoothly propagate therotation along the bones. Note that those rotations maydiffer from the joint bisector, shown as a red dashed line in the inset.Contour-Skeleton Matching To account for input contour shape, weneed to match the curve-skeleton vertices of each bone with densely sampledpoints on the input contours that we previously associated with this boneduring our segmentation process. Incorporating the search for best skele-ton\/contour correspondences into the curve-skeleton computation is bothchallenging and time consuming. We therefore pre-compute the matches byleveraging the expectation that contour points on each trajectory are mir-ror symmetric around the local trajectory axis. This expectation impliesthat the line connecting such pairs of points should be orthogonal to the 2Dprojection of the local axis. To compute the correspondences for each ini-tial curve-skeleton vertex, we shoot rays left and right orthogonally to localtrajectory axis nt to locate pairs of intersections on contours belonging toopposite sides of the body part. 
Note that in the presence of occlusions wemay locate only one such intersection, or no intersections at all. These inter-sections are used as the image space locations of the corresponding contourpoints and are fixed throughout the optimization process.We consequently solve for the 3D positions of the curve-skeleton verticesand the depth of their associated contour points using a quadratic solverthat minimizes the combined energy function subject to the equality andinequality constraints above. We then compute the radii rt of each trajectory604.4. Canvas Modelingas the average distance from its origin to its two contour points and use thosein the subsequent canvas mesh computation step.(a) (b) (c)Figure 4.12: Canvas connectivity (a) with close-ups of quad strips betweentrajectories (b) and triangulated terminal trajectories (c).(b)(a) (c)Figure 4.13: Connectivity across joints: (a) visually continuous parts; (b)Discontinuous parts; (c) the top part is deemed continuous with both lowerones, while the two bottom parts are deemed discontinuous since their sharedcontour curve has a cusp between them.4.4.2 Canvas ConnectivityWe represent the canvas using a set of planar, closed vertex cycles, or trajec-tories circling the skeleton, connected by a quad-dominant mesh. (Fig. 4.12).We place cycles around each trajectory center computed in the previouscurve-skeleton computation stage; all cycles have the same number of ver-tices and a consistent circular indexing facilitating explicit angular and axialparameterization of the parts. We then form quad strips between pairs ofadjacent cycles along each skeleton bone placing edges between vertices withsame angular index on both (yellow strip in Fig. 4.12(b)) and triangulatethe last, terminal, cycles at each terminal joint (yellow, Fig. 4.12(c)). Theconnectivity choices at interior joints are determined based on the interac-tion between the drawn outlines of the participating parts. Specifically, for614.4. 
Canvas Modeling(a) (b) (c) (d) (e)Figure 4.14: Given the input sketch (a), contour persistence indicates thatside view contours (b,c) significantly differing from front-view ones are un-desirable. Viewers similarly do not anticipate extreme foreshortening (d).Our result (e) is persistent with the front view contours.each pair of parts adjacent to a joint we determine if the parts are a contin-uation of one another or not. If two body parts are deemed continuous wefuse their canvas surfaces, placing a quad strip between the part trajectoriesimmediately adjacent to one another across the shared joint (blue strip inFig. 4.12(b)). If a part has no continuous neighbors across an interior joint,its last cycle at the joint is simply triangulated (blue, Fig. 4.12 (c)).Two parts are deemed continuous if their outlines are either adjacentto one another along a single smooth contour curve or are Gestalt contin-uous (Fig. 4.13). We deem a contour curve smooth if it has no cusp atthe boundary between the two outlines. This smoothness requirement ismotivated by the observation that artists frequently omit drawing small T-junctions, connecting what in 3D should be separate contours into a single,albeit non-smooth one (Fig. 4.13(c)). Our joint processing can, by design,lead to non-manifold, as well as self intersecting canvases. If desired, the sur-facing step (Section 4.4.3), which leverages our current canvas connectivity,can be followed by a more complex fusion process similar to [8, 17] resultingin a smooth manifold mesh. However, we found this step unnecessary forthe canvas applications shown in this chapter.4.4.3 Canvas SurfacingThe key step in computing the canvas shape is to position trajectory verticesbalancing the goals of maximally symmetric body parts, contour conformity,and persistence. The remaining vertices, those in the triangulated regionsnext to terminal trajectories, are computed in a post-process which seeks624.4. 
Canvas Modelingfor smooth canvas geometry overall.We constrain each trajectory t with vertices vt0, . . . , vtn = vt0 to be or-thogonal to the previously computed normal nt,(vi \u2212 vi\u22121) \u00b7 nt = 0 i = 1, . . . , n.Part Symmetry To maximize part symmetry we seek canvas trajectorieswhich are as circular as possible and aim for profiles connecting consecutivetrajectories along each bone to have as constant as possible angle of revolu-tion, or slope, with respect to each trajectory\u2019s axis. We cast circularity asa quadratic energy term,Ec(t) =\u2211i(vti \u2212 (vti\u22121 + vti+1)\/2\u2212 \u03b4ti)2 (4.6)where the vectors \u03b4ti are the Laplacian coordinates of the i\u2019th vertex in aplanar circle whose normal and radius are the pre-computed nt and rt. Toaccount for different axes of revolution assigned to different trajectories, weexpress profile symmetry for each trajectory t and a neighboring trajectoryt\u2032 asEp(t, t\u2032) =\u2211i(vti \u2212Mt\u2032,tvt\u2032i \u2212Rn(vti+1 \u2212Mt\u2032,tvt\u2032i+1))2, (4.7)where Rn is a rotation matrix of pi\/n around the axis (ot, nt), and Mt\u2032,t isthe shortest path coordinate transformation aligning the axis (ot\u2032, nt\u2032) with(ot, nt).Conformity We want the visible contours of the canvas to match theartist drawn ones. To achieve this, the contour vertices on the final trajec-tories, i.e. those whose normals are in the view plane, must coincide in 2Dwith the previously computed trajectory contour points ct. While we donot know the final trajectory shape, we assume that this shape will remainclose to the ideal circular one; we therefore select the left and right verticeswhose normals on these ideal trajectories are most orthogonal to the viewdirection as the potential contour vertices. For each such vertex vt, if amatching (left or right) trajectory contour point ct exists we force their 2Dlocations to coincide,vt.x = ct.x and vt.y = ct.y.634.4. 
Canvas Modelingfrontotot\u2019 v t\u2019^v t^side ot\u2019otv t\u2019^v t^v t\u2019pt\u2019pt\u2019Figure 4.15: We constrain the profile angle to the range between the idealprofile slope given by the two ring radii and the axis direction.Persistence Previous work has relied upon part symmetry and contourconformity alone when attempting to recover 3D models from characterdrawings. This produces intuitive individual part geometries, and plausibletransitions between both discontinuous parts and those deemed continuousalong both side contours; however, it also generates sharp depth discontinu-ities, contradicting viewer perception, between parts classified as continuousalong only one side contour, such as a leg and a torso (Fig. 4.14(b)). Thereason for such discontinuities is that in these situations the trajectoriesadjacent across the joints typically have vastly different radii and far apartcenters. Since the artist contours provide no hint of discontinuity, we believethat viewers mentally eliminate them by deforming the parts to bring themcloser together. Moreover, we speculate that viewers expect the charactercontours to maintain their overall drawn shape in alternate views up to in-evitable foreshortening, avoiding the behavior visualized in Figure 4.14(c).This observation is supported by the minimal-variation principle observedby [138]. Following these observations we incorporate persistence into oursetup as follows. When two parts are continuous along only one side con-tour we explicitly minimize the depth variation along quad-strips connectingthese parts,Ed(t, t\u2032) =\u2211i(vti .z \u2212 vt\u2032j(i).z)2 (4.8)where t is the trajectory with the smaller radius and vt\u2032j(i) is the closest vertexto vti in image space, on the larger trajectory. We use vertex positions onperfect circular trajectories with centers ot and ot\u2032 and radii rt and rt\u2032 tocompute these distances. 
Note that both values vti .z and vt\u2032j(i).z are free,but the correspondences between their vertices j(i) are fixed throughout theoptimization.644.4. Canvas ModelingTo avoid creating discontinuities elsewhere, when two parts are con-tinuous along both side contours, we minimize profile variation along thequad-strip joining them using Equation 4.7. This formulation leverages theslope along the two contours to optimize for depth variation consistent withviewer perception.Lastly, to avoid undesirable derivative discontinuities (Fig. 4.14(c)) any-where across the canvas surface we explicitly constrain the profile angle withrespect to each axis of revolution to the range between the ideal profile slopegiven by the two ring radii and the axis direction (Fig. 4.15),(vt\u2032i \u2212 v\u02c6t\u2032i ) \u00b7 (v\u02c6t\u2032i \u2212 v\u02c6ti) \u2264 0(vt\u2032i \u2212 pt\u2032i ) \u00b7 (v\u02c6t\u2032i \u2212 v\u02c6ti) \u2265 0Here v\u02c6ti are the positions of the corresponding cycle vertices vti on an idealcircular trajectory, and pt\u2032i = v\u02c6ti + ot\u2032 \u2212 ot. In Figure 4.15, for the trajec-tory t\u2032 with an adjacent trajectory t, those two inequalities constrain vertexpositions along t\u2032 to lie within the green ring whose boundaries are derivedfrom the contour slopes between the pair of trajectories t and t\u2032.Given the terms above we proceed to optimize symmetry and persistenceat joints subject to the trajectory planarity, conformity and profile slopeconstraints listed above:E =\u2211tw(rt)Ec(t) +\u2211(t,t\u2032)\u2208B\\JEp(t, t\u2032) +\u2211(t,t\u2032)\u2208JEd(t, t\u2032), (4.9)Here B is the set of pairs of canvas trajectories connected by a quad strip andJ is the subset of such pairs with only one-sided contour continuity acrossjoints. To promote the preservation of smaller trajectories, where even asmall absolute error introduces large deviation from the ideal circular shape(Fig. 
4.14(d)) we introduce per-trajectory weights w(rt) = 25e\u2212(rt\/2\u03c3)2 with\u03c3 set to one third of the average trajectory radius. All other terms in thefunctional are assigned unit weights. To avoid depth ambiguity, we fix thez coordinate of one vertex. We use a quadratic programming package [50]to obtain the desired minimizer.The resulting canvas is smoothed using standard Laplacian smoothing,while weakly holding the positions of contour vertices to eliminate localartifacts that can emerge due to imperfections in the input contours andsmall surface discontinuities due to the use of range constraints. To positionthe vertices in the triangulated regions next to terminal bone tips we use asimple Laplacian formulation that enforces tangent continuity with the restof the surface.654.5. Perceptual and Design Validation4.5 Perceptual and Design ValidationWe perform three-fold validation of the key aspects of our algorithm: weevaluate artist ability to provide the desired inputs, compare our results toground truth and artist drawings, and validate our segmentation algorithmvia an informal evaluation.4.5.1 Creating Overlaid 3D SkeletonsCurrent animation practice uses 2D character drawings, such as those usedas inputs to our system (e.g. Fig. 4.1), as a visual reference to manuallyauthor a 3D character model in a symmetric canonical pose [83]. A 3Dskeletal structure is then interactively created and positioned within this 3Dmodel. Our workflow expects animators to effectively create a 3D skeletonwithout an explicit 3D model, and pose it directly over a 2D characterdrawing.To ensure the viability of our workflow, we asked three Maya animatorsto create 3D skeletons over two ground truth drawings (Fig. 4.16). Two ani-mators (purple and maroon in Fig. 4.16) first created a 2D skeleton overlaidon the drawing and then re-positioned joints in an alternate view to get adesired 3D pose. 
One (purple) further used a measurement tool to com-pare symmetric parts and then further moved joints in 3D in an attempt toequalize the lengths of symmetric parts. These skeletons show a discrepancyin the average 3D length of symmetric parts (8% avg., 14% max. for purpleand 19% avg., 33% max. for maroon) in Fig. 4.16, c.The third animator (blue) first used the drawing simply as a visual ref-erence, to create a symmetric, canonical skeleton and roughly pose it in3D. This 3D skeleton was then moved onto the drawing and the pose re-fined by rotations and symmetric scaling of parts, to satisfactorily overlaythe skeleton in 2D on the drawing. We described this workflow to anima-tor #2 (maroon), who concurred that despite the natural tendency to firstoversketch a 2D skeleton on the drawing, a canonical 3D skeleton allowedanimators better control over symmetry and part proportion. The brownskeleton in Figure 4.16(c) was easily created by animator #2 using thisworkflow.All animators took between 5-10 minutes to create and pose these qual-itatively similar skeletons in 3D. The above exploration gives us confidencethat animators imagine the 3D pose of 2D character drawings consistentlyand with practice can capture this pose with a 3D skeleton, overlaid directlyon a 2D drawing.664.5. Perceptual and Design Validation14%33%(c)animator1 animator2 animator3 ground truth31\u00b046\u00b044\u00b040\u00b0(b)(a)30\u00b024\u00b032\u00b024\u00b0Figure 4.16: Ground truth (green) and 3D skeletons created by 3 animatorsoverlaid on two ground truth 2D character drawings (a), (b), also shown froman alternate view overlaid on the ground truth 3D canvas. The skeletonsin (b) shown individually (c). The purple and maroon skeletons, createdby manipulating an overlaid 2D skeleton have differences in 3D limb lengthbetween symmetric limbs. The maximum difference for each skeleton, 14%and 33%, is marked on the longer limb. 
The brown skeleton was created byanimator #2 mimicking the workflow of animator #3. The angular deviationbetween the corresponding bones on the ground truth and artist skeletonsis dominated by control bones (hips and shoulders) which have no impacton the result geometry. The maximal deviations without (and with) controlbones are: 24\u25e6 (31\u25e6) for the purple skeleton, 24\u25e6 (46\u25e6) maroon, 32\u25e6 (44\u25e6)brown , and 30\u25e6 (40\u25e6) blue. Average angle differences are 13\u25e6, 15\u25e6, 15\u25e6, and18\u25e6 respectively.674.5. Perceptual and Design ValidationFigure 4.17: Comparing our results to ground truth data: Left to right:contours and skeletons of ground truth (GT) models; GT (blue) and our(yellow) models rendered from alternate views.front sidefrontsidefrontsideFigure 4.18: Left: Given the same input sketch, small variations in skeletonposing (green and purple Figure in 4.16) lead to minor changes in charactershape. Right: significant change in bone slope and location for a symmetriccontour leads to larger shape difference.684.5. Perceptual and Design ValidationRobustness to Input Variation We also examined the impact of usingdifferent artist skeletons on the canvases created by our system (Fig. 4.18).As demonstrated, while the character pose predictably changes with changesin bone posing, the body part shape remains largely fixed, thanks to ourrobust curved-skeleton computation stage. The invariance to minor posingchanges is important, since artists are unlikely to pose a skeleton perfectly.The shape change is most pronounced (Fig. 4.18 (right)) when a bone fora perfectly symmetric surface of revolution is significantly misaligned com-pared to the expected axis. Such misplacement is easy to spot and fix.Overall, as long as the depth ordering of the bones is correct, the intrinsicgeometry of our results changes only marginally with changes in 3D skele-ton posing. 
In particular, angle and depth changes (in this example we have bone orientations varying by up to 30\u25e6) cause only small differences in the results.
Perceptual and Design Validation(a) Artist drawings in novel views (b) Our resultsFigure 4.19: Comparison of our results (b) to sketches produced by artists(a) for the same view and pose.User overlay Our resultUser overlay Our resultFigure 4.20: Overlaid user segmentations (left) for both the elephant andthe scientist are qualitatively similar to the algorithmic results (right).704.5. Perceptual and Design ValidationFigure 4.21: Canvases and alternate view renders generated using our systemfrom the inputs on the right.Figure 4.22: A variety of re-posed renders generated automatically from theinputs on the right.no experience of 3D modeling, 8 females and 4 males) to hand segment thecontours on four simple and five complex drawings and associate each seg-ment with bones of an overlaid 2D skeleton. The full text and the resultsof the evaluation can be found at http:\/\/cs.ubc.ca\/~bmpix. We choseto distinguish joints by color as numbered labels for skeletons with dozensof bones were visually confusing. None of the users remarked that colorbased segment association was problematic as a task. Fig. 4.20 summarizesthe resulting segmentations on two complex inputs, with various user seg-mentations overlaid to visualize correlations across viewers. While viewershad less information than our algorithm (a 2D rather than 3D skeleton),their segmentations are largely consistent and match well our algorithmicsegmentation. We thus believe that our 3D character canvas is built on arobust and perceptually meaningful contour segmentation algorithm.714.6. ResultsFigure 4.23: The explicit cylindrical parameterization of our canvases allowsfor a range of advanced texturing effects, hard to achieve without a anunderlying 3D structure.4.6 ResultsWe demonstrate the results of our character canvas modeling frameworkthroughout this chapter. We show both the actual canvases created by themethod (Fig. 
4.3, 4.21), as well as a range of NPR renders created usingthese canvases from different view directions in both the input and alter-nate poses (see Fig. 4.1, 4.22). The rendering examples include significantchanges in contour topology compared to the input view, which cannot beachieved purely by 2D control (e.g. see back view of the catwoman Fig. 4.1).Using our canvases, with their built-in cylindrical parameterization, one caneasily apply advanced rendering techniques such as fur or feathers simula-tion (Fig. 4.23), enabling artists to generate 3D effects without resorting tocomplex 3D modeling tools.One of the main technical challenges, addressed by our method, andshowcased by these examples is correct resolution of inter-part occlusions.Not only does it enable artists to draw characters in natural rather thanartificial canonical poses, but it enables them to draw characters whoseanatomy does not allow for such occlusion-free pose, e.g. one simply cannotdraw a quadropus (Fig. 4.22) with both the head and all four legs fullyvisible. Other such examples include the fox and anteater (Fig. 4.21, 4.23).Workflow The inputs we evaluated our framework on were created usingtwo workflows motivated by different target applications. In the first one, anartist created a set of sketches, e.g. catwoman or elephant and then fitted a724.6. Resultsskeleton to those using Maya or other animation software (see Section 4.5.1).This framework is best suited for creating new cartoon art and bringing tolife legacy characters, where a drawing of the character already exists.In the second workflow, artists created and pose a 3D skeleton first,and use it as an armature over which to draw character contours from aninteresting viewpoint (fox, anteater, quadropus). This approach is partic-ularly useful in animation setups where artists already have a skeletal ani-mation sequence they want to adapt to a new character. 
The accompany-ing video shows several animation sequences, each generated from a singleframe, created using this workflow. The amount of work required to gen-erate these animations was drastically lower than using the traditional 2Danimation workflow, where key-frames describing out-of-plane motion aretypically drawn by hand.Global Symmetry Besides local symmetry which is used throughout thealgorithm, characters frequently exhibit left-right global symmetries, whichviewers use to complete occluded body part geometry. We employ thisprinciple in two examples to recover fully occluded geometry (elephant) orcorrect for inaccurate artist drawing (fox) by enforcing similar trajectoryshape for matching trajectories on symmetric joints.Impact of Design Choices Figure 4.14 demonstrates the importance ofour design choices when surfacing the canvas. Not accounting for persis-tence at joints (Fig. 4.14(b)) results in unexpected surface discontinuities.Locally minimizing depth variation (Fig. 4.14(c)) is similarly insufficient.Our framework (Fig. 4.14(e)) which constrains profile slope and minimizesforeshortening produces more natural results.Parameters and Runtimes Our method has no tunable parameters.For canvas modeling we use thirty vertices per trajectory and have uniformtrajectory density across all bones; the density is determined so as to have atleast ten trajectories along the shortest bone, and to have the distance be-tween consecutive trajectories be no more than one percent of the character\u2019sbounding box diagonal. Our software takes between ten to sixty seconds togenerate a canvas on an Intel Core i7 machine with 32GB of RAM. Roughly25% of this time is spent in the segmentation stage and the rest is spent bythe QP solver computing the canvas surface. This fast turnaround allowsartists to quickly repose the skeleton or update the drawing were they tofind the results inadequate.734.6. 
Results(a) (b) (c) (d)Figure 4.24: Given a single drawing and a posed skeleton we generatequalitatively similar results (b,d) to those created by multi-drawing sys-tems which employ manually specified curve correspondences between drawncurves: [106] (a) and [74] (c).Comparison to Prior Art Figure 4.24 highlights our ability to generatemodels of equal complexity to those generated by multi-view approachessuch as [74, 106], without the need for multiple corresponding drawings.We performed this comparison by using one of the input views utilized bythese prior systems, tracing 2D curves over it as our sketch input and posingcorresponding skeletons. Our method employs significantly less user inputthan Levy et al, who require at least three corresponding drawings eachwith an appropriately posed skeleton. While Rivers et al. do not require askeleton, they still expect at least three drawings with correspondences andcannot articulate the results.We successfully handle a much wider range of sketches than previousmethods, most of which, e.g. [20] can handle only occlusion free inputs.While Cordier et al. [28] support partial occlusions, they assume perfectrigid mirror symmetry, and expect every part silhouette to be drawn as aseparate curve. Karpenko and Hughes [68] make a similar curve planarityassumption. Our framework successfully handles complex occlusions, in-cluding scenarios deemed ambiguous by previous methods (e.g. elephant inFig. 4.21, see [68]); does not require posing symmetry (e.g. see the mad sci-entist) nor separate part outlines (e.g. see hind side of the fox), and plausiblyrecovers non-planar contours (see Fig. 2.2). As demonstrated by Figure 4.4,our shape computation which aims to maximize simplicity, generates resultsmore consistent with user expectation than inflation based frameworks suchas [68, 95]. By accounting for persistence (Fig. 
4.14) our method avoidsdepth discontinuities at complex joints bound to show up when parts areassumed to have perfect rotational symmetry [20, 28].Qualitative Evaluation. We asked six computer artists to provide visualcritique of our outputs (catwoman, elephant, quadropus) by showing themthe input drawings and the output renders (see Figure 4.25), and askingthem if our results represent the same character as the input drawing. All744.6. ResultsFigure 4.25: An example of the qualitative evaluation questionnaire.six agreed that our results faithfully capture the original input in new posesand views and expressed strong interest in using our system in their work.Limitations and Improvements. Like human observers, our method\u2019sability to predict the shape of a character is inherently limited by the de-scriptive power of the input drawings, and our algorithm can be misled bybadly posed or obfuscated drawings. For example, faced with an obliqueview of a bird\u2019s wings, neither viewers nor our method can guess their depthwithout resorting to prior knowledge of bird anatomy (Fig. 4.26(a)). Sinceselecting a single view where all character body parts are well described cansometimes be challenging, we provide users with an incremental, overdrawinterface. In this interface, users can first generate a character model froma single view, and then update the canvas from another view using contouroverdrawing framework that follows [96] (Fig. 4.26(a)).While our method is robust against minor inaccuracies in the input skele-ton, major errors in skeleton depth placement may clearly cause undesirableartifacts such as intersections between body parts. We did not encountersuch situations on the tested inputs. We believe that the simpler solutionwould be for the artist to adjust the skeleton, if and when they find theresult unsatisfactory, and rerun the algorithm. 
However if desired, one canincorporate additional non-intersection constraints into the optimization inEquation 4.9, or fix the self-intersections as a post-process step once thecanvas is generated. Regardless, we are still dependent on the ability of theartist to pose a skeleton with respect to a cartoon drawing in a manner thatavoids intersection between body parts.A fundamental premise of our work is that the 3D canvas is a collec-754.6. Results***(a) (b)(c)Figure 4.26: Our ability to plausibly recover character shape is limited bythe descriptive power of the inputs. Without cues to the contrary we gen-erate round bird wings, instead of anatomically correct ones (a). Since weuse a standard mesh representation, the canvas can be easily edited to cor-rect the wings or add extra features (beak) using standard tools (a, right).Geometries not-well represented by generalized surfaces of revolution, suchas loose clothing (b, pink cape) must be modeled by other means. Whilesome fine details can be captured by using skeleton refinement (c), alternateediting tools are likely to achieve this goal faster.tion of generalized surfaces of revolution parts, each part being defined by abone of the input 3D skeleton. Surface detail for a 3D canvas that stronglydeviates from this premise, like cloth folds with internal occluding contours(Fig. 4.26(b)) are thus not captured by our approach. While the hair spikesof (Fig. 4.26(c)) can be constructed using surface of revolution parts, it isunlikely that artists would provide the necessary definition for each hairspike with a bone on the input 3D skeleton. Thus while our system is wellsuited for canvas creation, artists should combine it with other mesh-editingtools to generate detailed, dressed, characters. 
Some cartoon charactersmay have elements which are designed to consistently face towards the cam-era regardless of the viewer position (cartoon eyes, or the ears of a cartoonmouse); we do not attempt to recover these features from the input sketch.In a production environment such features are best implemented using bill-board vector elements. In general, realistic cartoon drawings combine a mixof strokes that define a 3D canvas, view-dependent 3D geometry, and 3Ddetail drawn on and around the surface of the 3D canvas [113]. We have764.7. Conclusionsfocused on simplified cartoon drawings where the strokes strictly comprise acharacter canvas. The classification of strokes of arbitrary cartoon drawingsas described, and their 3D reconstruction, is subject to future work.4.7 ConclusionsWe presented the first, to our knowledge, system for 3D character canvasmodeling from a single naturally-posed character drawing and overlaid 3Dskeleton. We can process input with complex inter-part occlusions and largevariations in contour depth. As demonstrated, our output 3D geometry isappropriate for use as an animation canvas: facilitating non-trivial reposingand large viewpoint changes of complex characters, that remain consistentwith the input drawing, and enabling non-photorealistic animation usingpainterly strokes on and around the canvas.Our work is aligned with a recent trend to simultaneously model 3Dcharacter geometry and its corresponding skeleton [8, 17]. While we havefocused on 3D proxy geometry creation from minimal input in the formof drawn contours, our coupling of 3D skeleton and input drawings usinga perceptual framework is extensible. 
In the future we expect that ouralgorithmic approach, adapted to richer input drawings, embellished withinternal contours, construction lines and shading, will result in fully detailedand complex 3D character models.77Chapter 5Gesture3D: Posing 3DCharacter via a GestureDrawing5.1 IntroductionIn this chapter we introduce a novel system to pose rigged 3D charactersvia a gesture drawing. A variant of this chapter has been submitted toSIGGRAPH ASIA 2016.Gesture drawings - rough, yet expressive, contour drawings of posedcharacters (Figure 5.1b,e) - are routinely used by artists to quickly conveythe action, form, and pose of a character figure [12, 53, 97]. Artists aretrained to create descriptive gesture drawings which unambiguously conveya character\u2019s pose in just a few minutes [73], and use them ubiquitously whenconceiving character poses and motion key-frames for storyboarding. In dig-ital media production, artists subsequently apply these envisioned poses to3D character models. In current practice, posing is performed separately,using the drawings as a reference only, and requires additional, often sig-nificant, user interaction (Section 5.2). We seamlessly connect the ideationand modeling steps by introducing the first method for 3D character pos-ing which poses the characters algorithmically using gesture drawings as(a) (b) (e)(d)(c) (f ) (g)Figure 5.1: Gesture3D: gesture drawings (b,e) of an input character model(a); estimated 2D skeleton projections (c,f) and new poses automaticallycomputed from the drawings (d,g).785.1. Introductioninput, allowing artists to directly communicate their ideas using drawingsand sidestepping the mental overhead of interacting with a complex softwareinterface. As demonstrated, our method plausibly poses 3D characters usingquickly generated, rough, vectorized gesture drawings and rigged charactermodels, provided in a neutral bind pose, as the only inputs. 
It successfullyhandles complex poses with varying and significant part foreshortening, oc-clusions, and drawing inaccuracies (Figure 5.1).The advantage of gesture drawings over other types of 2D inputs ex-plored by previous posing approaches (Section 5.2) is the lack of perceptualambiguity. Unlike stick-figures, lines of action, and outer silhouettes (Fig-ure 5.2), gesture drawings allow artists to unambiguously convey poses tohuman observers. By identifying and leveraging the perceptual pose cuesused by artists when creating these drawings, we are able to automaticallyrecover character poses that are consistent with artist intent.Our framework centers around analysis of the stroke curves forming thegesture drawings (Section 5.4). Like many other line drawings, gesture draw-ings are dominated by contour curves, conveying the occlusion contours ofthe depicted characters. However, since gesture drawings focus on conveyingpose rather than shape, they typically only depict approximate, abstracted,character anatomy. In particular, artists typically use simple low-curvaturestroke segments to outline body parts and use higher-curvature sections todepict their connecting joints [53]. These high-curvature anatomical land-marks assist observers in parsing the drawings. The abstracted contourstrokes of a gesture drawing are designed to convey largely smooth 3D char-acter geometry. As observed in the previous chapter, in such scenarios thecontours of both individual body parts and part chains are usually continu-ous; thus adjacent contour stroke segments always outline adjacent bodyparts, and adjacent body part outlines are typically depicted using oneshared stroke, or multiple Gestalt continuous [69] strokes. We also observethat body part contours are consistently oriented with respect to the parts\u2019skeletal bone and rarely cross the bone\u2019s 2D projection. 
Combined together, these three contour consistency cues allow observers to identify poses with globally consistent joint and bone locations.
We then compute thedesired embedding by casting it as a variation of the tree-structured MarkovRandom Field (MRF) problem (Figure 5.9b, Section 5.5). We extend oursolution to 3D by leveraging the depth order implied by occlusion contours,and the observations about viewer preference for simple and less foreshort-ened poses. To overcome drawing inaccuracy, we formulate 3D embeddingas an energy minimization problem which balances landmark-implied 2Djoint placement against the simplicity and foreshortening cues (Figure 5.9d,Section 5.7).Contribution. Our contribution in this chapter is two-fold: we formu-late the properties of effective gesture drawings, bringing together insightsfrom multiple sources in the areas of psychology, art, and computer graph-ics, highlighting key perceptual cues which enable viewers to perceive theartist intended character poses; we then use these observations to intro-duce the first gesture drawing based algorithm for posing 3D characters.Our method enables artists to directly convert their ideated posed charac-ter drawings into 3D character poses, and supports complex drawings withocclusions, variable body part foreshortening, and drawing inaccuracies.Validation. We exhibit a gallery of character poses obtained automat-ically from gesture drawings of a range of 3D characters (Section 5.9) andvalidate our algorithm in a number of ways (Section 5.8). We evaluate ourresults against ground truth data, by first rendering projected contours of805.2. Related Work(a) (b) (c) (d) (e)Figure 5.2: Stick figure drawings (a), lines of action (b), and outer silhou-ettes (c) allow for multiple perceptually valid pose interpretations. (d) Poorview selection results in highly foreshortened contours leading to loss ofpose information (e.g bends on the left arm or the curved spine). 
Gesturedrawings, consciously drawn from descriptive views (e) effectively conveythe intended pose.posed character models, then using these contours as input to our methodand comparing our results against original poses; we compare our algo-rithm\u2019s results with characters posed by artists given the same drawingsas input; we compare the character-contour correspondences computed byour method against manual annotation by human observers; and we collectqualitative result evaluations by experts and non-experts alike. Finally, wecompare our method against prior work and algorithmic alternatives. Thesevalidations confirm that the poses we compute are consistent with viewerperception and artist intent.5.2 Related Work5.3 Parsing Gesture DrawingsGesture drawings are ubiquitously used by artists to clearly convey complex3D poses.To understand and formulate the properties that make them ef-fective, we combine observations from drawing tutorials, modeling research,and perception studies.Anatomical Landmarks In a typical character drawing, most strokesdepict projected contours, i.e. curves along which the normal to the posedcharacter\u2019s body lies in the image plane. Unlike detailed drawings of geo-815.3. Parsing Gesture Drawingsmetric shapes, gesture drawings focus on depicting pose and motion; hencetheir contour strokes are often highly abstracted and only approximate theshape of the actual 3D contours. We note that gesture drawings employidealized character anatomy, well described by a union of approximatelycylindrical body parts connected by spherical joints [12, 53, 57] (see inset).Figure 5.3: Portion of agesture drawing with an-notated joint (blue) andpart (red) contours.Consequently, contours of body-parts surround-ing skeletal bones are typically dominated by low-curvature lines. In contrast, joint contours inall views are well approximated by circular arcswhose radii are roughly equal to the body radiusaround the joints. 
These higher curvature jointcontour arcs are most prominent next to bent orterminal (single bone) joints. As a consequenceof this curvature difference, we speculate that hu-mans can easily discern the likely locations ofsuch prominent joints, or anatomical landmarks,in a gesture drawing, and use those to anchor theoverall character pose. Since artists seek to communicate their target pose,they typically select views where multiple anatomical landmarks are visibleand clearly depicted [52]. Clearly not all high-curvature contour segmentscorrespond to joints (see the skirt \u201ccorners\u201d in Figure 5.1); many drawnjoints are not bent and therefore not easy to pinpoint; and multiple jointsmay have the same radii, making them hard to distinguish. Our algorithmicchallenge is to discern the relevant markers on the drawing and to associatethem with their corresponding joints.Figure 5.4: Contour-skeletoncorrespondences, with Gestaltcontinuous contours con-nected by dashed lines.Contour Consistency. As noted by Bess-meltsev et al. [10], absent occlusions a typicalcharacter\u2019s contour is a single closed curve;each body part around a terminal bone (bonewith a terminal joint) is outlined by a singlecontour segment, while parts around interior,or non-terminal, bones define two outline seg-ments, one on each side of the bone; and ad-jacent segments along the contour correspondto adjacent skeletal bones (see inset). In thepresence of occlusions, the Gestalt continuation principle [69] indicates thatviewers complete the drawing by mentally connecting pairs of end-pointsof partially occluded curves (T-junction stems) by invisible contour sec-825.3. Parsing Gesture Drawings(a) (b) (c) (d)Figure 5.5: Implausible bone locations that violate (a) adjacency, (b) orien-tation, or (c) crossing cues; consistent placements (d).tions if they can be smoothly connected (Figure 5.4). 
In this scenario, theproperties above continue to hold once these invisible contour sections aretaken into account. In this general case, terminal bones correspond to asingle sequence of (one or more) Gestalt continuous curves, and interiorbones correspond to two such sequences - one on the left and one on theright. Adjacent segments along the same contour stroke still correspondto adjacent bones, while bones joined by a valence two joint correspond toeither immediately adjacent, or Gestalt-continuous, left and right contoursegments. In addition to reflecting skeletal adjacencies, body part contoursare consistently oriented with respect to their corresponding skeletal bones- a body\u2019s surface and consequently its contours clearly separate inside fromoutside (Figure 5.5b). Since body mass typically surrounds the bones, con-tours rarely cross 2D bone projections (Figure 5.5c). Viewers are known torely on domain priors when deciphering drawings, and therefore we expectthem to indirectly leverage this set of contour-bone consistency expectationswhen parsing gesture drawing and matching joints to landmarks.Simplicity Previous graphics research (e.g. [138]) had heavily relied oninsights from Gestalt psychology [69] which points to a viewer preferencefor simple or regular drawing interpretations. While some of these works(e.g. [138]) focus on generic regularities such as symmetry or parallelism,others (e.g. [10]) highlight domain-specific simplicity priors. We speculatethat viewers leverage both regularity and naturality when interpreting ges-ture drawings: they choose more likely or natural character poses amongthose consistent with the drawn contours (Figure 5.8), and use regularitycues, particularly symmetry, when presented with different ambiguous in-puts (for instance when mentally completing partially occluded poses, suchas the hands of the character in Figure 5.8, or the fetal pose in Figure 5.14,835.4. 
Framework Overviewtop row).(a) (b) (c)front view side viewsFigure 5.6: Depth ambigu-ityDepth In general, an infinite number of 3Dgeometries have the same 2D projection. How-ever, for each individual bone of a knownlength, if the 2D positions of its end-joints areknown, the z-difference between the bone end-points is fully determined; what needs to bedetermined is their depth order (see inset).While the simplicity priors discussed aboveoften help viewers to resolve order ambiguities, contours of posed charac-ters taken from a poor view-point (Figure 5.2d) remain ambiguous. Con-sequently, artists are consistently advised to strategically select descriptiveviews [36], and specifically to avoid views with large uneven foreshortening.Our observation of artist-generated gesture drawings suggests that in se-lecting views they also strategically use occlusions to clarify depth ordering,and add suggestive local, intra-part, occlusion contours (see inset) to furtherclarify local depth order.inter-partintra-partFigure 5.7: Oc-clusion types.Inaccuracy Experiments [114] show that even trainedartists fail to correctly draw foreshortened shapes and fre-quently exaggerate perspective scaling effects. As indicatedby prior work on interpreting design sketches [138], viewersare adept at mentally correcting such errors by biasing theenvisioned solutions toward more simple and less foreshort-ened interpretations. In the context of gesture drawings, weobserve that while viewers use landmarks to anchor the en-visioned pose, they mentally tweak the locations of theselandmarks in favor of such simpler pose interpretations.5.4 Framework OverviewThe input to our method is a rigged and skinned character model, in abind pose, and a roughly same scale vectorized gesture drawing. 
As artiststypically create the gesture drawings using the character as a reference, scalesimilarity is easy for them to satisfy; alternately, manually scaling drawingsgenerated independently from the character model takes seconds for bothexperts and amateurs. As is typical of skeletal posing systems, the poseof a rigged character is fully determined by the positions of its joints. We845.4. Framework Overview(a) (b) (c)Figure 5.8: Less natural (b) and more natural (c) interpretations of a drawnpose (a) (leg bent sideways vs forward).compute the joint positions that best reflect the depicted pose using thefollowing steps (Figure 5.9).Joint-Contour Matching We first match drawn contours against thebody parts they describe, and place a projected character skeleton in theimage plane so that its surrounding body contours roughly align with theirmatched drawn ones. We formulate the matching as a computation of opti-mal joint locations along the contours. As the continuous solution space ofall possible joint locations is too large to operate on efficiently, we discretizethe problem by considering only a finite set of potential joint locations onthe 2D drawing. We associate each possible joint location with an unaryassignment probability derived from our anatomical landmark prior (Sec-tion 5.5.1), and associate binary and ternary probabilities for assignmentsof adjacent pairs and triplets of joints based on consistency, simplicity, andlow foreshortening priors (Section 5.5.3). The resulting discrete optimizationproblem can be cast as a High-Order Tree-Structured Markov Random Field(MRF) problem [70]. 
We then minimize this combined cost function sub-ject to additional global constraints imposed by the drawing (Section 5.5.4).Adding these constraints makes the general assignment problem NP-hard;however, as we demonstrate, our greedy solution framework works well inpractice (Section 5.5.5).2D Pose Optimization Our discrete solution considers only a finite setof possible joint locations; accordingly while it provides a good estimate ofthe joint locations and joint contour correspondences, the final joint place-855.5. Character-Contour Correspondence(a) (b) (c) (d)Figure 5.9: Overview: (a) algorithm input; (b) discrete 2D joint embedding;(c) optimized 2D embedding; (d) 3D skeleton (color visualizes depth) andposed model.ment may be locally sub-optimal. We consequently use continuous locationoptimization to further improve this solution and compute joint locationsthat best capture the artist intent (Figure 5.9b-c).Full Pose Estimation. We proceed to fully pose the character by as-signing 3D positions to its joints, further adjusting the joint 2D positionswhen necessary. We note that exact 2D joint locations are more sensitiveto artist errors than bone directions and lengths, and consequently rely onthe latter when recovering the full pose. We seek poses that satisfy theordering cues provided by occluding contours in the gesture drawing, andwhich balance preservation of the bone directions and 2D lengths, estimatedfrom the drawing, against our expectations of simplicity and foreshorteningminimization.We formulate joint positioning as a constrained energy minimizationproblem, then obtain the minimum by recasting the energy in terms of twistvariables [18] and using a Newton-type solution method that follows theapproach of [43].5.5 Character-Contour CorrespondenceInitialization To evaluate anatomical landmark correspondences, we needto associate a likely contour arc radius for each character joint. To computethe radius we use a variation on the method of Thierry et al. 
[126] to fita sphere to the region on the character mesh surrounding the joint. Whilemany joints are well approximated by spheres, some parts of a character,such as the palm of the hand, are more elliptical and consequently have a865.5. Character-Contour Correspondence(a) (b) (c)head elbow torsoFigure 5.10: Joint cost visualization. Here the color shows the matchingcost on a scale from red (poor match) to blue (good).range of plausible contour arc radii. Given the extracted mesh region aroundeach joint we therefore use PCA to obtain the maximum and minimum radii,and use a discrete set of joint radii with a step of \u03b5 within this range insubsequent computations. We set \u03b5 to 2% of the drawing bounding box anduse it as the default discretization density throughout the discrete solution.To facilitate the computation and evaluation of contour consistency inthe presence of occlusions, we preprocess the contours to detect Gestaltcontinuous strokes. We use the continuity test described in [10]: given eachpair of strokes, we connect their end-points with a straight line and measurethe angles between this line and the stroke tangents. A pair of strokes isclassified as Gestalt Continuous if both angles are below the 18\u25e6 thresholdidentified in perception literature [55]. For each pair of drawing strokes wetest all four end-point configurations. When strokes are deemed continuouswe retain the connecting line as a Gestalt bridge between them. We considereach pair of strokes connected by a bridge as a single bridged contour.5.5.1 Solution SpaceAs previously noted, artists approximate the contours surrounding joints ascircular arcs centered at the joints whose radius reflects the distance fromthe character joint to the surrounding surface. We therefore expect jointswith visible contours to be located approximately a radius distance awayfrom these contours along the contour normal (Figure 5.10). 
We use thisobservation to generate potential locations for joints with visible contours.We uniformly sample the input drawing contours at \u03b5-intervals, and treatthe samples as potential tangential contact points for joint circle placement.For each sample point we consider the options of placing the circle on eitherside of its contour, conceptually duplicating all samples into left and right875.5. Character-Contour Correspondenceinstances. We compute potential joint locations by placing each joint alongthe normal to the contour at the sample at an offset equivalent to its circleradius (Figure 5.10).Character joints may be entirely occluded (e.g. the man\u2019s palms in Fig-ure 5.8). To be able to plausibly place such joints, we sample the boundingbox of the drawing using a regular grid with density equal to \u03b5 and addthese samples to the discrete solution space.5.5.2 Unary Assignment CostWe compute, for each joint, the likelihood that it is placed at each potentiallocation. The grid-based locations are assigned the maximal assignment costof 1 since, absent information to the contrary, we expect contours associatedwith joints to be visible. For tangential locations, we aim to match appro-priate joints to corresponding anatomical landmarks, and hence prioritizeplacements where sections of the contours are well aligned (in terms of bothlocation and normal) with the joint\u2019s circle. Since non-terminal joints areoften adjacent to multiple contour segments on different sides of the circle(Figure 5.10), our evaluation looks at all contour samples close to the cir-cle and not just those immediately next to the originating tangent sample.Since humans rarely draw perfect circular arcs, we do not expect perfectalignment; to evaluate fit between a joint i and a potential location P ia wetherefore measure the portion of a circle with radius ri centered around Piathat approximately aligns with the contours using simple distance and nor-mal thresholds. 
Specifically, we uniformly sample the circle and count thepercentage of circle sample points sc that have nearby contour samples swith contour normals ns close to the circle sample point normals:T (P ia) = {sc : |\u2016sc \u2212 s\u2016 < min(\u03b5,ri2) and \u2220(sc \u2212 P ia, ns) < \u03b1}C(i, P ia) = 1\u2212 \u2016T (P ia)\u2016\/N (5.1)Here N is the number of samples on the circle. The angle threshold \u03b1 isset empirically to 15\u25e6. When a contour matching a terminal joint is visiblein the drawing, we expect a non-negligible portion of the contour to closelyalign with the joint\u2019s circle. We found this threshold-based solution to workbetter than using a falloff function that depends on how close the contoursare to the circle. We consider terminal joint locations to be reliable if atleast 15% of their osculating circle is matched by the contours, and assignthe maximal cost of 1 to locations that do not pass this threshold. For each885.5. Character-Contour Correspondencejoint i and a potential assigned location P ia , in addition to the cost we storethe originating contour sample sia and the set of all contour samples Sia thatsatisfy the alignment threshold.Position Consolidation. Near high-curvature regions on the contours,we typically encounter several potential low cost joint locations for a givenjoint which have nearly identical sets of well-aligned contour points. Toreduce the solution space during computation we consolidate these potentialjoint locations into one, selecting the location whose originating sample liesclosest to the stroke\u2019s curvature extremum.5.5.3 Assignment CompatibilityOur compatibility term is designed to promote contour consistency, and toweakly encourage less foreshortened and more natural solutions.Bone Contours. Each pair of position assignments for the end-jointsof a bone indirectly defines the contour segments corresponding to this bone(Figure 5.4). 
Given a pair of such assignments, we compute the potentialbone contour segments defined by these assignments as follows (see inset).We consider all pairs of well aligned samples, where each sam-ple is associated with a different end-joint. If the two sampleslie on the same contour, or on contours connected via bridges,we associate the contour segment or segment chain betweenthem with the bone. We trim the segments by selecting thetwo samples, one in each joint\u2019s set, that are closest to oneanother along this shared contour as segment end points. Weuse the computed bone segments to assess the compatibilityof the bone\u2019s end-joint assignments. Note that occlusions orpoor assignments may lead to bones with no correspondingcontours.Consistency. We explicitly prohibit inconsistent assignments where a bone\u2019send-joints lie on opposite sides of the bone\u2019s contour, violating our orienta-tion prior. Since a bone is expected to be inside the body part it anchors,it typically should not cross its associated contours. We use a consistencypenalty cost Cc, which is set to 1 if a bone\u2019s 2D projection intersects any ofits associated contour segments, and is 0 otherwise. We use a penalty in-stead of a hard constraint to account for drawing inaccuracies and samplingartifacts.895.5. Character-Contour CorrespondenceWe prefer assignments where bones are associated with at least one, ei-ther simple or bridged, contour segment. Moreover, we aim for adjacentbones to be associated with the same continuous contour. We encode bothpreferences by focusing on the contour associated with the originating sam-ples of the end-joint assigned locations: we leave the consistency cost Ccunchanged if a pair of end-joints of a bone are assigned locations with thesame originating single or bridged contour, and set it to 1 otherwise.(a)(b)Bone Contour Conformity We expect the contour seg-ments associated with bones to have relatively low-curvature(see inset). 
To evaluate contour conformity, we measure theratio between the length of each bone segment and the Eu-clidean distance between its endpoints:Ccf (i, j) = 1\u2212 e\u2212(1\u2212Lc\/L)\/2\u03c32 ,where Lc is the length of the contour segment and L is theEuclidean distance between its end-points. We empirically set\u03c3 = 4% of the bounding box diagonal. If a bone has multipleassociated contour segments, we repeat the cost computationand, to be conservative, use the lower of the two costs as theconformity cost. If the joints have no shared bone contours,we set the cost to 1.Pose Preferences. We assign a per-bone cost term for each assignmentof its end-joints to a pair of potential positions, based on the differencebetween the bone length and the image-space distance between the two po-sitions. We expect the artist to select views where the drawn body parts,and consequently bone projections, undergo relatively small foreshortening;we therefore weakly penalize foreshortening when it occurs. While real char-acter bones do not stretch, artist drawings can contain errors in characterproportion description. We therefore tolerate assignments where the image-space distance is larger than the respective bone length, but penalize suchassignments with a large penalty cost. The combined cost is:Cl(i, j) ={1\u2212 e\u2212(l\u2032ij\u2212lij)2\/2\u03c32 , if l\u2032ij > lij1\u2212 e\u2212(l\u2032ij\u2212lij)2\/2(\u03c3\/3)2 , otherwise . (5.2)where l\u2032ij = \u2016P ia \u2212 P ja\u2016 and lij is the bone length. We use the same \u03c3 asfor bone conformity. We evaluate the difference between the two lengths905.5. 
Character-Contour Correspondencerather than their ratio, since ratio-based costs are extremely sensitive toartist errors on short bones.We encode our expectation for simpler, more natural character poses,depicted from a descriptive view, as a preference for 2D joint angles in theoutput pose that are close to their bind pose counterparts:Cn(i, j, k) = 1\u2212 e\u2212(\u03b3\u2212\u03b3\u2032)2\/2\u03c32aHere \u03b3 and \u03b3\u2032 are the current and bind pose angles respectively between pairsof emanating bones (i, j) and (j, k) at a joint j. We set \u03c3a to \u03c0\/3 if the 3involved joints share an originating contour, and \u03c0\/6 otherwise, enforcing astronger preference for the bind pose angle when there is no clear contoursuggesting 2D bone directions, and a weaker preference for bind pose angleswhen the adjacent bones follow one continuous contour and the 2D bonedirection is well-suggested. These costs are measured for each triplet ofadjacent joints. This term can be replaced by more advanced anatomicalmachinery used in prior work for predicting plausible angles: for instance,if multiple reference poses are provided, one can look at the smallest angledifference across these poses.Combined Local Cost Function. Combining the different terms above,the cost for assigning a pair of bone end-points i and j to a pair of locationsis measured asE(i, j) = 1\u2212 (1\u2212 Cl(i, j))(1\u2212 Ccf (i, j))(1\u2212WcCc(i, j)). (5.3)We empirically set the consistency penalty weight to Wc = 0.9. The com-bined energy function encoding all local preferences for a given assignmentof joints to point locations isEmatch =\u2211iC(i, P ia) +\u2211i,jE(i, j) +\u2211ijkCn(i, j, k) (5.4)where the first term sums the per-joint assignment costs, the second sumsthe per-bone costs and the third considers the joint triplet costs. 
All termshave equal weight.5.5.4 Global ConsistencyIn addition to the local criteria above, when evaluating the plausibility of askeleton embedding we need to evaluate the likelihood of the overall contour-to-skeleton assignments it imposes (Figure 5.11). In addition to the bone-segment correspondence computed earlier, this task requires a joint-contour915.5. Character-Contour Correspondence(a) (b) (c)Figure 5.11: Full solutions: (a) contains overlaps; (b) poor coverage; (c)preferred.correspondence. We compute segments associated with joints as follows.For terminal joints we consider the longest segment delineated by its alignedsamples which does not overlap the segments associated with its bone. Forinterior joints we consider each pair of bones emanating from the joint. Ifthe bones are associated with segments on the same contour, we associatethe contour segment in-between them with the joint (Figure 5.4).In real life, projected visible contours of different character body partscan overlap only if the two parts are in contact (i.e. on opposite sides ofthe contour), or if one is both perfectly parallel to and occluding the other(Figure 5.14, top row). We therefore test whether any pair of same-sidecontour segments associated with disjoint bones or joints overlap and, ifthey do, this configuration is assigned a high penalty score, empirically setto 10 (Figure 5.11a).In a drawing that contains only contours of body parts surroundingskeletal bones, a valid solution must associate all contours with some boneor joint. In practice our drawings can and do occasionally contain extracurves, e.g. the cat and horse ears in Figure 5.14. Thus instead of full cov-erage, we seek a sufficient one, requiring coverage of over 85%of the contours (Figure 5.11b-c). 
We note that when the soft non-overlapconstraint is satisfied, our local energy terms implicitly encourage coveragemaximization, since we penalize joints not being matched to contours anddiscourage undesirable foreshortening. We incorporate coverage constraintsinto our framework as discussed in Section 5.5.5.Our local energy does not clearly distinguish between fully or partiallysymmetric solutions. While hard to penalize locally, partial symmetries(e.g. left arm and right leg mapped to the same side of the spine) areeasily detected on a complete solution by evaluating the degree of twist thespine must undergo to accommodate them. While twist can be intentional,we expect it to be clearly indicated by the contours, with the undesired925.5. Character-Contour Correspondence\u201cuntwisted\u201d solution in these cases violating consistency constraints. Wedifferentiate between fully symmetric solutions by observing that, all thingsbeing equal, artists strongly prefer views where the face of the character isclearly visible. We similarly use this frontal preference in our global poseevaluation.5.5.5 Solver MechanismOptimizing Ematch alone without addressing global preferences can be castas a classical tree-structured high-order Markov Random Field (MRF) prob-lem by translating our cost terms into probabilities, and optimized efficientlyusing standard techniques [70]. Unfortunately, we are not aware of any stan-dard mechanism that allows us to incorporate the coverage constraints intosuch frameworks; the general problem of maximum a posteriori estimationover a Markov Random Field is a classical NP-hard problem [117]. Insteadwe develop a simple domain-specific method that works well on all our in-puts. 
We note that, on typical gesture drawings, for terminal joints ourunary cost computation produces only about a dozen possible assignmentswith less than maximal cost; furthermore, our desired assignment is ex-pected to match most terminals, with the exception of occluded ones, toone of these below maximum cost placements. Because of our stringentcontour consistency constraints, given the correct assignment of terminals,using the basic Ematch optimization for assigning other joints results in thedesired global solution. Clearly we do not a priori know what this correctterminal assignment is; however, given the small number of terminals (typ-ically six or less) and the small number of placement choices for them, anexhaustive search of all possible alternatives is a practical option.This search can be further sped up by traversing the different alternativesin a strategic order. Specifically, we order all possible terminal assignmentsbased on the sum of their unary costs, and then process them in increasingcost order, penalizing assignment combinations where terminal assignmentsviolate the non-overlap constraints and placing them at the end of the queue.For each terminal assignment we then optimize Ematch on a reduced set ofjoints and with a reduced solution space. Specifically, when a terminal jointhas a below maximum cost assignment, we remove this node from the solved-for joint set and update the unary and binary costs of its neighboring vertexto reflect the selected assignment. We let the optimization determine thebest assignment for terminal joints associated with the maximal cost, butremove all assignments with below maximum cost from their solution space.If the located solution satisfies all our constraints, and in particular if it935.6. 
2D Pose Optimizationproduces over 85% coverage, we stop the iterations.The same coverage can sometimes be produced by a permutation of thedesired terminal placements; however different permutations lead to differentminima of matching energy Ematch which may better satisfy our preferencefor more front facing and less twisted solutions. We thus process all partiallyand fully symmetric permutations of the obtained solution, and select theleast twisted and most front facing one from among those solutions thatsatisfy all our constraints.5.6 2D Pose OptimizationWhile our discrete solver correctly captures the overall contour-joint corre-spondences, it operates on a finite set of potential positions and thus mayend up generating imperfect joint placements (Figure 5.9b). Moreover, toenable an efficient solution, our discrete formulation assumes all joints arefully flexible. In real models, many joints have a reduced set of degrees offreedom (DOFs), with pelvic and shoulder joints typically supporting onlyrigid transformations. To address both issues we iterate over the joints tofurther optimize their positions and enforce the allowable degrees of free-dom. For each joint we use a local random walk to find a new location thatimproves the overall matching energy (Equation 5.4) while constraining thejoint to remain on the same side with respect to all nearby contours, anddisallowing moves that violate consistency or introduce overlaps.For joints with a reduced DOF set, we then recompute the positions ofthe joint and its immediate neighbors which satisfy the DOF constraints andare maximally close to the current ones, using an ICP variant. Specifically,given the current 2D locations of a joint and its neighbors, we search for a3D transformation of these joints in the bind pose that satisfies the DOFconstraints while maximally aligning the 2D coordinates of each joint andits current location. 
We repeat the two steps until convergence.5.7 Full Pose OptimizationOnce we have generated a 2D skeletal embedding, we associate a depth valuewith each joint by leveraging viewer expectations of simplicity and weakforeshortening. In this process we also refine image plane joint positions tocorrect drawing and 2D estimation inaccuracies. In our computations weassume an orthographic projection since, as noted by [138], estimates ofartist intended perspective are highly unreliable. Our solution is based on945.7. Full Pose Optimizationthree key observations. First, we note that even small inaccuracies in depict-ing body proportions, due to inexact foreshortening, inaccurate perspective,and other artifacts, accumulate to form large errors in 2D joint placement.Therefore, rather than minimizing absolute 2D solution displacement com-pared to the 2D embedding, we encode conformity with this embedding interms of slopes and lengths of projected bones. Second we note that hu-man observers are known to underestimate foreshortening in drawings [114],a fact that often causes artists to exaggerate it [57]. Consequently, fore-shortening predictions based directly on drawn body-part lengths may beinaccurate. In our observations, viewers rely on relative rather than abso-lute foreshortening when predicting a character\u2019s pose from a drawing - evenwhen presented with a reference model. Consequently, when predicting thedegree of foreshortening per bone, we similarly take into account relativeforeshortening as compared to other bones. Our last observation is thatwhile we seek natural poses, i.e. those closer to the input bind pose,minimizing this difference directly is problematic as many drawn poses arequite far from the input one by design. For this reason, we do not explicitlyconsider the distance to the bind pose in our optimization. 
Instead we usethe bind pose as an initial guess for the solution and limit the step size ineach iteration so that our final pose gradually evolves from the bind pose.In doing so, we indirectly guide our final solution towards a more naturalpose by searching for a smooth motion path from the bind pose to the finalone.Conformity We encode conformity to the estimated 2D skeletal pose aspreservation of 2D bone slopes and lengths:Ec =\u2211(i,j)\u2208Swc(i, j)((Pyi \u2212 P yj )\u2212 dyij)2 + ((P xi \u2212 P xj )\u2212 dxij)2 (5.5)where Pk are joint positions, S is the set of all skeletal bones, and dxij , dyijare the x and y differences between joint positions in the 2D embedding. Tofocus on relative rather than absolute bone projection preservation we setwc(i, j) = 1\/l2ij where lij is the length of the bone (i, j).Foreshortening When the 2D projected bone lengths l\u2032ij are fixed, thedepth along each bone is fully determined by the difference between the 3Dand 2D projected bone lengths: (dzij =\u221al2ij \u2212 l\u20322ij). However image spacelengths are sensitive to artist errors, as well as scale mismatches between955.7. Full Pose Optimizationthe character model and the drawing. Leveraging our previous observa-tions about human preference for foreshortened interpretations, we conse-quently combine conformity with a foreshortening minimization term which,together with the regularity constraints below, aims to mitigate drawing in-accuracies:Ev =\u2211(i,j)\u2208Swv(i, j) \u00b7 (P zi \u2212 P zj )2. (5.6)The weights wv(i, j) are determined by the anticipated foreshortening of thebone (i, j):wv(i, j) =\uf8f1\uf8f2\uf8f3e\u2212(fij\u2212favg)22\u03c32f , if fi,j < favg1.0, otherwise(5.7)Here fij = l\u2032ij\/lij is bone foreshortening and favg is the average bone fore-shortening for the entire character in the 2D solution. 
This weight is amonotonically decreasing function of the 2D-3D length ratio and is max-imized when this ratio is equal to or larger than the average across thedrawing. We view a ratio below 0.6 of the average as intentional foreshort-ening and consequently force the weight of the foreshortening minimizationterm to drop to zero for such ratios by setting \u03c3f = 0.2 using the three-sigmarule.Regularity Previous work on the interpretation of drawings (e.g. [10,138]) has discussed numerous domain-specific regularity criteria. In ourwork we found four key regularity cues which viewers expect to hold whenenvisioning drawn poses: parallelism, symmetry, contact, and smoothness.We use the 2D embedding to detect near-regular relationships and thenstrictly enforce them in 3D. For each pair of bones (i, j) and (m,n) withroughly parallel 2D projections (within 10\u25e6), we enforce their 3D bone di-rections to be the same: Pi \u2212 Pj = lij\/lmn(Pm \u2212 Pn). Similarly, if twosymmetrical limb bones are nearly symmetric around the spine plane, weforce exact symmetry - since symmetry is detected in 3D, we enforce thisconstraint in a post-process step. We also note that human observers expectclose 2D adjacencies, specifically contacts observed in 2D, to be preservedin 3D. We therefore detect pairs of adjacent 2D joint contour segments andconstrain the distance between their corresponding 3D joints. Lastly, wenote that gesture drawings typically aim to convey aesthetic poses [48]. Mo-tivated by Guay et al., we fit a quadratic polynomial spline to each skeletallimb in the 2D embedding; if all joints along the limb are deemed to be close965.7. Full Pose Optimizationenough to this spline, i.e. within half of each joint\u2019s radius from it, we add softconstraints attracting them toward corresponding spline locations.Joint Ordering. The drawing contours define two types of occlusions,inter- and intra- part (Figure 5.7). 
Inter-part occlusions, such as an arm infront of a body, indicate that at a particular point along one bone, the bodypart surrounding this bone is in front of a particular location on the bodypart around another bone. We encode these using relative locations on theparticipating bones:P zi tij + Pzj (1\u2212 tij) +Rij(tij) < P zk tkl + P zl (1\u2212 tkl)\u2212Rkl(tkl) (5.8)Here the two participating bones are (i, j) and (k, l), tij and tkl are thelinear parameters of the occluded and occluder points and Rij and Rkl arethe corresponding body part radii at these points.Intra-part occlusions, depicted via local contour T-junctions, encodepairwise joint ordering between end-joints i and j of individual bones. Thejoint associated with the stem of the \u201cT\u201d is expected to be farther awaythan the one associated with its top. To enforce these relationships we addthe inequality constraint:P zi < Pzj .Solver We minimize Ec + Ev subject to the simplicity and order con-straints detailed above. While our posing criteria are for convenience ex-pressed via positions, using positions as optimization variables is problem-atic, since preserving fixed bone lengths using a position based formulationrequires quadratic constraints, which are known to be hard to operate on [43]. Instead we follow the standard approach used in kinematics and roboticsand represent our 3D pose in terms of twist coordinates \u03b8ij [18]. We thenuse a solution method advocated by Gall et al. [43], who represent vertexpositions via twists, and use a Taylor expansion to linearize the resultingexpressions. Using such linearization we formulate the optimization of E as asequence of constrained quadratic optimizations. 
We augment the quadraticfunction being minimized at each iteration with a stabilization term aimedat keeping the new solution close to the previous one:\u03b1\u2211ij(\u03b8nij \u2212 \u03b8n\u22121ij )2 (5.9)Here the sum is evaluated over all twist variables \u03b8ij in the current n andprevious n \u2212 1 iterations. We use a large \u03b1 = 200 to avoid introducing975.8. Validationunnecessary and unnatural deviations from the bind pose. Note that sincethe stabilizer is computed with respect to the previous solution, this processallows for slow, but arbitrarily far, deviation from this pose. The resultingquadratic optimization with ordering constraints is solved at each iterationusing the Gurobi optimizer (www.gurobi.com). Since we have just a fewdozen variables the entire process takes on average 30 seconds.5.8 ValidationWe validate the key aspects of our method in a number of ways.Ground Truth and Perception Comparison. We validate our methodon Ground Truth (GT) data, by posing two models into complex poses(Figure 5.12) and using retraced projected occlusion contours as inputs toour method together with the same models in neutral bind pose. Our resultsclosely resemble the original.Our method aims to recover the viewer-perceived pose from the drawings;therefore a more interesting test is to compare our poses to viewer perceivedones. We performed this test using the same data, by providing our inputsto two 3D modeling experts and asking them to pose the models into posesdepicted by the drawings. The results (Figure 5.12) are visually even moresimilar to ours than ground truth. We showed each artist the ground truthmodels, our results and the result produced by the other artist, withoutidentifying which output was produced by which method, and asked \u201cHowwell do these poses capture the artist intended pose?\u201d. Both assessed all theshown 3D poses as reflective of the drawn one, and one commented that ourresult was \u201cthe most natural\u201d. 
The full text and the results of the evaluationcan be found at http:\/\/cs.ubc.ca\/~bmpix. The artists required roughly15 minutes to pose each model, 5 to 10 times more than our automatic posingtimes of 1.5 and 3 minutes.Perceived 2D Skeletal Embedding. To evaluate consistency acrossviewers and to compare our algorithm with viewer perception, we asked10 viewers (2 artists and 8 with no art background, 6 females and 4 males)to manually embed skeletons to match 4 gesture drawings. We provide view-ers with 2D images of the models and skeletons in the bind pose, with jointsclearly marked, and with bone chains numbered and colored with differentcolors to facilitate distinction between symmetric limbs. The full text andthe results of the evaluation can be found at http:\/\/cs.ubc.ca\/~bmpix.985.8. ValidationInput curvesGround Truth Our resultArtist 1 Artist 2Input curvesGround Truth Our resultArtist 1 Artist 2Figure 5.12: Comparing our results to GT data and artist modeled poses.We use as input the projected contours of the posed GT models combinedwith their bind posed originals (Figure 5.14) to automatically create posesqualitatively similar to both GT and artist results.Figure 5.13: Overlays of viewer created skeleton embeddings (lines removedfor clarity) and our results on same inputs.995.9. ResultsWhile viewers found the task conceptually easy, marking locations for alljoints and connecting them took participants 5 to 10 minutes per drawing.Figure 5.13 summarizes the resulting embeddings on two complex inputs,with various user embeddings overlaid to visualize correlations across view-ers. Viewer embeddings are largely consistent and agree very well with ouralgorithmic results, confirming that our method is built on solid perceptualfoundations.Qualitative Evaluation. We asked 3 artists and 6 non-experts to com-ment on our results. 
We showed them each pair of input and result sepa-rately and asked \u201cHow well does this 3D character pose capture the artistintended drawn pose?\u201d. The full text of the evaluation can be found athttp:\/\/cs.ubc.ca\/~bmpix. All respondents agreed that our results suc-cessfully capture the drawn poses. Minor differences noted by two par-ticipants included: variation in geometric details beyond the control of askeletal rig, such as extended vs contracted character belly in the yoga pose,Figure 5.14, top; and insufficient tightness of the cross-armed pose in Fig-ure 5.14,bottom. The latter example is particularly challenging since theartist did not draw the actual character palms.5.9 ResultsThroughout the chapter we have shown numerous examples of gesture posingusing our method. These examples range from relatively simple occlusion-free and relatively flat ones, e.g. Figure 5.15, to the karate, cat, anddance poses which exhibit large foreshortening and complex occlusions (Fig-ures 5.1, 5.14, 5.17). Our results extend beyond typical humanoid modelsattempted by previous 2D posing methods (e.g. [33]), to whimsical charac-ters and animals (Figure 5.14). Across all examples our method believablyreproduces the drawn poses. It seamlessly overcomes drawing inaccuracies,clearly visible in inputs such as the gymnastics poses in Figures 5.9, 5.15, 5.16where the drawn limbs are consistently longer and skinnier in proportion toits torso than those of the character model.Workflow. Most of our inputs were created using a traditional keyfram-ing workflow, where the artists had a model in front of them and drew theposes with this character in mind (Figures 5.1, 5.14). We also evaluated aninverse workflow inspired by legacy drawings - tracing the strokes on exist-ing gesture drawings and adjusting the character dimensions to roughly fitthose (e.g. the karate sequence in Figure 5.17). This workflow can enable1005.9. ResultsFigure 5.14: Typical two-stage processing results. 
Left to right: inputmodel, drawing, 2D skeleton fitting, output model.non-artists to create compelling poses and animations by re-using existingmaterial and assets, but is likely to be more challenging as the characterproportions are more likely to differ.1015.9. ResultsFigure 5.15: (center) 3D posing using only drawing conformity, (right) full3D solution.Figure 5.16: Impact of different bind poses.Impact of Design Choices. Figures 5.5, 5.8, 5.11, and 5.15 demon-strate the importance of our algorithmic choices, highlighting what can hap-pen if we omit one or more of the perceptual cues we employ. Figure 5.15demonstrates the effect of our foreshortening and regularity terms on 3Dpose reconstruction. Absent these terms, the posed character better con-forms to the input contours, but the 3D pose becomes less predictable ornatural. Figure 5.8 further highlights the distinction between more and lessnatural interpretations.Figure 5.16 shows the impact on our results of using different bind poses.As demonstrated the bind pose impacts part orientation for cases where thedrawing does not provide clear pose information, e.g. the feet of the char-acter, or when the skeletal resolution is not sufficient to capture orientationdetails, e.g. the character\u2019s palm orientation.Comparison to Prior Art. Figure 5.17 compares our results against [33],the closest prior work in terms of 2D posing ability. While both methods1025.9. ResultsFigure 5.17: (right) Davis et al.[33] trace stick figures over gesture drawingsand then pose characters semi-automatically. (left), We use the originaldrawings to automatically pose characters.(a) (c)(b) (d)Figure 5.18: Extreme mismatch in proportions between model and drawing(a) can lead to poor depth reconstruction (b); correcting the proportions inthe drawing (c) corrects the reconstruction. 
(d) Ambiguous drawings usinghighly oblique views can cause our 2D pose estimation to fail.recover qualitatively similar poses, we compute the pose fully automatically,and use only the drawings and the model in a bind pose as inputs. In con-trast Davis et al. use a much more elaborate and time consuming process- users first draw a stick figure on top of the drawing, marking all 2D jointlocations, then add extra annotations and select between multiple solutionsto resolve input ambiguities. As our evaluation shows, while drawing a stickfigure is not difficult it is nevertheless time consuming.Parameters and Runtimes. All our results were computed withthe default parameters listed in the text. For the multi-component model\u2018wynky\u2019 (Figure 5.14, bottom row) we disabled the crossing cost as on thismodel bones must intersect contours. Our method takes between 1 to 3minutes to compute the output pose; roughly 60% of this time is spent onthe 2D discrete embedding computation.Limitations. Our method is inherently limited by the descriptivenessof the drawing (Figure 5.18). We rely on a combination of drawing and1035.10. Conclusionsmodel\u2019s proportions to predict foreshortening. When the proportions ofthe drawn and posed characters are drastically different (in Figure 5.18a-b the drawn arms are much shorter and the drawn legs much longer thantheir model counterparts), our framework will by necessity misestimate thedegree of output foreshortening. Once the drawing proportions are adjustedwe correctly recover the intended pose (Figure 5.18c). Our pose estimationcan fail when a gesture is not evident from the drawing itself, due to e.g.oblique views (Figures 5.2c, 5.18d), but can typically correctly recover thepose given a more descriptive view (Figure 5.1).5.10 ConclusionsWe have presented and validated the first method for character posing usinggesture drawings. 
Our method leverages a set of observations about thekey properties of gesture drawings that make them effective at conveyingcharacter pose. Using these observations we are able to first recover a 2Dprojection of the character\u2019s pose that matches the drawing, and then imbueit with depth. We are able to reconstruct convincing 3D poses, confirmed toagree with viewer expectations, from a single gesture drawing while robustlycorrecting for drawing inaccuracy.Our work raises many directions for future research. It is empiricallyknown that in artist drawings \u201cerrors of intent are inherent and unavoid-able, and furthermore can be of significant magnitude\u201d [114]. An interestingperceptual question would therefore be to explore when and where artist in-tent and viewer perception diverge, and at which point human observers areno longer able to correct for artist inaccuracies. The algorithmic impact ofthis exploration would provide more strict definitions of when and how poserecovery should deviate from conformity constrains. Our framework focuseson drawing cues, and it would also be interesting to explore how we cancombine those cues with stronger anatomical priors on plausible characterposes and other domain cues.104Chapter 6Discussion and ConclusionIn this thesis, we have discussed several systems to recover 3D shape fromconcept and pose drawings, along with their underlying ideas and insightsinto interpretation of line drawings. We have presented the first systemto quadrangulate closed 3D curve networks, capable of creating a surfaceconsistent with artist intent. We have analyzed and formalized the defin-ing principle to construct the artist-intended surfaces by interpreting inputcurves as flow-lines. We have then introduced and discussed a novel sys-tem to construct 3D character canvas from a single complete drawing anda 3D skeleton. 
We have shown that 3D skeleton is sufficient to resolve am-biguities in drawings without imposing unrealistic simplifying requirementson 3D shape. Finally, we have presented the first system to pose rigged 3Dcharacters via a single descriptive gesture drawing. Thus we show that whenthe 3D shape is known, it is possible to interpret a gesture drawing with noextra user input.6.1 DiscussionHere we briefly re-iterate over the contributions of all the proposed methods.We also include a short discussion of each method within the scope of sketchinterpretation. In-depth discussion and additional details of each particularmethod can be found in the corresponding chapters of this thesis.3D curve networks for CAD objects can be created via modern interfaces[7, 138], can effectively communicate shape [35, 88, 89], and, as we show inChapter 3, can be automatically surfaced. These results imply that 3D curvenetworks can become an efficient tool to accelerate CAD modeling, combin-ing the expressivity of pen-and-paper sketches with full power of 3D models.Our contribution is the first method that infers the artist-intended surfaceautomatically by interpreting the input lines as representative flow-lines, it-eratively segmenting and pairing them via stable matching, and then usingthat matching to represent the final surface as a set of gradually changingflow-lines.1056.2. Future WorkCartoon drawings are known to unambiguously convey shape to humanobservers, yet are notoriously hard to parse and interpret automatically.Such difficulty stems in part from various ambiguities in the drawing, which,as we show in Chapter 4, can be successfully resolved by specifying the cor-responding pose via an overlaid 3D skeleton. Conversely, as we show inChapter 5, when the 3D model of the rigged character is known, the ges-ture drawing alone can determine the character\u2019s pose. 
Interestingly, humanobservers seem to be able to infer both shape and pose from character draw-ings alone, perhaps relying on stronger anatomical priors. More perceptionresearch is needed to characterize such anatomical knowledge and outlinethe limits of human interpretation of drawings.It has to be also noted that concept drawings of a character (Chapter4) and gesture drawings (Chapter 5) are drawn for different purposes, andthus may exhibit different features. Gesture drawings allow for more simpli-fied shape, distorted proportions, incorrect foreshortening, and are aimed atconveying the pose only; cartoon drawings, however, typically demonstratemore attention to details in order to faithfully capture the character\u2019s shape.Nevertheless, as our analysis of the art literature and perceptual studiesshows in Chapters 4 and 5, the general concepts we can use to interpretthem automatically are similar. Thus, we observe that a valid 3D inter-pretation of a drawing should conform to the drawn contours (conformity),should be simple, regular, and natural (simplicity), and should consider theinterplay between skeletal and contour adjacencies (line consistency).6.2 Future WorkSince its publication, the method in Chapter 3 has inspired some follow-upwork [103] and work on related issues [142]. Those papers solve some of theoriginally proposed future work, such as automatic loop extraction from thecurve network and automatic classification of the input curves into trimmingcurves and flow-lines. However, both the proposed in the current thesis andthe current state of the art [103] methods assume the whole curve network tobe complete, and are not directly suitable for handling incremental updatesto the curve network. 
Instead, it would be more interesting to see a sketch-based interactive system that allows to start with a very few curves anditeratively refine the suggested surface.The method proposed in Chapter 4 relies on all the lines in the inputdrawing being occlusion contours. More complicated drawings, however,contain multiple feature curves, such as eyes, nose, or auxiliary curves (see1066.3. Conclusionsinset below) [26], which, unless annotated, may cause artifacts in the final3D shape of our method. While determining feature curves based on purelygeometrical information seems a hard task, we envision this can be solvedusing machine learning techniques by casting it as a classification problem.Furthermore, output canvases in Chapter 4 are unionsof manifold meshes, not a single manifold mesh. A naiveapproach to improve that might be to apply a mesh booleanoperation [72], though that may introduce triangles of poorquality near mesh intersections. Instead, a more direct waywould be to use the trajectories (Section 4.1) as an input fora surface reconstruction method from cross-sections [143].A combined system of our method, a machine-learning clas-sifier of the input curves, and a surface reconstruction sys-tem may become a powerful modeling framework.In all our projects we don\u2019t require any user annotation or extra infor-mation about the input curves. This is very typical for vectorized drawings,but in some sketching systems [7, 99] more information is available. Some ofthat information, particularly timestamps of each curve, might prove to be auseful cue. The most direct application would be a more robust resolution ofGestalt-continuous contours: while we currently use a simple angular thresh-old method, one would expect Gestalt-continuous contours to be drawn oneexactly after another. In a more subtle way, there might be some correlationwhich part of the character users draw first, which could aid the full-poseoptimization process (Section 5.5.5). 
These questions call for more in-depthstudy.Additionally, as we noted in Section 4.1, when interpreting characterdrawings, viewers often rely on semantic information of some extra elements,such as facial features. The full-pose optimization process (Section 5.5.5)could benefit from a machine learning element that classifies such features,thus reducing our search space.6.3 ConclusionsProgress in touch screen manufacturing process has turned much of the com-modities, such as cellphones, tablets, or laptops, into convenient sketchingand drawing devices. Nevertheless, software, capable of correctly interpret-ing drawings, still has a long way to go. We hope that the approachespresented in this thesis will contribute to forming a solid foundation of thefuture drawing interpretation methods.107Bibliography[1] Fatemeh Abbasinejad, Pushkar Joshi, and Nina Amenta. Surfacepatches from unorganized space curves. Comput. Graph. Forum,30(5):1379\u20131387, 2011.[2] Adobe Flash. Professional. Adobe, Inc., 2013.[3] J. K. Aggarwal and Q. Cai. Human motion analysis: a review. In Non-rigid and Articulated Motion Workshop, 1997. Proceedings., IEEE,pages 90\u2013102, Jun 1997.[4] Anime Studio. Smith Micro Software, 2013.[5] Fred Attneave and Robert Frost. The determination of perceivedtridimensional orientation by minimum criteria. Perception & Psy-chophysics, 6(6):391\u2013396, 1969.[6] Oscar Kin-Chung Au, Chiew-Lan Tai, Hung-Kuo Chu, Daniel Cohen-Or, and Tong-Yee Lee. Skeleton extraction by mesh contraction. ACMTrans. Graph., 27(3):44:1\u201344:10, 2008.[7] S.H. Bae, Ravin Balakrishnan, and Karan Singh. ILoveSketch: as-natural-as-possible sketching system for creating 3d curve models. InProc. Symposium on User interface software and technology, pages151\u2013160. ACM, 2008.[8] J. Andreas B\u00e6rentzen, Rinat Abdrashitov, and Karan Singh. Inter-active shape modeling using a skeleton-mesh co-representation. ACMTrans. 
Graph., 33(4):132:1\u2013132:10, July 2014.[9] Katie Bassett, Ilya Baran, Johannes Schmid, Markus Gross, andRobert W. Sumner. Authoring and animating painterly characters.ACM Trans. Graph., 32(5):156:1\u2013156:12, October 2013.[10] Mikhail Bessmeltsev, Will Chang, Nicholas Vining, Alla Sheffer, andKaran Singh. Modeling character canvases from cartoon drawings.Transactions on Graphics, 34(5), 2015.108Bibliography[11] Mikhail Bessmeltsev, Caoyu Wang, Alla Sheffer, and Karan Singh.Design-driven quadrangulation of closed 3d curves. Transactions onGraphics (Proc. SIGGRAPH ASIA 2012), 31(5), 2012.[12] Preston Blair. Cartoon Animation. Walter Foster Publishing, 1994.[13] Jules Bloomenthal and Brian Wyvill, editors. Introduction to ImplicitSurfaces. Morgan Kaufmann Publishers Inc., San Francisco, CA, USA,1997.[14] David Bommes, T. Lempfer, and Leif Kobbelt. Global StructureOptimization of Quadrilateral Meshes. Computer Graphics Forum,30(2):375\u2013384, 2011.[15] David Bommes, Tobias Vossemer, and Leif Kobbelt. Quadrangu-lar parameterization for reverse engineering. In Proceedings of the7th international conference on Mathematical Methods for Curves andSurfaces, MMCS\u201908, pages 55\u201369, Berlin, Heidelberg, 2010. Springer-Verlag.[16] M. Bordegoni and C. Rizzi. Innovation in Product Design: From CADto Virtual Prototyping. Springer, 2011.[17] Pe\u00b4ter Borosa\u00b4n, Ming Jin, Doug DeCarlo, Yotam Gingold, and AndrewNealen. Rigmesh: Automatic rigging for part-based shape modelingand deformation. ACM Trans. Graph., 31(6):198:1\u2013198:9, 2012.[18] Christoph Bregler, Jitendra Malik, and Katherine Pullen. Twist basedacquisition and tracking of animal and human kinematics. Int. J.Comput. Vision, 56(3):179\u2013194, 2004.[19] Michael Brewer, Lori Freitag Diachin, Patrick Knupp, ThomasLeurent, and Darryl Melander. The mesquite mesh quality improve-ment toolkit. In Proceedings, 12th International Meshing Roundtable,pages 239\u2013250, 2003.[20] Philip Buchanan, R. 
Mukundan, and Michael Doggett. Automaticsingle-view character model reconstruction. In Proc. Symp. Sketch-Based Interfaces and Modeling, pages 5\u201314, 2013.[21] Tao Chen, Zhe Zhu, Ariel Shamir, Shi-Min Hu, and Daniel Cohen-Or. 3-sweep: Extracting editable objects from a single photo. ACMTransactions on Graphics (Proc. SIGGRAPH Asia), 32(6), 2013.109Bibliography[22] Yu Chen, Tae-Kyun Kim, and Roberto Cipolla. Silhouette-based ob-ject phenotype recognition using 3d shape priors. In IEEE Interna-tional Conference on Computer Vision, ICCV, pages 25\u201332, 2011.[23] Joseph Jacob Cherlin, Faramarz Samavati, Mario Costa Sousa, andJoaquim a. Jorge. Sketch-based modeling with few strokes. Proceed-ings of the 21st spring conference on Computer graphics - SCCG \u201905,1(212):137, 2005.[24] M. G. Choi, K. Yang, T. Igarashi, J. Mitani, and J. Lee. Retrievaland visualization of human motion data via stick figures. ComputerGraphics Forum, 31:2057\u20132065, 2012.[25] M.B. Clowes. On seeing things. Artificial Intelligence, 2(1):79 \u2013 116,1971.[26] Forrester Cole, Aleksey Golovinskiy, Alex Limpaecher, Heather Stod-dart Barros, Adam Finkelstein, Thomas Funkhouser, and SzymonRusinkiewicz. Where do people draw lines? ACM Transactions onGraphics, 27(3):1, August 2008.[27] S. Coons. Surfaces for computer aided design. Technical Report, MIT.,1964.[28] Frederic Cordier, Hyewon Seo, Jinho Park, and Jun Yong Noh. Sketch-ing of mirror-symmetric shapes. IEEE Trans. Visualization and Com-puter Graphics, 17(11), 2011.[29] Nicu D. Cornea, Deborah Silver, Xiaosong Yuan, and Raman Bala-subramanian. Computing hierarchical curve-skeletons of 3d objects.The Visual Computer, 21(11):945\u2013955, 2005.[30] Joel Daniels, Claudio T. Silva, and Elaine Cohen. Semi-regularquadrilateral-only remeshing from simplified base domains. In Proc.Symposium on Geometry Processing, pages 1427\u20131435, 2009.[31] Joel Daniels, Cla\u00b4udio T. 
Silva, Jason Shepherd, and Elaine Cohen.Quadrilateral mesh simplification. ACM Transactions on Graphics,27(5):1, 2008.[32] K Das, P Diaz-Gutierrez, and M Gopi. Sketching free-form surfacesusing network of curves. Sketch-based interfaces and modeling SBIM,2005.110Bibliography[33] James Davis, Maneesh Agrawala, Erika Chuang, Zoran Popovic\u00b4, andDavid Salesin. A Sketching Interface for Articulated Figure Anima-tion. Proc. Symposium on Computer Animation, pages 320\u2013328, 2003.[34] Edilson de Aguiar, Carsten Stoll, Christian Theobalt, Naveed Ahmed,Hans-Peter Seidel, and Sebastian Thrun. Performance capture fromsparse multi-view video. ACM Transactions on Graphics, 27:1, 2008.[35] F. de Goes, S. Goldenstein, M. Desbrun, and L. Velho. Exoskeleton:Curve network abstraction for 3d shapes. Computer and Graphics,35(1):112\u2013121, 2011.[36] Koos Eissen and Roselien Steur. Sketching: The Basics. Bis Publish-ers, 2011.[37] Even Entem, Lo\u00a8\u0131c Barthe, Marie-Paule Cani, Frederic Cordier, andMichiel Van De Panne. Modeling 3D animals from a side-view sketch.Computer and Graphics, (38), 2014.[38] Gerald Farin. Curves and surfaces for computer aided geometric de-sign: a practical guide. Academic Press, 1992.[39] Gerald Farin and Dianne Hansford. Discrete Coons patches. ComputerAided Geometric Design, 16:691\u2013700, 1999.[40] Mark Finch and Hugues Hoppe. Freeform Vector Graphics with Con-trolled Thin-Plate Splines. ACM Trans. on Graphics (SIGGRAPHAsia), 30(6), 2011.[41] Fabian Di Fiore, Philip Schaeken, and Koen Elens. Automatic in-betweening in computer assisted animation by exploiting 2.5 D mod-elling techniques. Proc. Conference on Computer Animation, pages192\u2013200, 2001.[42] A. Gahan. 3D Automotive Modeling: An Insider\u2019s Guide to 3D CarModeling and Design for Games and Film. Elsevier Science, 2010.[43] Juergen Gall, Bodo Rosenhahn, Thomas Brox, and Hans Peter Seidel.Optimization and filtering for human motion capture : AAA multi-layer framework. 
International Journal of Computer Vision, 87(1-2):75\u201392, 2010.[44] Juergen Gall, Carsten Stoll, Edilson De Aguiar, Christian Theobalt,Bodo Rosenhahn, and Hans Peter Seidel. Motion capture using joint111Bibliographyskeleton tracking and surface estimation. IEEE Computer Vision andPattern Recognition Workshops, pages 1746\u20131753, 2009.[45] Kun Gao and Alyn Rockwood. Multi-sided attribute based modeling.Mathematics of Surfaces XI, pages 219\u2013232, 2005.[46] Yotam Gingold, Takeo Igarashi, and Denis Zorin. Structured annota-tions for 2D-to-3D modeling. ACM Trans. Graph., 28(5), 2009.[47] Oliver Glauser, Wan-Chun Ma, Daniele Panozzo, Alec Jacobson, Ot-mar Hilliges, and Olga Sorkine-Hornung. Rig Animation with a Tangi-ble and Modular Input Device. ACM Transactions on Graphics (Pro-ceedings of ACM SIGGRAPH), 2016.[48] Martin Guay, Marie-paule Cani, and Remi Ronfard. The Line of Ac-tion : an Intuitive Interface for Expressive Character Posing. ACMTrans. on Graphics, (6):8, 2013.[49] Arthur Leighton Guptill. Sketching and rendering in pencil. New York,The Pencil Points Press, Inc., 1922.[50] Gurobi Optimization. http:\/\/www.gurobi.com\/, 2013.[51] Fabian Hahn, Frederik Mutzel, Stelian Coros, BernhardThomaszewski, Maurizio Nitti, Markus Gross, and Robert W.Sumner. Sketch abstractions for character posing. In Proc. Symp.Computer Animation, pages 185\u2013191, 2015.[52] R.B. Hale and T. Coyle. Master Class in Figure Drawing. Watson-Guptill, 1991.[53] Michael Hampton. Figure Drawing: Design and Invention. Figure-drawing.info, 2009.[54] Ronie Hecker and Kenneth Perlin. Controlling 3d objects by sketching2d views. Proc. SPIE, 1828:46\u201348, 1992.[55] Robert Hess and David Field. Integration of contours: new insights.Trends in Cognitive Sciences, 3(12):480\u2013486, December 1999.[56] Donald D Hoffman. Visual intelligence: how to create what we see.Norton, New York, NY, 2000.[57] Burne Hogarth. Dynamic Figure Drawing. 
Watson-Guptill, 1996.112Bibliography[58] Alexander Hornung, Ellen Dekkers, and Leif Kobbelt. Character ani-mation from 2D pictures and 3D motion data. ACM Transactions onGraphics, 26(1):1\u20139, 2007.[59] D. A. Huffman. Impossible Objects as Nonsense Sentences. MachineIntelligence, 6:295\u2013323, 1971.[60] Takeo Igarashi, Satoshi Matsuoka, and Hidehiko Tanaka. Teddy: Asketching interface for 3d freeform design. In Proc. SIGGRAPH, pages409\u2013416, 1999.[61] Clara Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu.Human3.6m: Large scale datasets and predictive methods for 3d hu-man sensing in natural environments. IEEE Trans. Pattern Analysis& Machine Intelligence, 36(7):1325\u20131339, 2014.[62] Robert W. Irving. An efficient algorithm for the \u201dstable roommates\u201dproblem. J. Algorithms, 6(4):577\u2013595, 1985.[63] Alec Jacobson, Ilya Baran, J Popovic, and Olga Sorkine. Boundedbiharmonic weights for real-time deformation. ACM Trans. Graph.,30(4):78:1\u2014-78:8, 2011.[64] Alec Jacobson and Olga Sorkine. Stretchable and Twistable Bonesfor Skeletal Shape Deformation. ACM Trans. Graph., 30(6):165:1\u20138,2011.[65] Eakta Jain, Yaser Sheikh, Moshe Mahler, and Jessica Hodgins. Three-dimensional proxies for hand-drawn characters. ACM Transactions onGraphics, 31(1):1\u201316, 2012.[66] Tao Ju, Qian-Yi Zhou, Michiel van de Panne, Daniel Cohen-Or, andUlrich Neumann. Reusable skinning templates using cage-based de-formations. ACM Trans. Graph., 27(5):122:1\u2013122:10, December 2008.[67] Felix Ka\u00a8lberer, Matthias Nieser, and Konrad Polthier. Quadcover -surface parameterization using branched coverings. Computer Graph-ics Forum, 26(3):375\u2013384, 2007.[68] Olga A. Karpenko and John F. Hughes. SmoothSketch: 3D free-form shapes from complex sketches. ACM Transactions on Graphics(TOG), 1(212):589\u2013598, 2006.113Bibliography[69] K. Koffka. Principles of Gestalt Psychology. International library ofpsychology, philosophy, and scientific method. 
Routledge & K. Paul,1955.[70] D. Koller and N. Friedman. Probabilistic Graphical Models: Principlesand Techniques. MIT Press, 2009.[71] Vladislav Kraevoy, Alla Sheffer, and Michiel van de Panne. Modelingfrom contour drawings. In Proc. Symposium on Sketch-Based Inter-faces and Modeling, pages 37\u201344, 2009.[72] Shankar Krishnan and Dinesh Manocha. An efficient surface intersec-tion algorithm based on lower-dimensional formulation. ACM Trans.Graph., 16(1):74\u2013106, January 1997.[73] N. Leland. The New Creative Artist. F+W Media, 2006.[74] Zohar Levi and Craig Gotsman. ArtiSketch: A System for ArticulatedSketch Modeling. Computer Graphics Forum, 32(2):235\u2013244, 2013.[75] Bruno Levy. Dual domain extrapolation. ACM Transactions onGraphics (Proc. SIGGRAPH), 22(3):364\u2013369, 2003.[76] Bruno Levy and Yang Liu. Lp centroidal voronoi tesselation and itsapplications. ACM Trans. Graph., 2010.[77] Juncong Lin, Takeo Igarashi, Jun Mitani, and Greg Saul. A sketchinginterface for sitting-pose design. In Proc. Sketch-Based Interfaces andModeling Symposium, pages 111\u2013118, 2010.[78] H Lipson and M Shpitalni. Optimization-based reconstruction of a 3dobject from a single freehand line drawing. In ACM SIGGRAPH 2007Courses, SIGGRAPH \u201907, New York, NY, USA, 2007. ACM.[79] Alan K. Mackworth. On reading sketch maps. In Proceedings of the5th International Joint Conference on Artificial Intelligence - Volume2, IJCAI\u201977, pages 598\u2013606, San Francisco, CA, USA, 1977. MorganKaufmann Publishers Inc.[80] Jitendra Malik. Interpreting line drawings of curved objects. Interna-tional Journal of Computer Vision, 1(1):73\u2013103, 1987.[81] P. Malraison. N-SIDED Surfaces: a Survey. Defense Technical Infor-mation Center, 2000.114Bibliography[82] C Mao, S F Qin, and D K Wright. A sketch-based gesture interface forrough 3D stick figure animation. Proc. Sketch Based Interfaces andModeling, 2005.[83] C. Maraffi. Maya Character Creation: Modeling and Animation Con-trols. 
Voices that matter. Pearson Education, 2003.[84] Martin Marinov and Leif Kobbelt. A Robust Two-Step Procedure forQuad-Dominant Remeshing. Computer Graphics Forum, 25(3):537\u2013546, September 2006.[85] George Markowsky and Michael A. Wesley. Fleshing out wire frames.IBM J. Res. Dev., 24(5):582\u2013597, September 1980.[86] D. Marr. Analysis of occluding contour. Proceedings of the RoyalSociety of London. Series B, Biological Sciences, 197(1129):441\u2013475,1977.[87] James McCrae and Karan Singh. Sketching piecewise clothoid curves.Comput. Graph., 33:452\u2013461, August 2009.[88] James McCrae, Karan Singh, and Niloy J. Mitra. Slices: a shape-proxybased on planar sections. In Proceedings of the 2011 SIGGRAPH AsiaConference, SA \u201911, pages 168:1\u2013168:12, 2011.[89] Ravish Mehra, Qingnan Zhou, Jeremy Long, Alla Sheffer, Amy Gooch,and Niloy J. Mitra. Abstraction of man-made shapes. TOG (Proc.SIGGRAPH Asia), 28(5):1\u201310, 2009.[90] Paul Michalik, Dae Hyun Kim, and Beat D. Bruderlin. Sketch- andconstraint-based design of b-spline surfaces. In Proceedings of the Sev-enth ACM Symposium on Solid Modeling and Applications, SMA \u201902,pages 297\u2013304, New York, NY, USA, 2002. ACM.[91] Scott A. Mitchell. High fidelity interval assignment. In Proceedings,6th International Meshing Roundtable, pages 33\u201344, 1997.[92] Thomas B. Moeslund, Adrian Hilton, and Volker Krger. A survey ofadvances in vision-based human motion capture and analysis. Com-puter Vision and Image Understanding, 104(23):90 \u2013 126, 2006.[93] K. Nakayama and S. Shimojo. Experiencing and Perceiving VisualSurfaces. Science, 257:1357\u20131363, 1992.115Bibliography[94] A. Nasri, M. Sabin, and Z. Yasseen. Filling N -Sided Regions byQuad Meshes for Subdivision Surfaces. Computer Graphics Forum,28(6):1644\u20131658, September 2009.[95] Andrew Nealen, Takeo Igarashi, Olga Sorkine, and Marc Alexa. Fiber-mesh: designing freeform surfaces with 3d curves. ACM Trans. 
Graph.,26, July 2007.[96] Andrew Nealen, Olga Sorkine, Marc Alexa, and Daniel Cohen-Or. Asketch-based interface for detail-preserving mesh editing. ACM Trans.Graph., 24(3):1142\u20131147, 2005.[97] Kimon Nicolades. The Natural Way to Draw. Houghton Mifflin, 1975.[98] L. Olsen, F.F. Samavati, M.C. Sousa, and J. Jorge. Sketch-basedmodeling: A survey. Computers & Graphics, 33, 2009.[99] Luke Olsen, Faramarz Samavati, and Joaquim A. Jorge. Naturasketch:Modeling from images and natural sketches. IEEE Computer Graphicsand Applications, 31(6):24\u201334, 2011.[100] Gu\u00a8nay Orbay and Levent Burak Kara. Sketch-based modeling ofsmooth surfaces using adaptive curve networks. In Proceedings ofthe Eighth Eurographics Symposium on Sketch-Based Interfaces andModeling, SBIM \u201911, pages 71\u201378, 2011.[101] S.J Owen. A survey of unstructured mesh generation technology. InProc. International Meshing Roundtable, 1998.[102] Stephen E. Palmer. The effects of contextual scenes on the identifica-tion of objects. Memory and Cognition, 3(5):519\u2013526, 1975.[103] Hao Pan, Yang Liu, Alla Sheffer, Nicholas Vining, Chang-Jian Li,and Wenping Wang. Flow aligned surfacing of curve networks. ACMTrans. Graph., 34(4):127:1\u2013127:10, July 2015.[104] Paperman. Walt Disney Animation Studios, 2012.[105] Zygmunt Pizlo and AdamK. Stevenson. Shape constancy from novelviews. Perception & Psychophysics, 61(7):1299\u20131307, 1999.[106] Alec Rivers, Fredo Durand, and Takeo Igarashi. 2.5D cartoon models.ACM Transactions on Graphics, 2010.116Bibliography[107] K.L.P. Rose. Modeling developable surfaces from arbitrary boundarycurves. Processing, (August), 2007.[108] E. Ruiz-Girone\u00b4s and J. Sarrate. Generation of structured meshesin multiply connected surfaces using submapping. Adv. Eng. Softw.,41:379\u2013387, February 2010.[109] David Salomon. Curves and Surfaces for Computer Graphics. Spring-er-Verlag, 2006.[110] Peter Sand, L McMillan, and J Popovic. Continuous capture of skindeformation. 
ACM Transactions on Graphics (TOG), pages 578\u2013586,2003.[111] Benjamin Sapp, Alexander Toshev, and Ben Taskar. Cascaded modelsfor articulated pose estimation. Lecture Notes in Computer Science,6312:406\u2013420, 2010.[112] S. Schaefer, J. Warren, and D. Zorin. Lofting curve networks us-ing subdivision surfaces. Proceedings of the 2004 Eurographics\/ACMSIGGRAPH symposium on Geometry processing - SGP \u201904, page 103,2004.[113] Johannes Schmid, Martin Sebastian Senn, Markus Gross, andRobert W. Sumner. Overcoat: an implicit canvas for 3d painting.ACM Trans. Graph., 30:28:1\u201328:10, August 2011.[114] Ryan Schmidt, Azam Khan, Gord Kurtenbach, and Karan Singh. Onexpert performance in 3D curve-drawing tasks. Proc. Symposium onSketch-Based Interfaces and Modeling, 1:133, 2009.[115] Ryan Schmidt, Azam Khan, Karan Singh, and Gord Kurtenbach. An-alytic drawing of 3d scaffolds. ACM Trans. on Graph. (Proc. SIG-GRAPH Asia), 28(5), 2009.[116] Cloud Shao, Adrien Bousseau, Alla Sheffer, and Karan Singh.Crossshade: Shading concept sketches using cross-section curves.ACM Trans. Graphics, 31(4), 2012.[117] Solomon Eyal Shimony. Finding maps for belief networks is np-hard.Artificial Intelligence, 68(2):399 \u2013 410, 1994.[118] Alex Shtof, Alexander Agathos, Yotam Gingold, Ariel Shamir, andDaniel Cohen-Or. Geosemantic snapping for sketch-based modeling.Computer Graphics Forum (Proc. Eurographics), 32(2):245\u2013253, 2013.117Bibliography[119] Karan Singh, Hans Pedersen, and Venkat Krishnamurthy. Featurebased retargeting of parameterized geometry. In Proceedings of the Ge-ometric Modeling and Processing 2004, GMP \u201904, pages 163\u2013, Wash-ington, DC, USA, 2004. IEEE Computer Society.[120] Kent A. Stevens. The visual interpretation of surface contours. Arti-ficial Intelligence, 17, 1981.[121] Christopher Summerfield and Etienne Koechlin. A neural repre-sentation of prior information during perceptual inference. 
Neuron,59(2):336\u201347, July 2008.[122] Daniel Sy\u00b4kora, Ladislav Kavan, Martin C\u02c7ad\u00b4\u0131k, Ondr\u02c7ej Jamri\u02c7ska,Alec Jacobson, Brian Whited, Maryann Simmons, and Olga Sorkine-Hornung. Ink-and-ray: Bas-relief meshes for adding global illumina-tion effects to hand-drawn characters. ACM Transaction on Graphics,33(2):16, 2014.[123] Chiew-lan Tai, Hongxin Zhang, and Jacky Chun-kin Fong. PrototypeModeling from Sketched Silhouettes based on Convolution Surfaces.Computer Graphics Forum, 23(1):71\u201383, 2004.[124] Jimmy J. M. Tan. A necessary and sufficient condition for the existenceof a complete stable matching. J. Algorithms, 12(1):154\u2013178, January1991.[125] B Tekin, X Sun, Wang, V. Lepetit, and P. Fua. Predicting people\u2019s3d poses from short sequences. In arXiv preprint arXiv:1504.08200,2015.[126] Jean-Marc Thiery, Emilie Guy, and Tamy Boubekeur. Sphere-meshes:Shape approximation using spherical quadric error metrics. ACMTransaction on Graphics, 32(6), 2013.[127] Y. Tong, P. Alliez, D. Cohen-Steiner, and M. Desbrun. Designingquadrangulations with discrete harmonic forms. In Proceedings ofthe fourth Eurographics symposium on Geometry processing, SGP \u201906,pages 201\u2013210, Aire-la-Ville, Switzerland, Switzerland, 2006. Euro-graphics Association.[128] T. Va\u00b4rady, Alyn Rockwood, and P. Salvi. Transfinite surface interpo-lation over irregular n-sided domains. Computer-Aided Design, (iv),2011.118Bibliography[129] David L. Waltz. Generating semantic descriptions from drawings ofscenes with shadows. Technical report, Cambridge, MA, USA, 1972.[130] Yu Wang, Alec Jacobson, Jernej Barbic\u02c7, and Ladislav Kavan. Linearsubspace design for real-time shape deformation. ACM Trans. Graph.,34(4):57:1\u201357:11, July 2015.[131] Xiaolin Wei and Jinxiang Chai. Intuitive interactive human-characterposing with millions of example poses. IEEE Comput. Graph. Appl.,31(4):78\u201388, 2011.[132] Robert W. Weiner, Irving B. Healy, Alice F. 
Proctor. Handbook ofPsychology, Experimental Psychology (2nd Edition). 2012.[133] M. A. Wesley and G. Markowsky. Fleshing out projections. IBMJournal of Research and Development, 25(6):934\u2013954, Nov 1981.[134] B. Whited, G. Noris, M. Simmons, R. Sumner, M. Gross, andJ. Rossignac. Betweenit: An interactive tool for tight inbetweening.Comput. Graphics Forum (Proc. Eurographics), 29(2):605\u2013614, 2010.[135] Lance Williams. 3d paint. In Proceedings of the 1990 Symposiumon Interactive 3D Graphics, I3D \u201990, pages 225\u2013233, New York, NY,USA, 1990. ACM.[136] Richard Williams. The Animator\u2019s Survival Kit. Faber and Faber,2001.[137] Kwan-Yee K. Wong, Paulo R.S. Mendona, and Roberto Cipolla. Re-construction of surfaces of revolution from single uncalibrated views.Image and Vision Computing, 22(10):829 \u2013 836, 2004.[138] Baoxuan Xu, William Chang, Alla Sheffer, Adrien Bousseau, JamesMcCrae, and Karan Singh. True2form: 3d curve networks from 2dsketches via selective regularization. ACM Transactions on Graphics,33(4), 2014.[139] Genzhi Ye, Yebin Liu, Nils Hasler, Xiangyang Ji, Qionghai Dai, andChristian Theobalt. Performance capture of interacting characterswith handheld kinects. In Proc. European Conference on ComputerVision, pages 828\u2013841, 2012.119Bibliography[140] Robert C. Zeleznik, Kenneth P. Herndon, and John F. Hughes. Sketch:An interface for sketching 3d scenes. In Proceedings of the 23rd An-nual Conference on Computer Graphics and Interactive Techniques,SIGGRAPH \u201996, pages 163\u2013170, New York, NY, USA, 1996. ACM.[141] Jianmin Zhao and Norman I. Badler. Inverse kinematics position-ing using nonlinear programming for highly articulated figures. ACMTransactions on Graphics, 13(4):313\u2013336, 1994.[142] Yixin Zhuang, Ming Zou, Nathan Carr, and Tao Ju. A general andefficient method for finding cycles in 3d curve networks. 
ACM Trans.Graph., 32(6):180:1\u2013180:10, November 2013.[143] Ming Zou, Michelle Holloway, Nathan Carr, and Tao Ju. Topology-constrained surface reconstruction from cross-sections. ACM Trans.Graph., 34(4):128:1\u2013128:10, July 2015.120","attrs":{"lang":"en","ns":"http:\/\/www.w3.org\/2009\/08\/skos-reference\/skos.html#note","classmap":"oc:AnnotationContainer"},"iri":"http:\/\/www.w3.org\/2009\/08\/skos-reference\/skos.html#note","explain":"Simple Knowledge Organisation System; Notes are used to provide information relating to SKOS concepts. There is no restriction on the nature of this information, e.g., it could be plain text, hypertext, or an image; it could be a definition, information about the scope of a concept, editorial information, or any other type of information."}],"Genre":[{"label":"Genre","value":"Thesis\/Dissertation","attrs":{"lang":"en","ns":"http:\/\/www.europeana.eu\/schemas\/edm\/hasType","classmap":"dpla:SourceResource","property":"edm:hasType"},"iri":"http:\/\/www.europeana.eu\/schemas\/edm\/hasType","explain":"A Europeana Data Model Property; This property relates a resource with the concepts it belongs to in a suitable type system such as MIME or any thesaurus that captures categories of objects in a given field. 
It does NOT capture aboutness"}],"GraduationDate":[{"label":"GraduationDate","value":"2016-09","attrs":{"lang":"en","ns":"http:\/\/vivoweb.org\/ontology\/core#dateIssued","classmap":"vivo:DateTimeValue","property":"vivo:dateIssued"},"iri":"http:\/\/vivoweb.org\/ontology\/core#dateIssued","explain":"VIVO-ISF Ontology V1.6 Property; Date Optional Time Value, DateTime+Timezone Preferred "}],"IsShownAt":[{"label":"IsShownAt","value":"10.14288\/1.0308703","attrs":{"lang":"en","ns":"http:\/\/www.europeana.eu\/schemas\/edm\/isShownAt","classmap":"edm:WebResource","property":"edm:isShownAt"},"iri":"http:\/\/www.europeana.eu\/schemas\/edm\/isShownAt","explain":"A Europeana Data Model Property; An unambiguous URL reference to the digital object on the provider\u2019s website in its full information context."}],"Language":[{"label":"Language","value":"eng","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/language","classmap":"dpla:SourceResource","property":"dcterms:language"},"iri":"http:\/\/purl.org\/dc\/terms\/language","explain":"A Dublin Core Terms Property; A language of the resource.; Recommended best practice is to use a controlled vocabulary such as RFC 4646 [RFC4646]."}],"Program":[{"label":"Program","value":"Computer Science","attrs":{"lang":"en","ns":"https:\/\/open.library.ubc.ca\/terms#degreeDiscipline","classmap":"oc:ThesisDescription","property":"oc:degreeDiscipline"},"iri":"https:\/\/open.library.ubc.ca\/terms#degreeDiscipline","explain":"UBC Open Collections Metadata Components; Local Field; Indicates the program for which the degree was granted."}],"Provider":[{"label":"Provider","value":"Vancouver : University of British Columbia Library","attrs":{"lang":"en","ns":"http:\/\/www.europeana.eu\/schemas\/edm\/provider","classmap":"ore:Aggregation","property":"edm:provider"},"iri":"http:\/\/www.europeana.eu\/schemas\/edm\/provider","explain":"A Europeana Data Model Property; The name or identifier of the organization who delivers data directly to an 
aggregation service (e.g. Europeana)"}],"Publisher":[{"label":"Publisher","value":"University of British Columbia","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/publisher","classmap":"dpla:SourceResource","property":"dcterms:publisher"},"iri":"http:\/\/purl.org\/dc\/terms\/publisher","explain":"A Dublin Core Terms Property; An entity responsible for making the resource available.; Examples of a Publisher include a person, an organization, or a service."}],"Rights":[{"label":"Rights","value":"Attribution-ShareAlike 4.0 International","attrs":{"lang":"*","ns":"http:\/\/purl.org\/dc\/terms\/rights","classmap":"edm:WebResource","property":"dcterms:rights"},"iri":"http:\/\/purl.org\/dc\/terms\/rights","explain":"A Dublin Core Terms Property; Information about rights held in and over the resource.; Typically, rights information includes a statement about various property rights associated with the resource, including intellectual property rights."}],"RightsURI":[{"label":"RightsURI","value":"http:\/\/creativecommons.org\/licenses\/by-sa\/4.0\/","attrs":{"lang":"*","ns":"https:\/\/open.library.ubc.ca\/terms#rightsURI","classmap":"oc:PublicationDescription","property":"oc:rightsURI"},"iri":"https:\/\/open.library.ubc.ca\/terms#rightsURI","explain":"UBC Open Collections Metadata Components; Local Field; Indicates the Creative Commons license url."}],"ScholarlyLevel":[{"label":"ScholarlyLevel","value":"Graduate","attrs":{"lang":"en","ns":"https:\/\/open.library.ubc.ca\/terms#scholarLevel","classmap":"oc:PublicationDescription","property":"oc:scholarLevel"},"iri":"https:\/\/open.library.ubc.ca\/terms#scholarLevel","explain":"UBC Open Collections Metadata Components; Local Field; Identifies the scholarly level of the author(s)\/creator(s)."}],"Title":[{"label":"Title","value":"Recovering 3D shape from concept and pose 
drawings","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/title","classmap":"dpla:SourceResource","property":"dcterms:title"},"iri":"http:\/\/purl.org\/dc\/terms\/title","explain":"A Dublin Core Terms Property; The name given to the resource."}],"Type":[{"label":"Type","value":"Text","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/type","classmap":"dpla:SourceResource","property":"dcterms:type"},"iri":"http:\/\/purl.org\/dc\/terms\/type","explain":"A Dublin Core Terms Property; The nature or genre of the resource.; Recommended best practice is to use a controlled vocabulary such as the DCMI Type Vocabulary [DCMITYPE]. To describe the file format, physical medium, or dimensions of the resource, use the Format element."}],"URI":[{"label":"URI","value":"http:\/\/hdl.handle.net\/2429\/58914","attrs":{"lang":"en","ns":"https:\/\/open.library.ubc.ca\/terms#identifierURI","classmap":"oc:PublicationDescription","property":"oc:identifierURI"},"iri":"https:\/\/open.library.ubc.ca\/terms#identifierURI","explain":"UBC Open Collections Metadata Components; Local Field; Indicates the handle for item record."}],"SortDate":[{"label":"Sort Date","value":"2016-12-31 AD","attrs":{"lang":"en","ns":"http:\/\/purl.org\/dc\/terms\/date","classmap":"oc:InternalResource","property":"dcterms:date"},"iri":"http:\/\/purl.org\/dc\/terms\/date","explain":"A Dublin Core Elements Property; A point or period of time associated with an event in the lifecycle of the resource.; Date may be used to express temporal information at any level of granularity. Recommended best practice is to use an encoding scheme, such as the W3CDTF profile of ISO 8601 [W3CDTF].; A point or period of time associated with an event in the lifecycle of the resource.; Date may be used to express temporal information at any level of granularity. Recommended best practice is to use an encoding scheme, such as the W3CDTF profile of ISO 8601 [W3CDTF]."}]}