@InProceedings{pmlr-v95-huang18a,
  title     = {Cartoon-to-Photo Facial Translation with Generative Adversarial Networks},
  author    = {Huang, Junhong and Tan, Mingkui and Yan, Yuguang and Qing, Chunmei and Wu, Qingyao and Yu, Zhuliang},
  booktitle = {Proceedings of The 10th Asian Conference on Machine Learning},
  pages     = {566--581},
  year      = {2018},
  editor    = {Zhu, Jun and Takeuchi, Ichiro},
  volume    = {95},
  series    = {Proceedings of Machine Learning Research},
  month     = {14--16 Nov},
  publisher = {PMLR},
  pdf       = {http://proceedings.mlr.press/v95/huang18a/huang18a.pdf},
  url       = {http://proceedings.mlr.press/v95/huang18a.html},
  abstract  = {Cartoon-to-photo facial translation could be widely used in different applications, such as law enforcement and anime remaking. Nevertheless, current general-purpose image-to-image models usually produce blurry or unrelated results in this task. In this paper, we propose a Cartoon-to-Photo facial translation method with Generative Adversarial Networks (CP-GAN) for inverting cartoon faces to generate photo-realistic and related face images. In order to produce convincing faces with intact facial parts, we exploit global and local discriminators to capture global facial features and three local facial regions, respectively. Moreover, we use a specific content network to capture and preserve face characteristics and identity between cartoons and photos. As a result, the proposed approach can generate convincing high-quality faces that satisfy both the characteristic and identity constraints of input cartoon faces. Compared with recent works on unpaired image-to-image translation, our proposed method is able to generate more realistic and correlated images.}
}