In [2]:
!git clone https://github.com/Puzer/stylegan-encoder.git
Cloning into 'stylegan-encoder'...
remote: Enumerating objects: 105, done.
remote: Total 105 (delta 0), reused 0 (delta 0), pack-reused 105
Receiving objects: 100% (105/105), 10.39 MiB | 5.00 MiB/s, done.
Resolving deltas: 100% (28/28), done.
In [221]:
!mkdir -p data
!wget -nc -O data/starr.jpg https://www.dropbox.com/s/oyr35cz55lry5my/starr.jpg?dl=1
--2019-08-30 16:51:53--  https://www.dropbox.com/s/oyr35cz55lry5my/starr.jpg?dl=1
Resolving www.dropbox.com (www.dropbox.com)... 162.125.80.1, 2620:100:6030:1::a27d:5001
Connecting to www.dropbox.com (www.dropbox.com)|162.125.80.1|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: /s/dl/oyr35cz55lry5my/starr.jpg [following]
--2019-08-30 16:51:54--  https://www.dropbox.com/s/dl/oyr35cz55lry5my/starr.jpg
Reusing existing connection to www.dropbox.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://ucd85b7267125ced05ab39f0372d.dl.dropboxusercontent.com/cd/0/get/AnlfxkJGg8uFL6J1PfwpATHeoQtBQpIxaxhjMIwBvxYJWGLANSTf9txsoA0sOJYlHww6BMr4hwRVWQvUZEOZPi8KwpIL82YF-P4u9kqf0s9LXzTwj7Vcg4fHLx8KDJ_R7Ek/file?dl=1# [following]
--2019-08-30 16:51:54--  https://ucd85b7267125ced05ab39f0372d.dl.dropboxusercontent.com/cd/0/get/AnlfxkJGg8uFL6J1PfwpATHeoQtBQpIxaxhjMIwBvxYJWGLANSTf9txsoA0sOJYlHww6BMr4hwRVWQvUZEOZPi8KwpIL82YF-P4u9kqf0s9LXzTwj7Vcg4fHLx8KDJ_R7Ek/file?dl=1
Resolving ucd85b7267125ced05ab39f0372d.dl.dropboxusercontent.com (ucd85b7267125ced05ab39f0372d.dl.dropboxusercontent.com)... 162.125.80.6, 2620:100:6030:6::a27d:5006
Connecting to ucd85b7267125ced05ab39f0372d.dl.dropboxusercontent.com (ucd85b7267125ced05ab39f0372d.dl.dropboxusercontent.com)|162.125.80.6|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1619585 (1.5M) [application/binary]
Saving to: ‘data/starr.jpg’

data/starr.jpg      100%[===================>]   1.54M   845KB/s    in 1.9s    

2019-08-30 16:51:56 (845 KB/s) - ‘data/starr.jpg’ saved [1619585/1619585]

In [3]:
import sys
sys.path.append('stylegan-encoder')

import os
import bz2
import dlib
import PIL.Image
import numpy as np
from keras.utils import get_file
import matplotlib.pyplot as plt
from google.colab import files

import pickle
import config
import dnnlib
import dnnlib.tflib as tflib

from tqdm.autonotebook import tqdm
import tensorflow as tf
from keras.models import Model
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
import keras.backend as K

from scipy.optimize import fmin_l_bfgs_b
import scipy.ndimage  # needed for scipy.ndimage.gaussian_filter in image_align
Using TensorFlow backend.
WARNING: Logging before flag parsing goes to stderr.
W0830 15:50:38.940250 140418674759552 deprecation_wrapper.py:119] From stylegan-encoder/dnnlib/tflib/tfutil.py:34: The name tf.Dimension is deprecated. Please use tf.compat.v1.Dimension instead.

W0830 15:50:38.941768 140418674759552 deprecation_wrapper.py:119] From stylegan-encoder/dnnlib/tflib/tfutil.py:74: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.

W0830 15:50:38.942647 140418674759552 deprecation_wrapper.py:119] From stylegan-encoder/dnnlib/tflib/tfutil.py:128: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.

/usr/local/lib/python3.6/dist-packages/tqdm/autonotebook/__init__.py:14: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)
  " (e.g. in jupyter console)", TqdmExperimentalWarning)
In [0]:
#@title >> HIDDEN IMPLEMENTATION ROUTINES << { display-mode: "form" }

def unpack_bz2(src_path):
    # Decompress a .bz2 archive and write the result next to it,
    # returning the path of the decompressed file.
    data = bz2.BZ2File(src_path).read()
    dst_path = src_path[:-4]
    with open(dst_path, 'wb') as fp:
        fp.write(data)
    return dst_path
  
def image_align(img, face_landmarks, output_size=1024, transform_size=4096, enable_padding=True):
    # Align function from FFHQ dataset pre-processing step
    # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py

    lm = np.array(face_landmarks)
    lm_chin          = lm[0  : 17]  # left-right
    lm_eyebrow_left  = lm[17 : 22]  # left-right
    lm_eyebrow_right = lm[22 : 27]  # left-right
    lm_nose          = lm[27 : 31]  # top-down
    lm_nostrils      = lm[31 : 36]  # top-down
    lm_eye_left      = lm[36 : 42]  # left-clockwise
    lm_eye_right     = lm[42 : 48]  # left-clockwise
    lm_mouth_outer   = lm[48 : 60]  # left-clockwise
    lm_mouth_inner   = lm[60 : 68]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left     = np.mean(lm_eye_left, axis=0)
    eye_right    = np.mean(lm_eye_right, axis=0)
    eye_avg      = (eye_left + eye_right) * 0.5
    eye_to_eye   = eye_right - eye_left
    mouth_left   = lm_mouth_outer[0]
    mouth_right  = lm_mouth_outer[6]
    mouth_avg    = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        dst_img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink
    else:
        dst_img = img.copy()

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))),
            int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0),
            max(crop[1] - border, 0),
            min(crop[2] + border, dst_img.size[0]),
            min(crop[3] + border, dst_img.size[1]))
    if crop[2] - crop[0] < dst_img.size[0] or crop[3] - crop[1] < dst_img.size[1]:
        dst_img = dst_img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - dst_img.size[0] + border, 0), max(pad[3] - dst_img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        dst_img = np.pad(np.float32(dst_img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = dst_img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.02
        dst_img += (scipy.ndimage.gaussian_filter(dst_img, [blur, blur, 0]) - dst_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        dst_img += (np.median(dst_img, axis=(0,1)) - dst_img) * np.clip(mask, 0.0, 1.0)
        dst_img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(dst_img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    dst_img = dst_img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        dst_img = dst_img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    return dst_img
  
class LandmarksDetector:
    def __init__(self, predictor_model_path):
        """
        :param predictor_model_path: path to shape_predictor_68_face_landmarks.dat file
        """
        self.detector = dlib.get_frontal_face_detector() # cnn_face_detection_model_v1 also can be used
        self.shape_predictor = dlib.shape_predictor(predictor_model_path)

    def get_landmarks(self, img):
        dets = self.detector(img, 1)

        for detection in dets:
            face_landmarks = [(item.x, item.y) for item in self.shape_predictor(img, detection).parts()]
            yield face_landmarks
            
def convert_images_loss(images):
    # Convert generator output from NCHW floats in drange [-1, 1] to
    # NHWC floats in [0, 255] (kept as floats so it stays differentiable).
    drange = [-1, 1]
    nchw_to_nhwc = True
    shrink = 1

    images = tf.cast(images, tf.float32)
    if shrink > 1:
        ksize = [1, 1, shrink, shrink]
        images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
    if nchw_to_nhwc:
        images = tf.transpose(images, [0, 2, 3, 1])
    scale = 255 / (drange[1] - drange[0])
    images = images * scale + (0.5 - drange[0] * scale)

    return images
  
def convert_images_gen(images):
    # Quantize the float images to uint8 for display.
    images = tf.saturate_cast(images, tf.uint8)
    return images
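
# Note: scipy's fmin_l_bfgs_b calls its loss and gradient callbacks
# separately. The Evaluator below computes both in a single pass through
# loss() and caches the gradients so that grads() can return them
# without an extra evaluation of the network.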
  
class Evaluator(object):
    def __init__(self, aligned_img, loss_grad_func, latent_shape):
        self.loss_value = None
        self.grad_values = None
        self.latent_shape = latent_shape
        
        aligned_img = np.asarray(aligned_img)
        if len(aligned_img.shape) == 4:
            self.aligned_img = aligned_img
        elif len(aligned_img.shape) == 3:
            self.aligned_img = np.expand_dims(aligned_img, 0)
        else:
            raise RuntimeError("Unsupported image shape '{}'.".format(aligned_img.shape))

        self.loss_grad_func = loss_grad_func
        
        self.eval_iter = 0

    def loss(self, latent):
        assert self.loss_value is None
        latent = latent.reshape(self.latent_shape) 
        outs = self.loss_grad_func([self.aligned_img, latent])
        self.loss_value = outs[0]
        self.grad_values = np.array(outs[1:]).flatten().astype('float64')
        
        # clip the gradients
        self.grad_values = np.maximum(np.minimum(self.grad_values, 1.0), -1.0)

        self.eval_iter += 1
        print("eval {}, loss {}".format(self.eval_iter, self.loss_value))
        
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

def move_and_show(dlatent, direction, coeffs):
    fig, ax = plt.subplots(1, len(coeffs), figsize=(12, 10), dpi=80)
    dlatent = dlatent.reshape(dlatent_shape)

    for i, coeff in enumerate(coeffs):
        new_latent_vector = dlatent.copy()
        # Shift only the first 8 style layers along the given direction;
        # dlatent has shape (1, 18, 512), so the layer axis is axis 1.
        new_latent_vector[:, :8] = (dlatent + coeff * direction)[:, :8]
        ax[i].imshow(gen_func([new_latent_vector])[0][0])
        ax[i].set_title('Coeff: %0.1f' % coeff)
    [x.axis('off') for x in ax]
    plt.show()
    
def blend_and_show(dlatent1, dlatent2, coeffs):
    fig, ax = plt.subplots(1, len(coeffs), figsize=(12, 10), dpi=80)
    dlatent1 = dlatent1.reshape(dlatent_shape)

    for i, coeff in enumerate(coeffs):
        # Linear interpolation between the two style latents.
        new_latent_vector = coeff * dlatent1 + (1 - coeff) * dlatent2
        ax[i].imshow(gen_func([new_latent_vector])[0][0])
        ax[i].set_title('Coeff: %0.1f' % coeff)
    [x.axis('off') for x in ax]
    plt.show()

Generating human faces with StyleGAN

In this notebook we demonstrate how images of human faces can be generated using the StyleGAN method from NVIDIA's paper "A Style-Based Generator Architecture for Generative Adversarial Networks". The official implementation of the method can be found in a GitHub repository. In addition, we use latent vectors and part of the code from another GitHub repository.

Downloading the code and loading the model

We begin by cloning the stylegan-encoder repository, which bundles the official StyleGAN implementation.

Defining parameters and loading the models

We define a few basic parameters.

In [0]:
URL_FFHQ = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'  # pretrained StyleGAN (FFHQ)
LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'

batch_size = 1
feature_img_size = 256   # resolution at which VGG16 features are extracted
aligned_img_size = 1024  # resolution of the aligned face images
vgg_layer = 9            # VGG16 layer whose activations serve as features

We load the pretrained models: the GAN networks that generate the images.

In [5]:
tflib.init_tf()
with dnnlib.util.open_url(URL_FFHQ, cache_dir=config.cache_dir) as f:
    generator_network, discriminator_network, Gs_network = pickle.load(f)
W0830 15:51:10.493110 140418674759552 deprecation_wrapper.py:119] From stylegan-encoder/dnnlib/tflib/tfutil.py:97: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.

W0830 15:51:10.495650 140418674759552 deprecation_wrapper.py:119] From stylegan-encoder/dnnlib/tflib/tfutil.py:109: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.

Downloading https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ .... done
W0830 15:51:24.953988 140418674759552 deprecation.py:323] From <string>:364: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where

And the model that will be used to align the faces.

In [6]:
landmarks_model_path = unpack_bz2(
    get_file('shape_predictor_68_face_landmarks.dat.bz2',
    LANDMARKS_MODEL_URL, cache_subdir='temp')
)

landmarks_detector = LandmarksDetector(landmarks_model_path)
Downloading data from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
64045056/64040097 [==============================] - 5s 0us/step

We separate out the parts of the model that are used when generating faces and wrap them in functions that make them easy to call. First, the part that maps the original latent vector to the style latent vector:

In [0]:
tf_map_in = Gs_network.components.mapping.input_templates[0]
tf_map_out = Gs_network.components.mapping.output_templates[0]
map_func = K.function([tf_map_in], [tf_map_out])

Next, the part that generates images from the style latent vector:

In [0]:
tf_dlatents = Gs_network.components.synthesis.input_templates[0]
tf_output = Gs_network.components.synthesis.output_templates[0]
tf_loss_img_out = convert_images_loss(tf_output)
tf_img_out = convert_images_gen(tf_loss_img_out)
gen_func = K.function([tf_dlatents], [tf_img_out])

We store the shapes of the latent vector and of the style latent vector in helper variables.

In [0]:
latent_shape = (1,) + K.int_shape(tf_map_in)[1:]
dlatent_shape = (1,) + K.int_shape(tf_dlatents)[1:]
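
For the FFHQ model these should come out as (1, 512) for the latent vector and (1, 18, 512) for the style latent vector, i.e. one 512-dimensional style per synthesis layer. A quick check:

In [0]:
print(latent_shape)   # expected: (1, 512)
print(dlatent_shape)  # expected: (1, 18, 512)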

Generating random faces

Next, let us generate a random face. We start by sampling a latent vector: its elements are drawn from a normal distribution, and its shape is given by the latent_shape variable.

In [0]:
latents = np.random.randn(*latent_shape)

We obtain the style latent vector by applying the map_func function defined above to the original latent vector.

In [0]:
dlatents = map_func([latents])[0]

We can then use dlatents as the input to the gen_func function, which generates the image itself.

In [0]:
img = gen_func([dlatents])[0][0]

All that remains is to visualize the image, or optionally save it to a file.

In [13]:
plt.imshow(img)
plt.axis('off')
Out[13]:
(-0.5, 1023.5, 1023.5, -0.5)
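
Since gen_func returns the image as a uint8 array, saving it takes a single call; a minimal sketch (the output path is arbitrary):

In [0]:
PIL.Image.fromarray(img).save('data/generated.png')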

We can experiment with generating further images here; it suffices to sample a different latent vector each time.

In [15]:
fig, axes = plt.subplots(3, 4, figsize=(10, 10))

for row in axes:
    for ax in row:
        latents = np.random.randn(*latent_shape)
        dlatents = map_func([latents])[0]
        img = gen_func([dlatents])[0][0]
        ax.imshow(img)
        ax.axis('off')

The style latent vector of an existing face

The GAN could also be used for interesting manipulations of existing faces. To do that, however, we would first need to know the latent vectors that correspond to them. StyleGAN works only in one direction: it generates faces from latent vectors, not the other way around.

We can, however, apply the same principle that we use when generating preimages and adversarial examples. The neural network is differentiable, so through optimization we can find a latent vector that generates a face as similar as possible to the target face.

Face similarity

We will not measure the similarity of faces as a pixel-wise distance, because that does not capture their true similarity well. Instead, we first have the face image preprocessed by a network pretrained on the ImageNet dataset, and only then compare the extracted features.

We therefore create the individual tensors and load the pretrained model.

In [16]:
vgg16 = VGG16(include_top=False, input_shape=(feature_img_size, feature_img_size, 3))
perceptual_model = Model(vgg16.input, vgg16.layers[vgg_layer].output)

tf_img_ref = K.placeholder((1, aligned_img_size, aligned_img_size, 3))

tf_out_resized = preprocess_input(tf.image.resize_images(tf_loss_img_out,
                                  (feature_img_size, feature_img_size), method=1))
tf_out_features = perceptual_model(tf_out_resized)

tf_ref_resized = preprocess_input(tf.image.resize_images(tf_img_ref,
                                  (feature_img_size, feature_img_size), method=1))
tf_ref_features = perceptual_model(tf_ref_resized)
W0830 15:52:27.229561 140418674759552 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4267: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.

We define the loss function as the mean squared error between the features of the generated image and those of the original image. We can rescale the loss so that the values fall within a more reasonable range:

In [0]:
loss = tf.losses.mean_squared_error(tf_out_features, tf_ref_features) / 85000

Next we obtain the gradient of the loss function with respect to the style latent vector, so that we can later use it to minimize the loss.

In [0]:
tf_grads = K.gradients(loss, tf_dlatents)
loss_grad_func = K.function([tf_img_ref, tf_dlatents], [loss] + tf_grads)

Minimizing the loss function

We load an image and minimize the loss function in order to find its latent vector.

In [0]:
face_img_path = "data/starr.jpg"
In [0]:
# face_img_path = list(files.upload())[0]
In [224]:
face_img = PIL.Image.open(face_img_path)
plt.imshow(face_img)
plt.axis('off')
Out[224]:
(-0.5, 3074.5, 2049.5, -0.5)

Next we preprocess the face slightly: we extract its key landmarks and use them to align the face image to certain predefined positions.

In [225]:
face_landmarks = next(landmarks_detector.get_landmarks(np.asarray(face_img)))
aligned_img = image_align(face_img, face_landmarks, output_size=aligned_img_size)
plt.imshow(aligned_img)
plt.axis('off')
Out[225]:
(-0.5, 1023.5, 1023.5, -0.5)

We start the optimization from the zero latent vector, using the L-BFGS method:

In [0]:
evaluator = Evaluator(aligned_img, loss_grad_func, dlatent_shape)
latent_init = np.zeros(latent_shape)
dlatent = map_func([latent_init])[0]
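
For intuition: the same loss_grad_func would also drive plain gradient descent; a minimal sketch (the step size and iteration count are chosen arbitrarily, and the L-BFGS call below converges much faster):

In [0]:
# Plain gradient-descent alternative to L-BFGS (sketch only).
gd_dlatent = dlatent.copy()
for step in range(10):
    loss_value, grad = loss_grad_func(
        [np.expand_dims(np.asarray(aligned_img), 0), gd_dlatent])
    gd_dlatent -= 0.5 * grad  # step against the gradient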
In [227]:
dlatent, min_val, info = fmin_l_bfgs_b(evaluator.loss, dlatent.flatten(),
     fprime=evaluator.grads, maxfun=400, disp=1)
eval 1, loss 3.872175455093384
eval 2, loss 3.0125572681427
eval 3, loss 2.5562267303466797
eval 4, loss 2.243816614151001
eval 5, loss 2.1983180046081543
...
eval 399, loss 0.5332449674606323
eval 400, loss 0.532051682472229
eval 401, loss 0.5315852165222168

Generating the image

By minimizing the loss function we obtain a style latent vector that approximately corresponds to the original image. When we generate a new face from it, it should resemble the original face.

In [228]:
img = gen_func([dlatent.reshape(dlatent_shape)])[0][0]
plt.imshow(img)
plt.axis('off')
Out[228]:
(-0.5, 1023.5, 1023.5, -0.5)

Manipulating the latent vector

The photograph's latent vector can be modified further. As with various other kinds of GANs and embeddings, certain arithmetic operations on the vectors carry semantic meaning. For instance, it is possible to identify a direction that approximately corresponds to a smile, to age, to gender, and so on. We load a few such directions:

In [0]:
smile_direction = np.load('stylegan-encoder/ffhq_dataset/latent_directions/smile.npy')
gender_direction = np.load('stylegan-encoder/ffhq_dataset/latent_directions/gender.npy')
age_direction = np.load('stylegan-encoder/ffhq_dataset/latent_directions/age.npy')
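
Under the hood these helpers perform plain vector arithmetic on the style latent; a minimal sketch of the shift that move_and_show applies, assuming the direction arrays have shape (18, 512) to match the style latent's layer axis (the coefficient 1.0 is chosen arbitrarily):

In [0]:
# Shift the first 8 style layers along the smile direction and regenerate.
shifted = dlatent.reshape(dlatent_shape).copy()
shifted[:, :8] += 1.0 * smile_direction[:8]
img_smile = gen_func([shifted])[0][0]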

Applying a smile:

In [230]:
move_and_show(dlatent.reshape(dlatent_shape), smile_direction, [-1, 0, 1])

Changing the gender:

In [254]:
move_and_show(dlatent.reshape(dlatent_shape), gender_direction, [-1.5, 0, 2])

Changing the age:

In [240]:
move_and_show(dlatent.reshape(dlatent_shape), age_direction, [-2, 0, 1.5])

Style mixing

Alternatively, we can mix the styles of multiple images. In the (1, 18, 512) style latent, the early layers control coarse attributes such as pose and face shape, while later layers control finer details, so copying only the first few layers transfers the coarse style of one face onto another.

In [241]:
latent2 = np.random.RandomState(1855).randn(*latent_shape)
dlatent2 = map_func([latent2])[0]
img2 = gen_func([dlatent2])[0][0]
plt.imshow(img2)
plt.axis('off')
Out[241]:
(-0.5, 1023.5, 1023.5, -0.5)
In [0]:
style_range = [0, 1, 2, 3, 4, 5, 6]
dlatent3 = dlatent2.copy()
dlatent3[:, style_range] = dlatent.reshape(dlatent_shape)[:, style_range]
In [243]:
img2 = gen_func([dlatent3])[0][0]
plt.imshow(img2)
plt.axis('off')
Out[243]:
(-0.5, 1023.5, 1023.5, -0.5)
In [244]:
blend_and_show(dlatent.reshape(dlatent_shape), dlatent2, [0, 0.25, 0.5, 0.75, 1])

Latent arithmetic

As a final example, we construct a direction ourselves: we average the style latents of several generated faces with long hair and of several with short hair, and take the difference of the two means as an approximate "hair length" direction.

In [0]:
def find_mean_dlatent(seeds):
    # Average the style latents of faces generated from the given seeds.
    mean_dlatent = np.zeros(dlatent_shape)
    
    for s in seeds:
        h_latent = np.random.RandomState(s).randn(*latent_shape)
        h_dlatent = map_func([h_latent])[0]

        mean_dlatent += h_dlatent / len(seeds)
        
    return mean_dlatent
In [0]:
female_long_hair = [517, 519, 521, 523, 525, 528, 529, 538, 539, 540, 618, 642, 655]
female_short_hair = [537, 546, 561, 597, 599, 602, 610, 616, 627, 637, 652]
In [0]:
# male_long_hair = [629]
# male_short_hair = [535, 536, 549, 558, 559, 560]
In [0]:
long_hair_dlatent = find_mean_dlatent(female_long_hair)
short_hair_dlatent = find_mean_dlatent(female_short_hair)
In [252]:
dlatent2 = dlatent.reshape(dlatent_shape) + 0.5 * (short_hair_dlatent - long_hair_dlatent)
img2 = gen_func([dlatent2])[0][0]
plt.imshow(img2)
plt.axis('off')
Out[252]:
(-0.5, 1023.5, 1023.5, -0.5)
In [0]: