Autoencoder

Einen Autoencoder könnte man sich entfernt wie die Identitätsfunktion vorstellen:

f(x) = x + \epsilon \quad \text{mit } x, \epsilon \in \mathbb{R}^{n} \text{ und } \left\Vert\epsilon\right\Vert \rightarrow 0

Das ist aber schon die einzige Ähnlichkeit zur Id-Abbildung.

Ein Autoencoder besteht im Wesentlichen aus zwei Teilen, dem Encoder und dem Decoder, die in Kombination den Autoencoder ergeben.

autoencoder(x) = decode(encode(x))

Encoder

Ein n-dimensionaler Vektor (x \in \mathbb{R}^{n}) wird in einen m-dimensionalen Vektor (z \in \mathbb{R}^{m}) transformiert, wobei m deutlich kleiner ist als n. Das kann in einem oder, wie im nächsten Beispiel angedeutet, in mehreren Schritten (deep) erfolgen.

input = Input(shape=(784,)) # flattened 28 x 28 MNIST image = 784 input values
x = Dense(256, activation='relu')(input)  # first compression step: 784 -> 256
x = Dense(32, activation='relu')(x)       # second compression step: 256 -> 32
encode = Dense(2, activation='relu')(x) # bottleneck: reduce to 2 dimensions

Decoder

Die Kunst liegt darin, selbst aus einem im Extremfall (m = 2) auf zwei Float-Werte reduzierten Bild das Original zu rekonstruieren.

x = Dense(32, activation = 'relu')(encode)   # expand the 2-dim code back: 2 -> 32
x = Dense(256, activation = 'relu')(x)       # 32 -> 256
decode = Dense(784, activation = 'sigmoid')(x)  # 256 -> 784; sigmoid keeps pixel values in [0, 1]

Autoencoder

Die Zusammenführung der beiden Komponenten ergibt den Autoencoder.

autoencoder = Model(input, decode)  # full model: image in -> reconstruction out

Deep Autoencoder

#!/usr/bin/env python

import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # TF-Infos unterdrücken

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers

# Size of the encoded (bottleneck) representation
encoding_dim = 32

# Input shape: one flattened 28 x 28 MNIST image = 784 values
input_img = keras.Input(shape=(784,))

# Encoder: compress 784 -> 128 -> 64 -> encoding_dim
x = layers.Dense(128, activation='relu')(input_img)
x = layers.Dense(64, activation='relu')(x)
# FIX: the bottleneck width was hard-coded as 32 although encoding_dim
# was declared for exactly this purpose — use the constant (same value).
encode = layers.Dense(encoding_dim, activation='relu')(x)

# Decoder: mirror of the encoder, encoding_dim -> 64 -> 128 -> 784
x = layers.Dense(64, activation='relu')(encode)
x = layers.Dense(128, activation='relu')(x)
decode = layers.Dense(784, activation='sigmoid')(x)  # sigmoid keeps outputs in [0, 1]

# Autoencoder: maps an input image to its reconstruction
autoencoder = keras.Model(input_img, decode)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Fetch MNIST and turn each 28x28 image into a flat float vector in [0, 1]
from tensorflow.keras.datasets import mnist
import numpy as np

(x_train, _), (x_test, _) = mnist.load_data()  # labels are not needed
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(len(x_train), -1)  # (60000, 784)
x_test = x_test.reshape(len(x_test), -1)     # (10000, 784)

# Train the autoencoder: input and target are the same images (reconstruction task)
autoencoder.fit(x_train, x_train,
                epochs=50, # NOTE(review): the original comment claimed a regularizer makes overfitting less likely so one can train longer, but no regularizer is attached anywhere in this script — confirm intent
                batch_size=512,
                shuffle=True,
                validation_data=(x_test, x_test))

# Run the trained model on the test images
decoded_imgs = autoencoder.predict(x_test)

import matplotlib.pyplot as plt

n = 10  # number of image columns to display
plt.figure(figsize=(20, 4))
# Row 0: original test images; row 1: their reconstructions
for row, images in enumerate((x_test, decoded_imgs)):
    for idx in range(1, n + 1):
        axis = plt.subplot(2, n, row * n + idx)
        plt.imshow(images[idx].reshape(28, 28))
        plt.gray()
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)
plt.show()

Convolutional Autoencoder

#!/usr/bin/env python

import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers

input_img=keras.Input(shape=(28, 28, 1))  # one-channel 28x28 image

# Encoder: conv + pool stages shrink 28x28x1 down to a 4x4x8 code
x = layers.Conv2D(16, (3,3), activation='relu', padding='same')(input_img)  # 28x28x16
x = layers.MaxPooling2D((2,2), padding='same')(x)                           # 14x14x16
x = layers.Conv2D(8, (3,3), activation='relu', padding='same')(x)           # 14x14x8
x = layers.MaxPooling2D((2,2), padding='same')(x)                           # 7x7x8
x = layers.Conv2D(8, (3,3), activation='relu', padding='same')(x)           # 7x7x8
encoded = layers.MaxPooling2D((2,2), padding='same')(x)                     # 4x4x8 ('same' pads 7 -> 4)

# Decoder: upsample the 4x4x8 code back to a 28x28x1 image
x = layers.Conv2D(8, (3,3), activation='relu', padding='same')(encoded)     # 4x4x8
x = layers.UpSampling2D((2,2))(x)                                           # 8x8x8
x = layers.Conv2D(8, (3,3), activation='relu', padding='same')(x)           # 8x8x8
x = layers.UpSampling2D((2,2))(x)                                           # 16x16x8
x = layers.Conv2D(16, (3,3), activation='relu')(x)  # deliberately NO padding: 16x16 -> 14x14, so the final upsampling yields 28x28
x = layers.UpSampling2D((2,2))(x)                                           # 28x28x16
decode = layers.Conv2D(1, (3,3), activation = 'sigmoid', padding='same')(x) # 28x28x1, pixel values in [0, 1]

# Autoencoder: image in, reconstructed image out
autoencoder = keras.Model(input_img, decode)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Load MNIST, scale pixels to [0, 1], and add a trailing channel axis for Conv2D
from tensorflow.keras.datasets import mnist
import numpy as np

(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(len(x_train), 28, 28, 1)
x_test = x_test.reshape(len(x_test), 28, 28, 1)

# Train the autoencoder (inputs reconstruct themselves)
from tensorflow.keras.callbacks import TensorBoard  # NOTE(review): imported but never passed to fit() — no TensorBoard logging actually happens
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test))

# Apply to the test images and inspect the reconstructions
decoded_imgs = autoencoder.predict(x_test)

import matplotlib.pyplot as plt

n = 10  # number of image columns to display
plt.figure(figsize=(20, 4))
# Row 0: original test images; row 1: their reconstructions
for row, images in enumerate((x_test, decoded_imgs)):
    for idx in range(1, n + 1):
        axis = plt.subplot(2, n, row * n + idx)
        plt.imshow(images[idx].reshape(28, 28))
        plt.gray()
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)
plt.show()

Denoising Autoencoder

#!/usr/bin/env python

import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers

# Load MNIST, scale pixels to [0, 1], and add a trailing channel axis for Conv2D
from tensorflow.keras.datasets import mnist
import numpy as np

(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(len(x_train), 28, 28, 1)
x_test = x_test.reshape(len(x_test), 28, 28, 1)

# Add Gaussian noise to the images
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)

# BUG FIX: the clipped arrays were stored in y_train_noisy / y_test_noisy and
# never used again, so training and plotting ran on values outside [0, 1].
# Clip the noisy arrays in place so every downstream use sees valid pixels.
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)

# Kept so the original names remain defined (now aliases of the clipped data)
y_train_noisy = x_train_noisy
y_test_noisy = x_test_noisy

# Show the noisy test images
import matplotlib.pyplot as plt

n = 10
plt.figure(figsize=(20, 2))
for idx in range(1, n + 1):
    axis = plt.subplot(1, n, idx)
    plt.imshow(x_test_noisy[idx].reshape(28, 28))
    plt.gray()
    # hide the tick marks, the images speak for themselves
    axis.get_xaxis().set_visible(False)
    axis.get_yaxis().set_visible(False)
plt.show()

input_img = keras.Input(shape=(28, 28, 1))

# Encoder: two conv + pool stages, 28x28x1 down to 7x7x32
h = layers.Conv2D(32, (3,3), activation='relu', padding='same')(input_img)
h = layers.MaxPooling2D((2,2), padding='same')(h)
h = layers.Conv2D(32, (3,3), activation='relu', padding='same')(h)
encoded = layers.MaxPooling2D((2,2), padding='same')(h)

# Decoder: mirror of the encoder, 7x7x32 back up to 28x28x1
h = layers.Conv2D(32, (3,3), activation='relu', padding='same')(encoded)
h = layers.UpSampling2D((2,2))(h)
h = layers.Conv2D(32, (3,3), activation='relu', padding='same')(h)
h = layers.UpSampling2D((2,2))(h)
decode = layers.Conv2D(1, (3,3), activation='sigmoid', padding='same')(h)

# Wire input to reconstruction and compile
autoencoder = keras.Model(input_img, decode)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

## To monitor training with TensorBoard, start it in a console:
## tensorboard --logdir=/tmp/autoencoder --port=6006
## NOTE(review): no TensorBoard callback is passed to fit() below, so nothing is written to that logdir.

# Train the denoiser: noisy images as input, clean images as target
autoencoder.fit(x_train_noisy, x_train,
                epochs=100,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test_noisy, x_test))

# Apply to the noisy test images and inspect the denoised reconstructions
decoded_imgs = autoencoder.predict(x_test_noisy)

n = 10  # number of image columns to display
plt.figure(figsize=(20, 4))
# Row 0: noisy input images; row 1: denoised reconstructions
for row, images in enumerate((x_test_noisy, decoded_imgs)):
    for idx in range(1, n + 1):
        axis = plt.subplot(2, n, row * n + idx)
        plt.imshow(images[idx].reshape(28, 28))
        plt.gray()
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)
plt.show()

Variational Autoencoder – VAE

#!/usr/bin/env python

# Siehe: https://github.com/keras-team/keras/blob/master/examples/variational_autoencoder.py

# Falls es schon mal gelaufen ist aufrufen mit:
# 7.var* --weight vae_mlp_mnist.h5

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import sys

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Lambda, Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.datasets import mnist
from tensorflow.keras.losses import mse, binary_crossentropy
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K

import numpy as np
import matplotlib.pyplot as plt
import argparse

# Reparameterization trick
# instead of sampling from Q(z|X),
# sample epsilon = N(0,I)
# return z_mean + sqrt(var) * epsilon
def sampling(args):
    """Reparameterization trick: return z = mean + sigma * eps, eps ~ N(0, I).

    Instead of sampling z directly from Q(z|X) (which is not differentiable),
    draw a standard-normal epsilon and shift/scale it, keeping gradients
    flowing through z_mean and z_log_var.
    """
    mean, log_var = args
    shape = (K.shape(mean)[0], K.int_shape(mean)[1])  # (batch, latent_dim)
    eps = K.random_normal(shape=shape)
    return mean + K.exp(0.5 * log_var) * eps


def plot_results(models, # (encoder, decoder) pair of trained models
                 data,   # (samples, labels) pair
                 batch_size=128,
                 model_name="vae_mnist"): # directory name (only referenced by the commented-out savefig code)
    """Visualize the 2-D latent space.

    Produces a scatter plot of the test digits' latent means and a large
    grid image of digits decoded from evenly spaced latent coordinates.
    """
    encoder, decoder = models
    x_test, y_test = data
    # os.makedirs(model_name, exist_ok=True)

    # filename = os.path.join(model_name, "vae_mean.png")
    # Encode the test set; keep only the latent mean (first encoder output)
    z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size)

    # Scatter plot of the latent means, colored by digit label
    plt.figure(figsize=(12, 10))
    plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)
    plt.xlabel("z[0]")
    plt.ylabel("z[1]")
    #plt.savefig(filename)
    #plt.show()

    #filename = os.path.join(model_name, "digits_over_latent.png")
    n = 50           # grid resolution: n x n decoded digits
    image_size = 28  # width/height of one decoded digit
    figure = np.zeros((image_size * n, image_size * n))

    grid_x = np.linspace(-4, 4, n) # n evenly spaced values from -4 to 4
    grid_y = np.linspace(-4, 4, n)[::-1] # same, reversed so y increases upward in the image

    # Decode each (xi, yi) grid point and paste the digit into the canvas
    for i, yi in enumerate(grid_y):
        for j, xi in enumerate(grid_x):
            z = np.array([[xi, yi]])
            x_decoded = decoder.predict(z)
            image = x_decoded[0].reshape(image_size, image_size)
            figure[i * image_size: (i + 1) * image_size,
                   j * image_size: (j + 1) * image_size] = image

    plt.figure(figsize=(10, 10))
    # Put axis ticks at the center of each digit cell, labeled with the latent coordinate
    start_range = image_size // 2
    end_range = (n - 1) * image_size + start_range + 1
    pixel_range = np.arange(start_range, end_range, image_size)
    sample_range_x = np.round(grid_x, 1)
    sample_range_y = np.round(grid_y, 1)
    plt.xticks(pixel_range, sample_range_x)
    plt.yticks(pixel_range, sample_range_y)
    plt.xlabel("z[0]")
    plt.ylabel("z[1]")
    plt.imshow(figure, cmap='Greys_r')
    #plt.savefig(filename)
    plt.show()

# ---------------------------------------------------------------
# MAIN: load data, set hyper-parameters
# ---------------------------------------------------------------
# MNIST: flatten each 28x28 image into a 784-vector scaled to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()

image_size = x_train.shape[1]
original_dim = image_size * image_size
x_train = x_train.reshape(-1, original_dim) / 255.0
x_test = x_test.reshape(-1, original_dim) / 255.0

# Network hyper-parameters
input_shape = (original_dim,)
intermediate_dim = 512   # hidden-layer width
batch_size = 128
latent_dim = 2           # 2-D latent space so it can be plotted directly
epochs = 50

# VAE model = encoder + decoder

# Build the encoder: x -> hidden -> (z_mean, z_log_var)
inputs = Input(shape=input_shape, name='encoder_input')
x         = Dense(intermediate_dim, activation='relu')(inputs)
z_mean    = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)

# Reparameterization trick: sample z inside a Lambda layer so the
# stochastic draw stays outside the gradient path
z = Lambda(sampling, name='z')([z_mean, z_log_var])

# instantiate encoder model (exposes the mean, the log-variance, and the sample)
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)

# Build the decoder: z -> hidden -> reconstruction with values in [0, 1]
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(intermediate_dim, activation='relu')(latent_inputs)
outputs = Dense(original_dim, activation='sigmoid')(x)

# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)

# instantiate the full VAE: feed the sampled z (encoder output index 2) into the decoder
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')

if __name__ == '__main__':
    # Command line: -w/--weights loads saved weights instead of training,
    # -m/--mse switches the reconstruction loss from cross-entropy to MSE
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--weights", help="Load h5 model trained weights")
    parser.add_argument("-m", "--mse", help="Use mse loss instead of binary cross entropy (default)", action='store_true')
    args = parser.parse_args()
    models = (encoder, decoder)
    data = (x_test, y_test)

    # VAE loss = reconstruction loss (mse or cross-entropy) + KL divergence
    if args.mse:
        reconstruction_loss = mse(inputs, outputs)
    else:
        reconstruction_loss = binary_crossentropy(inputs, outputs)

    # Keras losses average over the feature axis; scale back to a per-image sum
    reconstruction_loss *= original_dim
    # Closed-form KL divergence between N(z_mean, exp(z_log_var)) and N(0, I)
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)  # loss attached via add_loss, so compile() needs no loss argument
    vae.compile(optimizer='adam')

    if args.weights:
        # Skip training and reuse previously saved weights
        vae.load_weights(args.weights)
    else:
        # train the autoencoder (targets are implicit via add_loss, hence no y)
        vae.fit(x_train,
                epochs=epochs,
                batch_size=batch_size,
                validation_data=(x_test, None))
        vae.save_weights('vae_mlp_mnist.h5')

    plot_results(models,
                 data,
                 batch_size=batch_size,
                 model_name="vae_mlp")

Series Navigation<< Transformer