Generative Adversarial Networks (GAN)
Table of Contents
%%html
<center><iframe src="https://www.youtube.com/embed/d3DGaIoRRNo?rel=0"
width="560" height="315" frameborder="0" allowfullscreen></iframe></center>
%%html
<center><iframe src="https://www.youtube.com/embed/9IItTSYOfqM?rel=0"
width="560" height="315" frameborder="0" allowfullscreen></iframe></center>
%%html
<center><iframe src="https://www.youtube.com/embed/YzZpWwJlesE?rel=0"
width="560" height="315" frameborder="0" allowfullscreen></iframe></center>
%%html
<center><iframe src="https://www.youtube.com/embed/H_F28cy2wNs?rel=0"
width="560" height="315" frameborder="0" allowfullscreen></iframe></center>
If $P_{\text{model}}(x)$ can be estimated to be close to $P_{\text{data}}(x)$, then new data can be generated simply by sampling from $P_{\text{model}}(x)$.
In generative modeling, we would like to train a network that models a distribution, such as a distribution over images.
GANs do not work with an explicit density function. Instead, they take a game-theoretic approach.
One way to judge the quality of the model is to sample from it.
The generator is trained to produce samples that are indistinguishable from the real data, as judged by a discriminator network whose job is to tell real from fake.
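Formally, the generator $G$ and the discriminator $D$ play a two-player minimax game. The standard GAN objective (Goodfellow et al., 2014) is

$$\min_{G}\max_{D} V(D,G) = \mathbb{E}_{x \sim P_{\text{data}}(x)}\left[\log D(x)\right] + \mathbb{E}_{z \sim P_{z}(z)}\left[\log\left(1 - D(G(z))\right)\right]$$

$D$ is trained to assign high probability to real samples and low probability to generated ones, while $G$ is trained to fool $D$. In the code below, $G$ is instead updated to maximize $\log D(G(z))$ (by training the combined model on "real" labels), the non-saturating variant that gives stronger gradients early in training.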
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Load MNIST, scale pixels to [0, 1], and flatten each 28x28 image to a 784-dim vector
mnist = tf.keras.datasets.mnist

(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x, test_x = train_x/255.0, test_x/255.0

train_x = train_x.reshape(-1, 784)
test_x = test_x.reshape(-1, 784)
print('train_x: ', train_x.shape)
print('test_x: ', test_x.shape)
# Generator: maps a 100-dim noise vector to a 784-dim (28x28) image in [0, 1]
generator = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units = 256, input_dim = 100, activation = 'relu'),
    tf.keras.layers.Dense(units = 784, activation = 'sigmoid')
])

# Discriminator: maps a 784-dim image to the probability that it is real
discriminator = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units = 256, input_dim = 784, activation = 'relu'),
    tf.keras.layers.Dense(units = 1, activation = 'sigmoid'),
])

discriminator.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 0.0001),
                      loss = 'binary_crossentropy')

# Combined model: noise -> generator -> (frozen) discriminator
combined_input = tf.keras.layers.Input(shape = (100,))
generated = generator(combined_input)

discriminator.trainable = False
combined_output = discriminator(generated)

combined = tf.keras.models.Model(inputs = combined_input, outputs = combined_output)
combined.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 0.0002),
                 loss = 'binary_crossentropy')
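Note that `discriminator.trainable = False` is set after the discriminator was compiled, so the standalone `discriminator` still learns from `train_on_batch`, while inside `combined` only the generator's weights are updated. A minimal sanity check (not in the original notebook; the expected counts assume the two-layer models above):

# Only the generator's weights (2 Dense layers x kernel/bias = 4 tensors)
# should be trainable through the combined model; the frozen discriminator
# contributes none. The already-compiled discriminator still trains on its own.
print(len(combined.trainable_weights))    # expected: 4
print(len(generator.trainable_weights))   # expected: 4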
def make_noise(samples):
    return np.random.normal(0, 1, [samples, 100])

def plot_generated_images(generator, samples = 3):
    noise = make_noise(samples)
    generated_images = generator.predict(noise)
    generated_images = generated_images.reshape(samples, 28, 28)

    for i in range(samples):
        plt.subplot(1, samples, i+1)
        plt.imshow(generated_images[i], 'gray', interpolation = 'nearest')
        plt.axis('off')
    plt.tight_layout()
    plt.show()
n_iter = 20000
batch_size = 100

fake = np.zeros(batch_size)   # labels for generated images
real = np.ones(batch_size)    # labels for real images

for i in range(n_iter):
    # Train Discriminator on one real batch and one generated batch
    noise = make_noise(batch_size)
    generated_images = generator.predict(noise)

    idx = np.random.randint(0, train_x.shape[0], batch_size)
    real_images = train_x[idx]

    D_loss_real = discriminator.train_on_batch(real_images, real)
    D_loss_fake = discriminator.train_on_batch(generated_images, fake)
    D_loss = D_loss_real + D_loss_fake

    # Train Generator: push the (frozen) discriminator's output toward "real"
    noise = make_noise(batch_size)
    G_loss = combined.train_on_batch(noise, real)

    if i % 5000 == 0:
        print('Discriminator Loss: ', D_loss)
        print('Generator Loss: ', G_loss)
        plot_generated_images(generator)
plot_generated_images(generator)
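As a quick qualitative check on the trained generator (a hedged sketch, not part of the original notebook), we can linearly interpolate between two latent vectors and watch one digit morph into another:

# Linear interpolation in the 100-dim latent space between two noise vectors
z0, z1 = make_noise(1), make_noise(1)
steps = 8

plt.figure(figsize = (12, 2))
for i, a in enumerate(np.linspace(0, 1, steps)):
    img = generator.predict((1 - a)*z0 + a*z1).reshape(28, 28)
    plt.subplot(1, steps, i + 1)
    plt.imshow(img, 'gray')
    plt.axis('off')
plt.show()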
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from scipy.fftpack import fft
# Generate a test signal: two noisy sinusoids at 2 Hz and 5 Hz
Fs = 24               # sampling frequency [Hz]
T = 1/Fs              # sampling period [s]
N = 48                # number of samples
t = np.arange(0, N)*T
k = np.arange(0, N)
f = (Fs/N)*k          # frequency bins [Hz]

x1 = 4*np.sin(2*np.pi*2*t) + 0.5*np.random.randn(N)
x2 = 2*np.sin(2*np.pi*5*t) + 0.5*np.random.randn(N)
x = x1 + x2

# Single-sided amplitude spectrum
xt = fft(x)/N
xtss = xt[0:int(N/2)+1]
xtss[1:-1] = 2*xtss[1:-1]    # double all bins except DC and Nyquist
fss = f[0:int(N/2)+1]
plt.figure(figsize=(10,10))
plt.subplot(3,1,1)
plt.plot(t, x1)
plt.ylim([-5,5])
plt.grid()
plt.title('Vibration 1')
plt.subplot(3,1,2)
plt.plot(t, x2)
plt.ylim([-5,5])
plt.grid()
plt.title('Vibration 2')
plt.subplot(3,1,3)
plt.plot(t, x)
plt.ylim([-5,5])
plt.grid()
plt.title('Vibration 1 + Vibration 2')
plt.show()
plt.figure(figsize = (10,3))
plt.plot(fss, np.abs(xtss))
plt.grid()
plt.xlim([np.min(fss), np.max(fss)])
plt.xlabel('f')
plt.ylabel('|X(f)|', fontsize = 15)
plt.ylim([0, 4.1])
plt.title('Single-sided FFT')
plt.show()
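The scaling above follows the usual single-sided amplitude spectrum convention: divide the DFT by $N$ and double every bin except DC and Nyquist, so that a sinusoid $A\sin(2\pi f_0 t)$ appears as a peak of height $\approx A$ at $f_0$:

$$\left|X_{ss}(f_k)\right| = \frac{2}{N}\left|\sum_{n=0}^{N-1} x_n\, e^{-j 2\pi k n / N}\right|, \qquad 0 < k < \frac{N}{2}$$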
def data_gen(f1, f2, A1, A2):
    Fs = 24
    T = 1/Fs
    N = 48
    t = np.arange(0, N)*T

    x1 = A1*np.sin(2*np.pi*f1*t)
    x2 = A2*np.sin(2*np.pi*f2*t)
    x = x1 + x2
    return x
x = data_gen(2, 5, 2, 4)
plt.figure(figsize = (10,3))
plt.plot(t, x)
plt.grid()
plt.show()
def batch_maker(n_batch):
    signals = []
    for _ in range(n_batch):
        A1 = 0.4 + 0.1*np.random.random()   # uniform on [0.4, 0.5]
        A2 = 0.1 + 0.2*np.random.random()   # uniform on [0.1, 0.3]
        x = data_gen(2, 5, A1, A2)
        signals.append(x)
    return np.array(signals)
x = batch_maker(1)
plt.figure(figsize=(10,3))
plt.plot(t, x[0])
plt.grid()
plt.show()
# Generator: maps a 10-dim noise vector to a 48-sample signal in (-1, 1)
generator = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units = 32, input_dim = 10, activation = 'relu'),
    tf.keras.layers.Dense(units = 32, activation = 'relu'),
    tf.keras.layers.Dense(units = 48, activation = 'relu'),
    tf.keras.layers.Dense(units = 48, activation = 'tanh'),
])

# Discriminator: maps a 48-sample signal to the probability that it is real
discriminator = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units = 50, input_dim = 48, activation = 'relu'),
    tf.keras.layers.Dense(units = 32, activation = 'relu'),
    tf.keras.layers.Dense(units = 32, activation = 'relu'),
    tf.keras.layers.Dense(units = 1, activation = 'sigmoid'),
])

discriminator.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 2e-5),
                      loss = 'binary_crossentropy')

# Combined model: noise -> generator -> (frozen) discriminator
combined_input = tf.keras.layers.Input(shape = (10,))
generated = generator(combined_input)

discriminator.trainable = False
combined_output = discriminator(generated)

combined = tf.keras.models.Model(inputs = combined_input, outputs = combined_output)
combined.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 2e-5),
                 loss = 'binary_crossentropy')
def make_noise(samples):
    return np.random.normal(0, 1, [samples, 10])

def plot_generated_images(generator, samples = 2):
    noise = make_noise(samples)
    generated_images = generator.predict(noise)

    plt.figure(figsize = (10,3))
    for i in range(samples):
        plt.subplot(1, samples, i+1)
        plt.plot(generated_images[i])
        plt.grid()
    plt.tight_layout()
    plt.show()
n_iter = 5000
batch_size = 100

fake = np.zeros(batch_size)
real = np.ones(batch_size)

for i in range(n_iter):
    # Train Discriminator
    noise = make_noise(batch_size)
    generated_signals = generator.predict(noise)
    real_signals = batch_maker(batch_size)

    D_loss_real = discriminator.train_on_batch(real_signals, real)
    D_loss_fake = discriminator.train_on_batch(generated_signals, fake)
    D_loss = D_loss_real + D_loss_fake

    # Train Generator
    noise = make_noise(batch_size)
    G_loss = combined.train_on_batch(noise, real)

    if i % 1000 == 0:
        print('Discriminator Loss: ', D_loss)
        print('Generator Loss: ', G_loss)
        plot_generated_images(generator)
Fs = 24
T = 1/Fs
N = 48
t = np.arange(0, N)*T
k = np.arange(0, N)
f = (Fs/N)*k
noise = make_noise(1)
generated_signals = generator.predict(noise)
x = generated_signals[0]

# Single-sided amplitude spectrum of the generated signal
xt = fft(x)/N
xtss = xt[0:int(N/2)+1]
xtss[1:-1] = 2*xtss[1:-1]    # double all bins except DC and Nyquist
fss = f[0:int(N/2)+1]
plt.figure(figsize = (10,7))
plt.subplot(2,1,1)
plt.plot(t, x)
plt.ylim([-1, 1])
plt.grid()
plt.title('Generated Vibration', fontsize = 15)
plt.subplot(2,1,2)
plt.plot(fss, np.abs(xtss))
plt.grid()
plt.xlim([np.min(fss), np.max(fss)])
plt.xlabel('f')
plt.ylabel('|X(f)|', fontsize = 15)
plt.ylim([0, 1.1])
plt.title('Single-sided FFT')
plt.show()
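As a final check (a hedged addition, not in the original notebook), we can average the single-sided spectrum over many generated signals and see whether the peaks at 2 Hz and 5 Hz fall inside the training amplitude ranges $[0.4, 0.5]$ and $[0.1, 0.3]$:

# Average single-sided spectrum over a batch of generated signals
n_samples = 100
gen = generator.predict(make_noise(n_samples))

spectra = np.abs(fft(gen, axis = 1)/N)[:, 0:int(N/2)+1]
spectra[:, 1:-1] = 2*spectra[:, 1:-1]

plt.figure(figsize = (10,3))
plt.plot(fss, np.mean(spectra, axis = 0))
plt.grid()
plt.xlabel('f')
plt.ylabel('mean |X(f)|')
plt.title('Average Spectrum of Generated Signals')
plt.show()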
%%javascript
$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')