DCGAN Tensorflow code doesn't produce faces on CelebA dataset





I have written the following code, but it does not produce faces on the CelebA dataset. I think it should create some sort of face (even if very blurry) by the last iteration of each epoch. However, it just creates noisy squares with no visible face. I am quite new to GANs and I am not sure how to debug this Deep Convolutional GAN (DCGAN) in order to figure out what is going wrong.



My code might be easier to read here: https://pastebin.com/c4QUqxJy

Here is the code:



from __future__ import print_function
import random
import os
import glob
import scipy.misc  #scipy.misc.imsave is used below (removed in scipy >= 1.3)

import tensorflow as tf
import numpy as np
from PIL import Image
import skimage.io as io
import matplotlib.pyplot as plt


class Arguments(object):
    data_path = 'results_celebA/preprocessed/'
    save_path = 'results_celebA'        #path to save preprocessed image folder
    preproc_foldername = 'preprocessed' #folder name for preprocessed images
    image_size = 64     #images are resized to image_size value
    num_images = 202590 #the number of training images
    batch_size = 64     #batch size
    dim_z = 100         #the dimension of z variable (the generator input dimension)
    n_g_filters = 64    #the number of the generator filters (gets multiplied between layers)
    n_f_filters = 64    #the number of the discriminator filters (gets multiplied between layers)
    n_epoch = 25        #the number of epochs
    lr = 0.0002         #learning rate
    beta1 = 0.5         #beta_1 parameter of Adam optimizer
    beta2 = 0.99        #beta_2 parameter of Adam optimizer

args = Arguments()


#contains functions that load, preprocess and visualize images.
class Dataset(object):
    def __init__(self, data_path, num_imgs, target_imgsize):
        self.data_path = data_path
        self.num_imgs = num_imgs
        self.target_imgsize = target_imgsize

    def normalize_np_image(self, image):
        return (image / 255.0 - 0.5) / 0.5

    def denormalize_np_image(self, image):
        return (image * 0.5 + 0.5) * 255

    def get_input(self, image_path):
        image = np.array(Image.open(image_path)).astype(np.float32)
        return self.normalize_np_image(image)

    def get_imagelist(self, data_path, celebA=False):
        if celebA == True:
            imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
        else:
            imgs_path = os.path.join(data_path, '*.jpg')
        all_namelist = glob.glob(imgs_path, recursive=True)
        return all_namelist[:self.num_imgs]

    def load_and_preprocess_image(self, image_path):
        image = Image.open(image_path)
        j = (image.size[0] - 100) // 2
        i = (image.size[1] - 100) // 2
        image = image.crop([j, i, j + 100, i + 100])
        image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
        image = np.array(image.convert('RGB')).astype(np.float32)
        image = self.normalize_np_image(image)
        return image

    #reads data, preprocesses and saves to another folder with the given path.
    def preprocess_and_save_images(self, dir_name, save_path=''):
        preproc_folder_path = os.path.join(save_path, dir_name)
        if not os.path.exists(preproc_folder_path):
            os.makedirs(preproc_folder_path)
        imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
        print('Saving and preprocessing images ...')
        for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
            cur_image = self.load_and_preprocess_image(imgname)
            cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
            cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' % (num))
        self.data_path = preproc_folder_path

    def get_nextbatch(self, batch_size):
        print("nextbatch batchsize is: ", batch_size)
        assert (batch_size > 0), "Give a valid batch size"
        cur_idx = 0
        image_namelist = self.get_imagelist(self.data_path)
        while cur_idx + batch_size <= self.num_imgs:
            cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
            cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
            cur_batch = np.array(cur_batch).astype(np.float32)
            cur_idx += batch_size
            yield cur_batch

    def show_image(self, image, normalized=True):
        if not type(image).__module__ == np.__name__:
            image = image.numpy()
        if normalized:
            npimg = (image * 0.5) + 0.5
        npimg.astype(np.uint8)
        plt.imshow(npimg, interpolation='nearest')
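
# Note: get_nextbatch walks the file list in the same (glob) order every time,
# so each epoch sees identical batches in identical order. If shuffling turns
# out to matter, one small tweak (my assumption, not in the original code)
# would be to call random.shuffle(image_namelist) right after get_imagelist()
# inside get_nextbatch.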





def generator(x, args, reuse=False):
    with tf.device('/gpu:0'):
        with tf.variable_scope("generator", reuse=reuse):
            #Layer Block 1
            with tf.variable_scope("layer1"):
                deconv1 = tf.layers.conv2d_transpose(inputs=x,
                                                     filters=args.n_g_filters*8,
                                                     kernel_size=4,
                                                     strides=1,
                                                     padding='valid',
                                                     use_bias=False,
                                                     name='deconv')
                batch_norm1 = tf.layers.batch_normalization(deconv1, name='batch_norm')
                relu1 = tf.nn.relu(batch_norm1, name='relu')
            #Layer Block 2
            with tf.variable_scope("layer2"):
                deconv2 = tf.layers.conv2d_transpose(inputs=relu1,
                                                     filters=args.n_g_filters*4,
                                                     kernel_size=4,
                                                     strides=2,
                                                     padding='same',
                                                     use_bias=False,
                                                     name='deconv')
                batch_norm2 = tf.layers.batch_normalization(deconv2, name='batch_norm')
                relu2 = tf.nn.relu(batch_norm2, name='relu')
            #Layer Block 3
            with tf.variable_scope("layer3"):
                deconv3 = tf.layers.conv2d_transpose(inputs=relu2,
                                                     filters=args.n_g_filters*2,
                                                     kernel_size=4,
                                                     strides=2,
                                                     padding='same',
                                                     use_bias=False,
                                                     name='deconv')
                batch_norm3 = tf.layers.batch_normalization(deconv3, name='batch_norm')
                relu3 = tf.nn.relu(batch_norm3, name='relu')
            #Layer Block 4
            with tf.variable_scope("layer4"):
                deconv4 = tf.layers.conv2d_transpose(inputs=relu3,
                                                     filters=args.n_g_filters,
                                                     kernel_size=4,
                                                     strides=2,
                                                     padding='same',
                                                     use_bias=False,
                                                     name='deconv')
                batch_norm4 = tf.layers.batch_normalization(deconv4, name='batch_norm')
                relu4 = tf.nn.relu(batch_norm4, name='relu')
            #Output Layer
            with tf.variable_scope("last_layer"):
                logit = tf.layers.conv2d_transpose(inputs=relu4,
                                                   filters=3,
                                                   kernel_size=4,
                                                   strides=2,
                                                   padding='same',
                                                   use_bias=False,
                                                   name='logit')
                output = tf.nn.tanh(logit)
    return output, logit
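
# One thing I am unsure about here: tf.layers.batch_normalization is called
# without training=True, so (as I understand it) it normalizes with the
# never-updated moving statistics instead of the batch statistics. See the
# note after train() below for the pattern I think TF 1.x expects.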



def discriminator(x, args, reuse=False):
    with tf.device('/gpu:0'):
        with tf.variable_scope("discriminator", reuse=reuse):
            with tf.variable_scope("layer1"):
                conv1 = tf.layers.conv2d(inputs=x,
                                         filters=args.n_f_filters,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                relu1 = tf.nn.leaky_relu(conv1, alpha=0.2, name='relu')
            with tf.variable_scope("layer2"):
                conv2 = tf.layers.conv2d(inputs=relu1,
                                         filters=args.n_f_filters*2,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                batch_norm2 = tf.layers.batch_normalization(conv2, name='batch_norm')
                relu2 = tf.nn.leaky_relu(batch_norm2, alpha=0.2, name='relu')
            with tf.variable_scope("layer3"):
                conv3 = tf.layers.conv2d(inputs=relu2,
                                         filters=args.n_f_filters*4,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                batch_norm3 = tf.layers.batch_normalization(conv3, name='batch_norm')
                relu3 = tf.nn.leaky_relu(batch_norm3, name='relu')
            with tf.variable_scope("layer4"):
                conv4 = tf.layers.conv2d(inputs=relu3,
                                         filters=args.n_f_filters*8,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                batch_norm4 = tf.layers.batch_normalization(conv4, name='batch_norm')
                relu4 = tf.nn.leaky_relu(batch_norm4, alpha=0.2, name='relu')
            with tf.variable_scope("last_layer"):
                logit = tf.layers.conv2d(inputs=relu4,
                                         filters=1,
                                         kernel_size=4,
                                         strides=1,
                                         padding='valid',
                                         use_bias=False,
                                         name='conv')
                output = tf.nn.sigmoid(logit)
    return output, logit



def sample_z(dim_z, num_batch):
    mu = 0
    sigma = 1
    s = np.random.normal(mu, sigma, num_batch*dim_z)
    samples = s.reshape(num_batch, 1, 1, dim_z)
    ##dist = tf.distributions.Normal(0.0, 1.0)
    ##samples = dist.sample([num_batch, 1, 1, dim_z])
    return samples

sample_z(100, 64)  #shape (64, 1, 1, 100), i.e. 6400 values



def get_losses(d_real_logits, d_fake_logits):
    #add new loss function here
    ###d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits, labels=tf.ones_like(d_real_logits)))
    ###d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
    ###d_loss = d_loss_real + d_loss_fake
    ###g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))
    ###return d_loss, g_loss
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits, labels=tf.ones_like(d_real_logits))
        + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))
    return d_loss, g_loss
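
# For reference: d_loss pushes D(real) toward 1 and D(G(z)) toward 0, while
# g_loss is the standard non-saturating generator loss pushing D(G(z)) toward 1.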



def get_optimizers(learning_rate, beta1, beta2):
    d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
    g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
    return d_optimizer, g_optimizer


def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
    d_step = d_optimizer.minimize(d_loss)
    g_step = g_optimizer.minimize(g_loss)
    return d_step, g_step
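
# I am not sure whether each minimize() call should be restricted to its own
# network's variables; as written above, both optimizers update ALL trainable
# variables, generator and discriminator alike. A sketch of the scoped version
# I have in mind (scope names taken from the functions above; not used below):
def optimize_scoped(d_optimizer, g_optimizer, d_loss, g_loss):
    d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    d_step = d_optimizer.minimize(d_loss, var_list=d_vars)
    g_step = g_optimizer.minimize(g_loss, var_list=g_vars)
    return d_step, g_step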


LOGDIR = "logs_basic_dcgan"

def merge_images(image_batch, size):
    h, w = image_batch.shape[1], image_batch.shape[2]
    c = image_batch.shape[3]
    img = np.zeros((int(h*size[0]), w*size[1], c))
    for idx, im in enumerate(image_batch):
        i = idx % size[1]
        j = idx // size[1]
        img[j*h:j*h+h, i*w:i*w+w, :] = im
    return img

itr_fh = open('basic_gan_itr.txt', 'a+')
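
# Note: the generator samples are tanh outputs in [-1, 1], while plt.imsave
# expects RGB floats in [0, 1]. It may be worth mapping merged images back
# with (img + 1) / 2 before saving (my assumption; train() below saves them raw).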

def train(args):
    tf.reset_default_graph()
    data_loader = Dataset(args.data_path, args.num_images, args.image_size)
    #data_loader.preprocess_and_save_images('preprocessed', 'results_celebA') #preprocess the images once
    X = tf.placeholder(tf.float32, shape=[args.batch_size, args.image_size, args.image_size, 3])
    Z = tf.placeholder(tf.float32, shape=[args.batch_size, 1, 1, args.dim_z])

    G_sample, _ = generator(Z, args)
    D_real, D_real_logits = discriminator(X, args)
    D_fake, D_fake_logits = discriminator(G_sample, args, reuse=True)
    d_loss, g_loss = get_losses(D_real_logits, D_fake_logits)
    d_optimizer, g_optimizer = get_optimizers(args.lr, args.beta1, args.beta2)
    d_step, g_step = optimize(d_optimizer, g_optimizer, d_loss, g_loss)
    ###z_sum = tf.summary.histogram('z', Z)
    ###d_sum = tf.summary.histogram('d', D_real)
    ###G_sum = tf.summary.histogram('g', G_sample)
    ###d_loss_sum = tf.summary.scalar('d_loss', d_loss)
    ###g_loss_sum = tf.summary.scalar('g_loss', g_loss)
    ###d_sum = tf.summary.merge([z_sum, d_sum, d_loss_sum])
    ###g_sum = tf.summary.merge([z_sum, G_sum, g_loss_sum])
    ###saver = tf.train.Saver()
    ###merged_summary = tf.summary.merge_all()

    ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
    ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
    ###merged_summary = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(args.n_epoch):
            for itr, real_batch in enumerate(data_loader.get_nextbatch(args.batch_size)):
                print('itr is %d, and epoch is %d' % (itr, epoch))
                itr_fh.write("epoch: " + str(epoch) + " itr: " + str(itr) + "\n")

                Z_sample = sample_z(args.dim_z, args.batch_size)

                _, _ = sess.run([d_step, g_step], feed_dict={X: real_batch, Z: Z_sample})
                sample = sess.run(G_sample, feed_dict={Z: Z_sample})
                print("sample size is: ", sample.shape)
                if itr == 3164:  #last batch of the epoch: num_images // batch_size - 1
                    im_merged = merge_images(sample[:16], [4, 4])
                    plt.imsave('sample_gan_images/im_merged_epoch_%d.png' % (epoch), im_merged)
                    scipy.misc.imsave('sample_gan_images/im_epoch_%d_itr_%d.png' % (epoch, itr), sample[1])
                ##merged_summary = sess.run(merged_summary, feed_dict={X:real_batch , Z:Z_sample})
                ###writer = tf.summary.FileWriter(LOGDIR)
                ###writer.add_summary(merged_summary, itr)
                ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
                ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
                ###merged_summary = tf.summary.merge_all()
                ###writer.add_graph(sess.graph)
    ###saver.save(sess, save_path='logs_basic_dcgan/gan.ckpt')


train(args)
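

One more thing I am unsure about (this is my assumption, not something the code above does): tf.layers.batch_normalization defaults to training=False, so it normalizes with moving statistics that are only refreshed through the UPDATE_OPS collection. If that is part of the problem, I believe the usual TF 1.x pattern would be to pass training=True to every batch_normalization call and build the train steps under the update ops, roughly:

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        d_step = d_optimizer.minimize(d_loss)
        g_step = g_optimizer.minimize(g_loss)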


Here are the images created at the end of the first 5 epochs. I have also commented out everything related to TensorBoard, because it unfortunately makes training very slow.
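
If it helps, this is how I would throttle the logging rather than remove it entirely (a sketch assuming the merged_summary op from the commented-out lines above; the FileWriter is created once instead of every iteration):

    writer = tf.summary.FileWriter(LOGDIR, sess.graph)  #once, before the training loop
    #inside the inner loop, log every 200 iterations instead of every step:
    if itr % 200 == 0:
        summ = sess.run(merged_summary, feed_dict={X: real_batch, Z: Z_sample})
        writer.add_summary(summ, epoch * 3165 + itr)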



end of Epoch 0:
[generated samples image]

end of Epoch 1:
[generated samples image]

end of Epoch 2:
[generated samples image]

end of Epoch 3:
[generated samples image]

end of Epoch 4:
[generated samples image]










share|improve this question





























    -1















    I have written the following code but does not produce faces on the celebA dataset. I think it should create some sort of face (even if very blurry) at the last iteration of each epoch. However, it just creates noisy squares with no visible face. I am quite new to GANs and I am not sure how to debug this Deep Convolutional GAN (DCGAN) in order to figure what's going wrong.



    My code might be easier to be seen here:
    https://pastebin.com/c4QUqxJy
    Here is the code:



    from __future__ import print_function
    import random
    import os
    import glob
    import scipy

    import tensorflow as tf
    import numpy as np
    from PIL import Image
    import skimage.io as io
    import matplotlib.pyplot as plt


    class Arguments(object):

    data_path = 'results_celebA/preprocessed/'
    save_path = 'results_celebA' #path to save preprocessed image folder
    preproc_foldername = 'preprocessed' #folder name for preprocessed images
    image_size = 64 #images are resized to image_size value
    num_images = 202590 #the number of training images
    batch_size = 64 #batch size
    dim_z = 100 #the dimension of z variable (the generator input dimension)
    n_g_filters = 64 #the number of the generator filters (gets multiplied between layers)
    n_f_filters = 64 #the number of the discriminator filters (gets multiplied between layers)
    n_epoch = 25 #the number of epochs
    lr = 0.0002 #learning rate
    beta1 = 0.5 #beta_1 parameter of Adam optimizer
    beta2 = 0.99 #beta_2 parameter of Adam optimizer

    args = Arguments()


    #contains functions that load, preprocess and visualize images.


    class Dataset(object):
    def __init__(self, data_path, num_imgs, target_imgsize):
    self.data_path = data_path
    self.num_imgs = num_imgs
    self.target_imgsize = target_imgsize

    def normalize_np_image(self, image):
    return (image / 255.0 - 0.5) / 0.5

    def denormalize_np_image(self, image):
    return (image * 0.5 + 0.5) * 255

    def get_input(self, image_path):
    image = np.array(Image.open(image_path)).astype(np.float32)
    return self.normalize_np_image(image)

    def get_imagelist(self, data_path, celebA=False):
    if celebA == True:
    imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
    else:
    imgs_path = os.path.join(data_path, '*.jpg')
    all_namelist = glob.glob(imgs_path, recursive=True)
    return all_namelist[:self.num_imgs]

    def load_and_preprocess_image(self, image_path):
    image = Image.open(image_path)
    j = (image.size[0] - 100) // 2
    i = (image.size[1] - 100) // 2
    image = image.crop([j, i, j + 100, i + 100])
    image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
    image = np.array(image.convert('RGB')).astype(np.float32)
    image = self.normalize_np_image(image)
    return image

    #reads data, preprocesses and saves to another folder with the given path.
    def preprocess_and_save_images(self, dir_name, save_path=''):
    preproc_folder_path = os.path.join(save_path, dir_name)
    if not os.path.exists(preproc_folder_path):
    os.makedirs(preproc_folder_path)
    imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
    print('Saving and preprocessing images ...')
    for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
    cur_image = self.load_and_preprocess_image(imgname)
    cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
    cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' %(num))
    self.data_path= preproc_folder_path

    def get_nextbatch(self, batch_size):
    print("nextbatch batchsize is: ", batch_size)
    assert (batch_size > 0),"Give a valid batch size"
    cur_idx = 0
    image_namelist = self.get_imagelist(self.data_path)
    while cur_idx + batch_size <= self.num_imgs:
    cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
    cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
    cur_batch = np.array(cur_batch).astype(np.float32)
    cur_idx += batch_size
    yield cur_batch

    def show_image(self, image, normalized=True):
    if not type(image).__module__ == np.__name__:
    image = image.numpy()
    if normalized:
    npimg = (image * 0.5) + 0.5
    npimg.astype(np.uint8)
    plt.imshow(npimg, interpolation='nearest')


    #contains functions that load, preprocess and visualize images.

    class Dataset(object):
    def __init__(self, data_path, num_imgs, target_imgsize):
    self.data_path = data_path
    self.num_imgs = num_imgs
    self.target_imgsize = target_imgsize

    def normalize_np_image(self, image):
    return (image / 255.0 - 0.5) / 0.5

    def denormalize_np_image(self, image):
    return (image * 0.5 + 0.5) * 255

    def get_input(self, image_path):
    image = np.array(Image.open(image_path)).astype(np.float32)
    return self.normalize_np_image(image)

    def get_imagelist(self, data_path, celebA=False):
    if celebA == True:
    imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
    else:
    imgs_path = os.path.join(data_path, '*.jpg')

    all_namelist = glob.glob(imgs_path, recursive=True)
    return all_namelist[:self.num_imgs]

    def load_and_preprocess_image(self, image_path):
    image = Image.open(image_path)
    j = (image.size[0] - 100) // 2
    i = (image.size[1] - 100) // 2
    image = image.crop([j, i, j + 100, i + 100])
    image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
    image = np.array(image.convert('RGB')).astype(np.float32)
    image = self.normalize_np_image(image)
    return image

    #reads data, preprocesses and saves to another folder with the given path.
    def preprocess_and_save_images(self, dir_name, save_path=''):
    preproc_folder_path = os.path.join(save_path, dir_name)
    if not os.path.exists(preproc_folder_path):
    os.makedirs(preproc_folder_path)
    imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
    print('Saving and preprocessing images ...')
    for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
    cur_image = self.load_and_preprocess_image(imgname)
    cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
    cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' %(num))
    self.data_path= preproc_folder_path

    def get_nextbatch(self, batch_size):
    assert (batch_size > 0),"Give a valid batch size"
    cur_idx = 0
    image_namelist = self.get_imagelist(self.data_path)
    while cur_idx + batch_size <= self.num_imgs:
    cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
    cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
    cur_batch = np.array(cur_batch).astype(np.float32)
    cur_idx += batch_size
    yield cur_batch

    def show_image(self, image, normalized=True):
    if not type(image).__module__ == np.__name__:
    image = image.numpy()
    if normalized:
    npimg = (image * 0.5) + 0.5
    npimg.astype(np.uint8)
    plt.imshow(npimg, interpolation='nearest')



    def generator(x, args, reuse=False):
    with tf.device('/gpu:0'):
    with tf.variable_scope("generator", reuse=reuse):
    #Layer Block 1
    with tf.variable_scope("layer1"):
    deconv1 = tf.layers.conv2d_transpose(inputs=x,
    filters= args.n_g_filters*8,
    kernel_size=4,
    strides=1,
    padding='valid',
    use_bias=False,
    name='deconv')
    batch_norm1=tf.layers.batch_normalization(deconv1,
    name = 'batch_norm')
    relu1 = tf.nn.relu(batch_norm1, name='relu')
    #Layer Block 2
    with tf.variable_scope("layer2"):
    deconv2 = tf.layers.conv2d_transpose(inputs=relu1,
    filters=args.n_g_filters*4,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='deconv')
    batch_norm2 = tf.layers.batch_normalization(deconv2,
    name = 'batch_norm')
    relu2 = tf.nn.relu(batch_norm2, name='relu')
    #Layer Block 3
    with tf.variable_scope("layer3"):
    deconv3 = tf.layers.conv2d_transpose(inputs=relu2,
    filters=args.n_g_filters*2,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias = False,
    name='deconv')
    batch_norm3 = tf.layers.batch_normalization(deconv3,
    name = 'batch_norm')
    relu3 = tf.nn.relu(batch_norm3, name='relu')
    #Layer Block 4
    with tf.variable_scope("layer4"):
    deconv4 = tf.layers.conv2d_transpose(inputs=relu3,
    filters=args.n_g_filters,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='deconv')
    batch_norm4 = tf.layers.batch_normalization(deconv4,
    name = 'batch_norm')
    relu4 = tf.nn.relu(batch_norm4, name='relu')
    #Output Layer
    with tf.variable_scope("last_layer"):
    logit = tf.layers.conv2d_transpose(inputs=relu4,
    filters=3,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='logit')
    output = tf.nn.tanh(logit)
    return output, logit



    def discriminator(x, args, reuse=False):
    with tf.device('/gpu:0'):
    with tf.variable_scope("discriminator", reuse=reuse):
    with tf.variable_scope("layer1"):
    conv1 = tf.layers.conv2d(inputs=x,
    filters=args.n_f_filters,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='conv')
    relu1 = tf.nn.leaky_relu(conv1, alpha=0.2, name='relu')
    with tf.variable_scope("layer2"):
    conv2 = tf.layers.conv2d(inputs=relu1,
    filters=args.n_f_filters*2,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='conv')
    batch_norm2 = tf.layers.batch_normalization(conv2,name='batch_norm')
    relu2 = tf.nn.leaky_relu(batch_norm2, alpha=0.2, name='relu')
    with tf.variable_scope("layer3"):
    conv3 = tf.layers.conv2d(inputs=relu2,
    filters=args.n_f_filters*4,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='conv')
    batch_norm3 = tf.layers.batch_normalization(conv3, name='batch_norm')
    relu3 = tf.nn.leaky_relu(batch_norm3, name='relu')
    with tf.variable_scope("layer4"):
    conv4 = tf.layers.conv2d(inputs=relu3,
    filters=args.n_f_filters*8,
    kernel_size=4,
    strides=2,
    padding='same',
    use_bias=False,
    name='conv')
    batch_norm4 = tf.layers.batch_normalization(conv4, name='batch_norm')
    relu4 = tf.nn.leaky_relu(batch_norm4, alpha=0.2, name='relu')
    with tf.variable_scope("last_layer"):
    logit = tf.layers.conv2d(inputs=relu4,
    filters=1,
    kernel_size=4,
    strides=1,
    padding='valid',
    use_bias=False,
    name='conv')
    output = tf.nn.sigmoid(logit)
    return output, logit



    def sample_z(dim_z, num_batch):
    mu = 0
    sigma = 1
    s = np.random.normal(mu, sigma, num_batch*dim_z)
    samples = s.reshape(num_batch, 1, 1, dim_z)
    ##dist = tf.distributions.Normal(0.0, 1.0)
    ##samples = dist.sample([num_batch, 1, 1, dim_z])
    return samples
    #64,1,1,100 6400
    sample_z(100, 64)



    def get_losses(d_real_logits, d_fake_logits):
    #add new loss function here
    ###d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits, labels=tf.ones_like(d_real_logits)))
    ###d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
    ###d_loss = d_loss_real + d_loss_fake
    ###g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))
    ###return d_loss, g_loss
    d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits,labels=tf.ones_like(d_real_logits)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits,labels=tf.zeros_like(d_fake_logits)))

    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits,labels=tf.ones_like(d_fake_logits)))
    return d_loss, g_loss



    def get_optimizers(learning_rate, beta1, beta2):
    d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
    g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
    return d_optimizer, g_optimizer


    def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
    d_step = d_optimizer.minimize(d_loss)
    g_step = g_optimizer.minimize(g_loss)
    return d_step, g_step


    LOGDIR = "logs_basic_dcgan"

    def merge_images(image_batch, size):
    h,w = image_batch.shape[1], image_batch.shape[2]
    c = image_batch.shape[3]
    img = np.zeros((int(h*size[0]), w*size[1], c))
    for idx, im in enumerate(image_batch):
    i = idx % size[1]
    j = idx // size[1]
    img[j*h:j*h+h, i*w:i*w+w,:] = im
    return img
    itr_fh = open('basic_gan_itr.txt', 'a+')

    def train(args):
    tf.reset_default_graph()
    data_loader = Dataset(args.data_path, args.num_images, args.image_size)
    #data_loader.preprocess_and_save_images('preprocessed', 'results_celebA') #preprocess the images once
    X = tf.placeholder(tf.float32, shape=[args.batch_size, args.image_size , args.image_size, 3])
    Z = tf.placeholder(tf.float32, shape=[args.batch_size, 1, 1, args.dim_z])

    G_sample, _ = generator(Z, args)
    D_real, D_real_logits = discriminator(X, args)
    D_fake, D_fake_logits = discriminator(G_sample, args, reuse=True)
    d_loss, g_loss = get_losses(D_real_logits, D_fake_logits)
    d_optimizer, g_optimizer = get_optimizers(args.lr, args.beta1, args.beta2)
    d_step, g_step = optimize(d_optimizer, g_optimizer, d_loss, g_loss)
    ###z_sum = tf.summary.histogram('z', Z)
    ###d_sum = tf.summary.histogram('d', D_real)
    ###G_sum = tf.summary.histogram('g', G_sample)
    ###d_loss_sum = tf.summary.scalar('d_loss', d_loss)
    ###g_loss_sum = tf.summary.scalar('g_loss', g_loss)
    ###d_sum = tf.summary.merge([z_sum, d_sum, d_loss_sum])
    ###g_sum = tf.summary.merge([z_sum, G_sum, g_loss_sum])
    ###saver = tf.train.Saver()
    ###merged_summary = tf.summary.merge_all()

    ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
    ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
    ###merged_summary = tf.summary.merge_all()

    with tf.Session() as sess:

    sess.run(tf.global_variables_initializer())
    for epoch in range(args.n_epoch):
    for itr, real_batch in enumerate(data_loader.get_nextbatch(args.batch_size)):
    print('itr is %d, and epoch is %d' %(itr, epoch))
    itr_fh.write("epoch: " + str(epoch) + " itr: " + str(itr) + "n")

    Z_sample = sample_z(args.dim_z, args.batch_size)

    _, _ = sess.run([d_step, g_step], feed_dict={X:real_batch , Z:Z_sample})
    sample = sess.run(G_sample, feed_dict={Z:Z_sample})
    print("sample size is: ", sample.shape)
    if itr==3164: #num_images/batch_size
    im_merged = merge_images(sample[:16], [4,4])
    plt.imsave('sample_gan_images/im_merged_epoch_%d.png' %(epoch), im_merged )
    scipy.misc.imsave('sample_gan_images/im_epoch_%d_itr_%d.png' %(epoch,itr), sample[1])
    ##merged_summary = sess.run(merged_summary, feed_dict={X:real_batch , Z:Z_sample})
    ###writer = tf.summary.FileWriter(LOGDIR)
    ###writer.add_summary(merged_summary, itr)
    ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
    ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
    ###merged_summary = tf.summary.merge_all()
    ###writer.add_graph(sess.graph)
    ###saver.save(sess, save_path='logs_basic_dcgan/gan.ckpt')


    train(args)


    Here is the images created at the end of first 5 epochs. I also have commented stuff related to tensorboard because it makes it very slow unfortunately.



    end of Epoch 0:
    enter image description here



    end of Epoch 1:
    enter image description here



    end of Epoch 2:
    enter image description here



    end of Epoch 3:
    enter image description here



    end of Epoch 4:
    enter image description here










    share|improve this question

























      -1












      -1








      -1








      I have written the following code but does not produce faces on the celebA dataset. I think it should create some sort of face (even if very blurry) at the last iteration of each epoch. However, it just creates noisy squares with no visible face. I am quite new to GANs and I am not sure how to debug this Deep Convolutional GAN (DCGAN) in order to figure what's going wrong.



      My code might be easier to be seen here:
      https://pastebin.com/c4QUqxJy
      Here is the code:



      from __future__ import print_function
      import random
      import os
      import glob
      import scipy

      import tensorflow as tf
      import numpy as np
      from PIL import Image
      import skimage.io as io
      import matplotlib.pyplot as plt


      class Arguments(object):

      data_path = 'results_celebA/preprocessed/'
      save_path = 'results_celebA' #path to save preprocessed image folder
      preproc_foldername = 'preprocessed' #folder name for preprocessed images
      image_size = 64 #images are resized to image_size value
      num_images = 202590 #the number of training images
      batch_size = 64 #batch size
      dim_z = 100 #the dimension of z variable (the generator input dimension)
      n_g_filters = 64 #the number of the generator filters (gets multiplied between layers)
      n_f_filters = 64 #the number of the discriminator filters (gets multiplied between layers)
      n_epoch = 25 #the number of epochs
      lr = 0.0002 #learning rate
      beta1 = 0.5 #beta_1 parameter of Adam optimizer
      beta2 = 0.99 #beta_2 parameter of Adam optimizer

      args = Arguments()


      #contains functions that load, preprocess and visualize images.


      class Dataset(object):
      def __init__(self, data_path, num_imgs, target_imgsize):
      self.data_path = data_path
      self.num_imgs = num_imgs
      self.target_imgsize = target_imgsize

      def normalize_np_image(self, image):
      return (image / 255.0 - 0.5) / 0.5

      def denormalize_np_image(self, image):
      return (image * 0.5 + 0.5) * 255

      def get_input(self, image_path):
      image = np.array(Image.open(image_path)).astype(np.float32)
      return self.normalize_np_image(image)

      def get_imagelist(self, data_path, celebA=False):
      if celebA == True:
      imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
      else:
      imgs_path = os.path.join(data_path, '*.jpg')
      all_namelist = glob.glob(imgs_path, recursive=True)
      return all_namelist[:self.num_imgs]

      def load_and_preprocess_image(self, image_path):
      image = Image.open(image_path)
      j = (image.size[0] - 100) // 2
      i = (image.size[1] - 100) // 2
      image = image.crop([j, i, j + 100, i + 100])
      image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
      image = np.array(image.convert('RGB')).astype(np.float32)
      image = self.normalize_np_image(image)
      return image

      #reads data, preprocesses and saves to another folder with the given path.
      def preprocess_and_save_images(self, dir_name, save_path=''):
      preproc_folder_path = os.path.join(save_path, dir_name)
      if not os.path.exists(preproc_folder_path):
      os.makedirs(preproc_folder_path)
      imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
      print('Saving and preprocessing images ...')
      for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
      cur_image = self.load_and_preprocess_image(imgname)
      cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
      cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' %(num))
      self.data_path= preproc_folder_path

      def get_nextbatch(self, batch_size):
      print("nextbatch batchsize is: ", batch_size)
      assert (batch_size > 0),"Give a valid batch size"
      cur_idx = 0
      image_namelist = self.get_imagelist(self.data_path)
      while cur_idx + batch_size <= self.num_imgs:
      cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
      cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
      cur_batch = np.array(cur_batch).astype(np.float32)
      cur_idx += batch_size
      yield cur_batch

      def show_image(self, image, normalized=True):
      if not type(image).__module__ == np.__name__:
      image = image.numpy()
      if normalized:
      npimg = (image * 0.5) + 0.5
      npimg.astype(np.uint8)
      plt.imshow(npimg, interpolation='nearest')


      #contains functions that load, preprocess and visualize images.

      class Dataset(object):
      def __init__(self, data_path, num_imgs, target_imgsize):
      self.data_path = data_path
      self.num_imgs = num_imgs
      self.target_imgsize = target_imgsize

      def normalize_np_image(self, image):
      return (image / 255.0 - 0.5) / 0.5

      def denormalize_np_image(self, image):
      return (image * 0.5 + 0.5) * 255

      def get_input(self, image_path):
      image = np.array(Image.open(image_path)).astype(np.float32)
      return self.normalize_np_image(image)

      def get_imagelist(self, data_path, celebA=False):
      if celebA == True:
      imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
      else:
      imgs_path = os.path.join(data_path, '*.jpg')

      all_namelist = glob.glob(imgs_path, recursive=True)
      return all_namelist[:self.num_imgs]

      def load_and_preprocess_image(self, image_path):
      image = Image.open(image_path)
      j = (image.size[0] - 100) // 2
      i = (image.size[1] - 100) // 2
      image = image.crop([j, i, j + 100, i + 100])
      image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
      image = np.array(image.convert('RGB')).astype(np.float32)
      image = self.normalize_np_image(image)
      return image

      #reads data, preprocesses and saves to another folder with the given path.
      def preprocess_and_save_images(self, dir_name, save_path=''):
      preproc_folder_path = os.path.join(save_path, dir_name)
      if not os.path.exists(preproc_folder_path):
      os.makedirs(preproc_folder_path)
      imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
      print('Saving and preprocessing images ...')
      for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
      cur_image = self.load_and_preprocess_image(imgname)
      cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
      cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' %(num))
      self.data_path= preproc_folder_path

      def get_nextbatch(self, batch_size):
      assert (batch_size > 0),"Give a valid batch size"
      cur_idx = 0
      image_namelist = self.get_imagelist(self.data_path)
      while cur_idx + batch_size <= self.num_imgs:
      cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
      cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
      cur_batch = np.array(cur_batch).astype(np.float32)
      cur_idx += batch_size
      yield cur_batch

      def show_image(self, image, normalized=True):
      if not type(image).__module__ == np.__name__:
      image = image.numpy()
      if normalized:
      npimg = (image * 0.5) + 0.5
      npimg.astype(np.uint8)
      plt.imshow(npimg, interpolation='nearest')



      def generator(x, args, reuse=False):
      with tf.device('/gpu:0'):
      with tf.variable_scope("generator", reuse=reuse):
      #Layer Block 1
      with tf.variable_scope("layer1"):
      deconv1 = tf.layers.conv2d_transpose(inputs=x,
      filters= args.n_g_filters*8,
      kernel_size=4,
      strides=1,
      padding='valid',
      use_bias=False,
      name='deconv')
      batch_norm1=tf.layers.batch_normalization(deconv1,
      name = 'batch_norm')
      relu1 = tf.nn.relu(batch_norm1, name='relu')
      #Layer Block 2
      with tf.variable_scope("layer2"):
      deconv2 = tf.layers.conv2d_transpose(inputs=relu1,
      filters=args.n_g_filters*4,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='deconv')
      batch_norm2 = tf.layers.batch_normalization(deconv2,
      name = 'batch_norm')
      relu2 = tf.nn.relu(batch_norm2, name='relu')
      #Layer Block 3
      with tf.variable_scope("layer3"):
      deconv3 = tf.layers.conv2d_transpose(inputs=relu2,
      filters=args.n_g_filters*2,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias = False,
      name='deconv')
      batch_norm3 = tf.layers.batch_normalization(deconv3,
      name = 'batch_norm')
      relu3 = tf.nn.relu(batch_norm3, name='relu')
      #Layer Block 4
      with tf.variable_scope("layer4"):
      deconv4 = tf.layers.conv2d_transpose(inputs=relu3,
      filters=args.n_g_filters,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='deconv')
      batch_norm4 = tf.layers.batch_normalization(deconv4,
      name = 'batch_norm')
      relu4 = tf.nn.relu(batch_norm4, name='relu')
      #Output Layer
      with tf.variable_scope("last_layer"):
      logit = tf.layers.conv2d_transpose(inputs=relu4,
      filters=3,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='logit')
      output = tf.nn.tanh(logit)
      return output, logit



      def discriminator(x, args, reuse=False):
      with tf.device('/gpu:0'):
      with tf.variable_scope("discriminator", reuse=reuse):
      with tf.variable_scope("layer1"):
      conv1 = tf.layers.conv2d(inputs=x,
      filters=args.n_f_filters,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='conv')
      relu1 = tf.nn.leaky_relu(conv1, alpha=0.2, name='relu')
      with tf.variable_scope("layer2"):
      conv2 = tf.layers.conv2d(inputs=relu1,
      filters=args.n_f_filters*2,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='conv')
      batch_norm2 = tf.layers.batch_normalization(conv2,name='batch_norm')
      relu2 = tf.nn.leaky_relu(batch_norm2, alpha=0.2, name='relu')
      with tf.variable_scope("layer3"):
      conv3 = tf.layers.conv2d(inputs=relu2,
      filters=args.n_f_filters*4,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='conv')
      batch_norm3 = tf.layers.batch_normalization(conv3, name='batch_norm')
      relu3 = tf.nn.leaky_relu(batch_norm3, name='relu')
      with tf.variable_scope("layer4"):
      conv4 = tf.layers.conv2d(inputs=relu3,
      filters=args.n_f_filters*8,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='conv')
      batch_norm4 = tf.layers.batch_normalization(conv4, name='batch_norm')
      relu4 = tf.nn.leaky_relu(batch_norm4, alpha=0.2, name='relu')
      with tf.variable_scope("last_layer"):
      logit = tf.layers.conv2d(inputs=relu4,
      filters=1,
      kernel_size=4,
      strides=1,
      padding='valid',
      use_bias=False,
      name='conv')
      output = tf.nn.sigmoid(logit)
      return output, logit



      def sample_z(dim_z, num_batch):
      mu = 0
      sigma = 1
      s = np.random.normal(mu, sigma, num_batch*dim_z)
      samples = s.reshape(num_batch, 1, 1, dim_z)
      ##dist = tf.distributions.Normal(0.0, 1.0)
      ##samples = dist.sample([num_batch, 1, 1, dim_z])
      return samples
      #64,1,1,100 6400
      sample_z(100, 64)



      def get_losses(d_real_logits, d_fake_logits):
      #add new loss function here
      ###d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits, labels=tf.ones_like(d_real_logits)))
      ###d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
      ###d_loss = d_loss_real + d_loss_fake
      ###g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))
      ###return d_loss, g_loss
      d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits,labels=tf.ones_like(d_real_logits)) + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits,labels=tf.zeros_like(d_fake_logits)))

      g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits,labels=tf.ones_like(d_fake_logits)))
      return d_loss, g_loss



      def get_optimizers(learning_rate, beta1, beta2):
      d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
      g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
      return d_optimizer, g_optimizer


      def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
      d_step = d_optimizer.minimize(d_loss)
      g_step = g_optimizer.minimize(g_loss)
      return d_step, g_step


      LOGDIR = "logs_basic_dcgan"

      def merge_images(image_batch, size):
      h,w = image_batch.shape[1], image_batch.shape[2]
      c = image_batch.shape[3]
      img = np.zeros((int(h*size[0]), w*size[1], c))
      for idx, im in enumerate(image_batch):
      i = idx % size[1]
      j = idx // size[1]
      img[j*h:j*h+h, i*w:i*w+w,:] = im
      return img
      itr_fh = open('basic_gan_itr.txt', 'a+')

      def train(args):
      tf.reset_default_graph()
      data_loader = Dataset(args.data_path, args.num_images, args.image_size)
      #data_loader.preprocess_and_save_images('preprocessed', 'results_celebA') #preprocess the images once
      X = tf.placeholder(tf.float32, shape=[args.batch_size, args.image_size , args.image_size, 3])
      Z = tf.placeholder(tf.float32, shape=[args.batch_size, 1, 1, args.dim_z])

      G_sample, _ = generator(Z, args)
      D_real, D_real_logits = discriminator(X, args)
      D_fake, D_fake_logits = discriminator(G_sample, args, reuse=True)
      d_loss, g_loss = get_losses(D_real_logits, D_fake_logits)
      d_optimizer, g_optimizer = get_optimizers(args.lr, args.beta1, args.beta2)
      d_step, g_step = optimize(d_optimizer, g_optimizer, d_loss, g_loss)
      ###z_sum = tf.summary.histogram('z', Z)
      ###d_sum = tf.summary.histogram('d', D_real)
      ###G_sum = tf.summary.histogram('g', G_sample)
      ###d_loss_sum = tf.summary.scalar('d_loss', d_loss)
      ###g_loss_sum = tf.summary.scalar('g_loss', g_loss)
      ###d_sum = tf.summary.merge([z_sum, d_sum, d_loss_sum])
      ###g_sum = tf.summary.merge([z_sum, G_sum, g_loss_sum])
      ###saver = tf.train.Saver()
      ###merged_summary = tf.summary.merge_all()

      ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
      ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
      ###merged_summary = tf.summary.merge_all()

      with tf.Session() as sess:

      sess.run(tf.global_variables_initializer())
      for epoch in range(args.n_epoch):
      for itr, real_batch in enumerate(data_loader.get_nextbatch(args.batch_size)):
      print('itr is %d, and epoch is %d' %(itr, epoch))
      itr_fh.write("epoch: " + str(epoch) + " itr: " + str(itr) + "n")

      Z_sample = sample_z(args.dim_z, args.batch_size)

      _, _ = sess.run([d_step, g_step], feed_dict={X:real_batch , Z:Z_sample})
      sample = sess.run(G_sample, feed_dict={Z:Z_sample})
      print("sample size is: ", sample.shape)
      if itr==3164: #num_images/batch_size
      im_merged = merge_images(sample[:16], [4,4])
      plt.imsave('sample_gan_images/im_merged_epoch_%d.png' %(epoch), im_merged )
      scipy.misc.imsave('sample_gan_images/im_epoch_%d_itr_%d.png' %(epoch,itr), sample[1])
      ##merged_summary = sess.run(merged_summary, feed_dict={X:real_batch , Z:Z_sample})
      ###writer = tf.summary.FileWriter(LOGDIR)
      ###writer.add_summary(merged_summary, itr)
      ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
      ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
      ###merged_summary = tf.summary.merge_all()
      ###writer.add_graph(sess.graph)
      ###saver.save(sess, save_path='logs_basic_dcgan/gan.ckpt')


      train(args)


      Here is the images created at the end of first 5 epochs. I also have commented stuff related to tensorboard because it makes it very slow unfortunately.



      end of Epoch 0:
      enter image description here



      end of Epoch 1:
      enter image description here



      end of Epoch 2:
      enter image description here



      end of Epoch 3:
      enter image description here



      end of Epoch 4:
      enter image description here










      share|improve this question














      I have written the following code but does not produce faces on the celebA dataset. I think it should create some sort of face (even if very blurry) at the last iteration of each epoch. However, it just creates noisy squares with no visible face. I am quite new to GANs and I am not sure how to debug this Deep Convolutional GAN (DCGAN) in order to figure what's going wrong.



      My code might be easier to be seen here:
      https://pastebin.com/c4QUqxJy
      Here is the code:



      from __future__ import print_function
      import random
      import os
      import glob
      import scipy

      import tensorflow as tf
      import numpy as np
      from PIL import Image
      import skimage.io as io
      import matplotlib.pyplot as plt


      class Arguments(object):

      data_path = 'results_celebA/preprocessed/'
      save_path = 'results_celebA' #path to save preprocessed image folder
      preproc_foldername = 'preprocessed' #folder name for preprocessed images
      image_size = 64 #images are resized to image_size value
      num_images = 202590 #the number of training images
      batch_size = 64 #batch size
      dim_z = 100 #the dimension of z variable (the generator input dimension)
      n_g_filters = 64 #the number of the generator filters (gets multiplied between layers)
      n_f_filters = 64 #the number of the discriminator filters (gets multiplied between layers)
      n_epoch = 25 #the number of epochs
      lr = 0.0002 #learning rate
      beta1 = 0.5 #beta_1 parameter of Adam optimizer
      beta2 = 0.99 #beta_2 parameter of Adam optimizer

      args = Arguments()


      #contains functions that load, preprocess and visualize images.


      class Dataset(object):
      def __init__(self, data_path, num_imgs, target_imgsize):
      self.data_path = data_path
      self.num_imgs = num_imgs
      self.target_imgsize = target_imgsize

      def normalize_np_image(self, image):
      return (image / 255.0 - 0.5) / 0.5

      def denormalize_np_image(self, image):
      return (image * 0.5 + 0.5) * 255

      def get_input(self, image_path):
      image = np.array(Image.open(image_path)).astype(np.float32)
      return self.normalize_np_image(image)

      def get_imagelist(self, data_path, celebA=False):
      if celebA == True:
      imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
      else:
      imgs_path = os.path.join(data_path, '*.jpg')
      all_namelist = glob.glob(imgs_path, recursive=True)
      return all_namelist[:self.num_imgs]

      def load_and_preprocess_image(self, image_path):
      image = Image.open(image_path)
      j = (image.size[0] - 100) // 2
      i = (image.size[1] - 100) // 2
      image = image.crop([j, i, j + 100, i + 100])
      image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
      image = np.array(image.convert('RGB')).astype(np.float32)
      image = self.normalize_np_image(image)
      return image

      #reads data, preprocesses and saves to another folder with the given path.
      def preprocess_and_save_images(self, dir_name, save_path=''):
      preproc_folder_path = os.path.join(save_path, dir_name)
      if not os.path.exists(preproc_folder_path):
      os.makedirs(preproc_folder_path)
      imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
      print('Saving and preprocessing images ...')
      for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
      cur_image = self.load_and_preprocess_image(imgname)
      cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
      cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' %(num))
      self.data_path= preproc_folder_path

      def get_nextbatch(self, batch_size):
      print("nextbatch batchsize is: ", batch_size)
      assert (batch_size > 0),"Give a valid batch size"
      cur_idx = 0
      image_namelist = self.get_imagelist(self.data_path)
      while cur_idx + batch_size <= self.num_imgs:
      cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
      cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
      cur_batch = np.array(cur_batch).astype(np.float32)
      cur_idx += batch_size
      yield cur_batch

      def show_image(self, image, normalized=True):
      if not type(image).__module__ == np.__name__:
      image = image.numpy()
      if normalized:
      npimg = (image * 0.5) + 0.5
      npimg.astype(np.uint8)
      plt.imshow(npimg, interpolation='nearest')


      #contains functions that load, preprocess and visualize images.

      class Dataset(object):
      def __init__(self, data_path, num_imgs, target_imgsize):
      self.data_path = data_path
      self.num_imgs = num_imgs
      self.target_imgsize = target_imgsize

      def normalize_np_image(self, image):
      return (image / 255.0 - 0.5) / 0.5

      def denormalize_np_image(self, image):
      return (image * 0.5 + 0.5) * 255

      def get_input(self, image_path):
      image = np.array(Image.open(image_path)).astype(np.float32)
      return self.normalize_np_image(image)

      def get_imagelist(self, data_path, celebA=False):
      if celebA == True:
      imgs_path = os.path.join(data_path, 'img_align_celeba/*.jpg')
      else:
      imgs_path = os.path.join(data_path, '*.jpg')

      all_namelist = glob.glob(imgs_path, recursive=True)
      return all_namelist[:self.num_imgs]

      def load_and_preprocess_image(self, image_path):
      image = Image.open(image_path)
      j = (image.size[0] - 100) // 2
      i = (image.size[1] - 100) // 2
      image = image.crop([j, i, j + 100, i + 100])
      image = image.resize([self.target_imgsize, self.target_imgsize], Image.BILINEAR)
      image = np.array(image.convert('RGB')).astype(np.float32)
      image = self.normalize_np_image(image)
      return image

      #reads data, preprocesses and saves to another folder with the given path.
      def preprocess_and_save_images(self, dir_name, save_path=''):
      preproc_folder_path = os.path.join(save_path, dir_name)
      if not os.path.exists(preproc_folder_path):
      os.makedirs(preproc_folder_path)
      imgs_path = os.path.join(self.data_path, 'img_align_celeba/*.jpg')
      print('Saving and preprocessing images ...')
      for num, imgname in enumerate(glob.iglob(imgs_path, recursive=True)):
      cur_image = self.load_and_preprocess_image(imgname)
      cur_image = Image.fromarray(np.uint8(self.denormalize_np_image(cur_image)))
      cur_image.save(preproc_folder_path + '/preprocessed_image_%d.jpg' %(num))
      self.data_path= preproc_folder_path

      def get_nextbatch(self, batch_size):
      assert (batch_size > 0),"Give a valid batch size"
      cur_idx = 0
      image_namelist = self.get_imagelist(self.data_path)
      while cur_idx + batch_size <= self.num_imgs:
      cur_namelist = image_namelist[cur_idx:cur_idx + batch_size]
      cur_batch = [self.get_input(image_path) for image_path in cur_namelist]
      cur_batch = np.array(cur_batch).astype(np.float32)
      cur_idx += batch_size
      yield cur_batch

      def show_image(self, image, normalized=True):
      if not type(image).__module__ == np.__name__:
      image = image.numpy()
      if normalized:
      npimg = (image * 0.5) + 0.5
      npimg.astype(np.uint8)
      plt.imshow(npimg, interpolation='nearest')



      def generator(x, args, reuse=False):
      with tf.device('/gpu:0'):
      with tf.variable_scope("generator", reuse=reuse):
      #Layer Block 1
      with tf.variable_scope("layer1"):
      deconv1 = tf.layers.conv2d_transpose(inputs=x,
      filters= args.n_g_filters*8,
      kernel_size=4,
      strides=1,
      padding='valid',
      use_bias=False,
      name='deconv')
      batch_norm1=tf.layers.batch_normalization(deconv1,
      name = 'batch_norm')
      relu1 = tf.nn.relu(batch_norm1, name='relu')
      #Layer Block 2
      with tf.variable_scope("layer2"):
      deconv2 = tf.layers.conv2d_transpose(inputs=relu1,
      filters=args.n_g_filters*4,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='deconv')
      batch_norm2 = tf.layers.batch_normalization(deconv2,
      name = 'batch_norm')
      relu2 = tf.nn.relu(batch_norm2, name='relu')
      #Layer Block 3
      with tf.variable_scope("layer3"):
      deconv3 = tf.layers.conv2d_transpose(inputs=relu2,
      filters=args.n_g_filters*2,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias = False,
      name='deconv')
      batch_norm3 = tf.layers.batch_normalization(deconv3,
      name = 'batch_norm')
      relu3 = tf.nn.relu(batch_norm3, name='relu')
      #Layer Block 4
      with tf.variable_scope("layer4"):
      deconv4 = tf.layers.conv2d_transpose(inputs=relu3,
      filters=args.n_g_filters,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='deconv')
      batch_norm4 = tf.layers.batch_normalization(deconv4,
      name = 'batch_norm')
      relu4 = tf.nn.relu(batch_norm4, name='relu')
      #Output Layer
      with tf.variable_scope("last_layer"):
      logit = tf.layers.conv2d_transpose(inputs=relu4,
      filters=3,
      kernel_size=4,
      strides=2,
      padding='same',
      use_bias=False,
      name='logit')
      output = tf.nn.tanh(logit)
      return output, logit



def discriminator(x, args, reuse=False):
    with tf.device('/gpu:0'):
        with tf.variable_scope("discriminator", reuse=reuse):
            with tf.variable_scope("layer1"):
                conv1 = tf.layers.conv2d(inputs=x,
                                         filters=args.n_f_filters,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                relu1 = tf.nn.leaky_relu(conv1, alpha=0.2, name='relu')
            with tf.variable_scope("layer2"):
                conv2 = tf.layers.conv2d(inputs=relu1,
                                         filters=args.n_f_filters*2,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                batch_norm2 = tf.layers.batch_normalization(conv2, name='batch_norm')
                relu2 = tf.nn.leaky_relu(batch_norm2, alpha=0.2, name='relu')
            with tf.variable_scope("layer3"):
                conv3 = tf.layers.conv2d(inputs=relu2,
                                         filters=args.n_f_filters*4,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                batch_norm3 = tf.layers.batch_normalization(conv3, name='batch_norm')
                relu3 = tf.nn.leaky_relu(batch_norm3, alpha=0.2, name='relu')
            with tf.variable_scope("layer4"):
                conv4 = tf.layers.conv2d(inputs=relu3,
                                         filters=args.n_f_filters*8,
                                         kernel_size=4,
                                         strides=2,
                                         padding='same',
                                         use_bias=False,
                                         name='conv')
                batch_norm4 = tf.layers.batch_normalization(conv4, name='batch_norm')
                relu4 = tf.nn.leaky_relu(batch_norm4, alpha=0.2, name='relu')
            with tf.variable_scope("last_layer"):
                logit = tf.layers.conv2d(inputs=relu4,
                                         filters=1,
                                         kernel_size=4,
                                         strides=1,
                                         padding='valid',
                                         use_bias=False,
                                         name='conv')
                output = tf.nn.sigmoid(logit)
    return output, logit



def sample_z(dim_z, num_batch):
    mu = 0
    sigma = 1
    s = np.random.normal(mu, sigma, num_batch*dim_z)
    samples = s.reshape(num_batch, 1, 1, dim_z)
    ##dist = tf.distributions.Normal(0.0, 1.0)
    ##samples = dist.sample([num_batch, 1, 1, dim_z])
    return samples

sample_z(100, 64)  # smoke test: returns an array of shape (64, 1, 1, 100), i.e. 6400 values



def get_losses(d_real_logits, d_fake_logits):
    # standard GAN cross-entropy losses: D is pushed toward 1 on real and 0 on fake,
    # G is pushed to make D output 1 on fake
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real_logits, labels=tf.ones_like(d_real_logits))
        + tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))
    return d_loss, g_loss



def get_optimizers(learning_rate, beta1, beta2):
    d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
    g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1, beta2)
    return d_optimizer, g_optimizer


def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
    d_step = d_optimizer.minimize(d_loss)
    g_step = g_optimizer.minimize(g_loss)
    return d_step, g_step


LOGDIR = "logs_basic_dcgan"

def merge_images(image_batch, size):
    h, w = image_batch.shape[1], image_batch.shape[2]
    c = image_batch.shape[3]
    img = np.zeros((int(h*size[0]), w*size[1], c))
    for idx, im in enumerate(image_batch):
        i = idx % size[1]
        j = idx // size[1]
        img[j*h:j*h+h, i*w:i*w+w, :] = im
    return img

itr_fh = open('basic_gan_itr.txt', 'a+')

def train(args):
    tf.reset_default_graph()
    data_loader = Dataset(args.data_path, args.num_images, args.image_size)
    #data_loader.preprocess_and_save_images('preprocessed', 'results_celebA') #preprocess the images once
    X = tf.placeholder(tf.float32, shape=[args.batch_size, args.image_size, args.image_size, 3])
    Z = tf.placeholder(tf.float32, shape=[args.batch_size, 1, 1, args.dim_z])

    G_sample, _ = generator(Z, args)
    D_real, D_real_logits = discriminator(X, args)
    D_fake, D_fake_logits = discriminator(G_sample, args, reuse=True)
    d_loss, g_loss = get_losses(D_real_logits, D_fake_logits)
    d_optimizer, g_optimizer = get_optimizers(args.lr, args.beta1, args.beta2)
    d_step, g_step = optimize(d_optimizer, g_optimizer, d_loss, g_loss)
    ###z_sum = tf.summary.histogram('z', Z)
    ###d_sum = tf.summary.histogram('d', D_real)
    ###G_sum = tf.summary.histogram('g', G_sample)
    ###d_loss_sum = tf.summary.scalar('d_loss', d_loss)
    ###g_loss_sum = tf.summary.scalar('g_loss', g_loss)
    ###d_sum = tf.summary.merge([z_sum, d_sum, d_loss_sum])
    ###g_sum = tf.summary.merge([z_sum, G_sum, g_loss_sum])
    ###saver = tf.train.Saver()
    ###d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
    ###g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
    ###merged_summary = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(args.n_epoch):
            for itr, real_batch in enumerate(data_loader.get_nextbatch(args.batch_size)):
                print('itr is %d, and epoch is %d' % (itr, epoch))
                itr_fh.write("epoch: " + str(epoch) + " itr: " + str(itr) + "\n")

                Z_sample = sample_z(args.dim_z, args.batch_size)

                _, _ = sess.run([d_step, g_step], feed_dict={X: real_batch, Z: Z_sample})
                sample = sess.run(G_sample, feed_dict={Z: Z_sample})
                print("sample size is: ", sample.shape)
                if itr == 3164:  #last iteration of an epoch: num_images/batch_size
                    im_merged = merge_images(sample[:16], [4, 4])
                    plt.imsave('sample_gan_images/im_merged_epoch_%d.png' % (epoch), im_merged)
                    scipy.misc.imsave('sample_gan_images/im_epoch_%d_itr_%d.png' % (epoch, itr), sample[1])
                ###merged_summary = sess.run(merged_summary, feed_dict={X:real_batch, Z:Z_sample})
                ###writer = tf.summary.FileWriter(LOGDIR)
                ###writer.add_summary(merged_summary, itr)
                ###writer.add_graph(sess.graph)
                ###saver.save(sess, save_path='logs_basic_dcgan/gan.ckpt')


train(args)


Here are the images created at the end of the first 5 epochs. I have also commented out the TensorBoard-related code because it made training very slow, unfortunately.
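(A side note on the TensorBoard slowdown: the commented-out code constructs a new tf.summary.FileWriter inside the training loop, which is itself expensive. One lighter pattern is to build the summary ops and the writer once and log only every N iterations. A minimal TF1-style sketch, not tested against this exact graph; the step arithmetic and the interval of 100 are assumptions:

# Sketch: build the summary ops and FileWriter once, before the training loop.
d_loss_summary = tf.summary.scalar("Discriminator_Total_Loss", d_loss)
g_loss_summary = tf.summary.scalar("Generator_Total_Loss", g_loss)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(LOGDIR)
writer.add_graph(sess.graph)

# Inside the training loop: write summaries every 100 iterations, not every step.
if itr % 100 == 0:
    summary_str = sess.run(merged_summary, feed_dict={X: real_batch, Z: Z_sample})
    writer.add_summary(summary_str, epoch * 3165 + itr)  # 3165 ~ batches per epoch here

Note that the result of sess.run is kept in a separate variable (summary_str) so the merged_summary op is not overwritten, unlike in the commented-out version.)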



end of Epoch 0: [sample image]

end of Epoch 1: [sample image]

end of Epoch 2: [sample image]

end of Epoch 3: [sample image]

end of Epoch 4: [sample image]

      python tensorflow deep-learning generative-adversarial-network






asked Nov 16 '18 at 20:37

Mona Jalal
1 Answer
I think the problem is related to the definition of the optimizers:



def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
    d_step = d_optimizer.minimize(d_loss)
    g_step = g_optimizer.minimize(g_loss)
    return d_step, g_step


Although you define each optimizer with the corresponding loss, you are not passing the list of variables that each optimizer should train. By default, minimize considers every variable in the graph collection GraphKeys.TRAINABLE_VARIABLES. Since all of your variables are defined under this collection, your current code actually updates all variables from the generator and from the discriminator both when you call d_step and when you call g_step.



          You have to define the list of variables for each model. Since you are using variable scopes, one way to do that is:



def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
    g_vars = [var for var in t_vars if var.name.startswith('generator')]

    d_step = d_optimizer.minimize(d_loss, var_list=d_vars)
    g_step = g_optimizer.minimize(g_loss, var_list=g_vars)
    return d_step, g_step
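
(An equivalent way to collect the same variable lists is scope-based filtering with tf.get_collection; a minimal sketch, assuming the "generator" and "discriminator" variable scopes used in the question's code:

def optimize(d_optimizer, g_optimizer, d_loss, g_loss):
    # Scope-filtered collections; equivalent to filtering tf.trainable_variables() by name prefix.
    d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    d_step = d_optimizer.minimize(d_loss, var_list=d_vars)
    g_step = g_optimizer.minimize(g_loss, var_list=g_vars)
    return d_step, g_step

Either form works; the collection-based version just avoids the explicit list comprehension.)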





answered Nov 19 '18 at 1:27

K. Bogdan
• interesting. so do you know what it was learning on? according to the images, it was learning something
  – Mona Jalal
  Nov 19 '18 at 3:40

• I am not sure what the model is learning. But we know that the generator G is being updated whenever the discriminator D is updated, since d_loss depends on the generated image. On the other hand, although all variables are considered when G is updated, D in the end will not be changed by the generator's update step g_step, since g_loss does not depend on any variable of D. Thus, G is trying to generate faces, but D is trying to identify them and at the same time is modifying G to be able to do this. So D is doing its task, but G can't generate faces since D is modifying G too.
  – K. Bogdan
  Nov 20 '18 at 12:45
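
(For completeness, a quick way to inspect what each update step will train is to print the variable lists; a small diagnostic sketch, assuming the graph from train() above has already been built:

# Diagnostic sketch (TF1-style), run after the graph is built.
all_vars = tf.trainable_variables()
d_vars = [v for v in all_vars if v.name.startswith('discriminator')]
g_vars = [v for v in all_vars if v.name.startswith('generator')]
print('default minimize() trains %d variables' % len(all_vars))
print('with var_list: discriminator %d vars, generator %d vars' % (len(d_vars), len(g_vars)))

Without var_list, both counts collapse into the first number, which is exactly the coupling described above.)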











