LA Lab
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Dense, Reshape, Flatten, LeakyReLU
from tensorflow.keras.optimizers import Adam
import numpy as np

!mkdir generated_images

img_width = 28
img_height = 28
channels = 1

img_shape = (img_width, img_height, channels)
latent_dim = 100
adam = Adam(learning_rate=0.0001)

def build_generator():
    # Maps a latent noise vector to a 28x28x1 image with values in [-1, 1]
    model = Sequential()

    model.add(Dense(256, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))

    model.summary()
    return model

generator = build_generator()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` arg


super().__init__(activity_regularizer=activity_regularizer, **kwargs)
/usr/local/lib/python3.10/dist-packages/keras/src/layers/activations/leaky_relu.py:41: UserWarning: Argument `alpha` is deprecated.
warnings.warn(
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ dense (Dense) │ (None, 256) │ 25,856 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ leaky_re_lu (LeakyReLU) │ (None, 256) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization │ (None, 256) │ 1,024 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_1 (Dense) │ (None, 256) │ 65,792 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ leaky_re_lu_1 (LeakyReLU) │ (None, 256) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_1 │ (None, 256) │ 1,024 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_2 (Dense) │ (None, 256) │ 65,792 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ leaky_re_lu_2 (LeakyReLU) │ (None, 256) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_2 │ (None, 256) │ 1,024 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_3 (Dense) │ (None, 784) │ 201,488 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ reshape (Reshape) │ (None, 28, 28, 1) │ 0 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 362,000 (1.38 MB)
Trainable params: 360,464 (1.38 MB)
Non-trainable params: 1,536 (6.00 KB)
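
The generator maps a 100-dimensional noise vector through three Dense/LeakyReLU/BatchNormalization stages to a 784-unit tanh layer, then reshapes it to 28x28x1 (tanh matches the [-1, 1] scaling applied to MNIST later in train). A quick shape check, as an optional sketch:

# Optional sketch: one latent vector in, one 28x28x1 image out.
z = np.random.normal(0, 1, (1, latent_dim))
print(generator.predict(z).shape)  # expected: (1, 28, 28, 1)
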
def build_discriminator():
    # Maps a 28x28x1 image to a single real/fake probability
    model = Sequential()

    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(Dense(1, activation='sigmoid'))

    model.summary()
    return model

discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

/usr/local/lib/python3.10/dist-packages/keras/src/layers/reshaping/flatten.py:37: UserWarning: Do not pass an `input_shape`/`input_d


super().__init__(**kwargs)
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ flatten (Flatten) │ (None, 784) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_4 (Dense) │ (None, 512) │ 401,920 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ leaky_re_lu_3 (LeakyReLU) │ (None, 512) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_5 (Dense) │ (None, 256) │ 131,328 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_6 (Dense) │ (None, 1) │ 257 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 533,505 (2.04 MB)
Trainable params: 533,505 (2.04 MB)
Non-trainable params: 0 (0.00 B)
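
Because the discriminator is compiled with metrics=['accuracy'], each train_on_batch call returns a [loss, accuracy] pair; the training loop below relies on this when it indexes d_loss[0] and d_loss[1]. An optional sketch to see that shape without touching the weights (dummy data, not part of the lab):

# Optional sketch: test_on_batch evaluates without updating weights.
dummy_imgs = np.zeros((2, 28, 28, 1))
dummy_labels = np.ones((2, 1))
print(discriminator.test_on_batch(dummy_imgs, dummy_labels))  # [loss, accuracy]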

GAN = Sequential()
discriminator.trainable = False
GAN.add(generator)
GAN.add(discriminator)

GAN.compile(loss='binary_crossentropy', optimizer=adam)
GAN.summary()

Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ sequential (Sequential) │ (None, 28, 28, 1) │ 362,000 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ sequential_1 (Sequential) │ (None, 1) │ 533,505 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 895,505 (3.42 MB)
Trainable params: 360,464 (1.38 MB)
Non-trainable params: 535,041 (2.04 MB)
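
Setting discriminator.trainable = False before compiling the combined model means only the generator's weights are updated when GAN is trained; the summary above reflects this (360,464 trainable parameters, exactly the generator's share). An optional sanity-check sketch:

# Optional sketch: the frozen discriminator contributes no trainable variables to GAN.
assert len(GAN.trainable_weights) == len(generator.trainable_weights)
print("Trainable variables in GAN:", len(GAN.trainable_weights))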

#@title ## **7) Outputting Images**
import matplotlib.pyplot as plt
import glob
import imageio
import PIL

save_name = 0.00000000

def save_imgs(epoch):
    r, c = 5, 5
    noise = np.random.normal(0, 1, (r * c, latent_dim))
    gen_imgs = generator.predict(noise)

    # Incrementing float counter used to build unique, sortable filenames
    global save_name
    save_name += 0.00000001
    print("%.8f" % save_name)

    # Rescale images 0 - 1
    gen_imgs = 0.5 * gen_imgs + 0.5

    fig, axs = plt.subplots(r, c)

    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            # axs[i,j].imshow(gen_imgs[cnt])
            axs[i, j].axis('off')
            cnt += 1

    fig.savefig("generated_images/%.8f.png" % save_name)
    print('saved')
    plt.close()

def train(epochs, batch_size=64, save_interval=200):

    (X_train, _), (_, _) = mnist.load_data()

    # Rescale data between -1 and 1
    X_train = X_train / 127.5 - 1.

    # Create our Y for our Neural Networks
    valid = np.ones((batch_size, 1))
    fakes = np.zeros((batch_size, 1))

    for epoch in range(epochs):
        # Get Random Batch
        idx = np.random.randint(0, X_train.shape[0], batch_size)
        imgs = X_train[idx]

        # Generate Fake Images
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        gen_imgs = generator.predict(noise)

        # Train discriminator
        d_loss_real = discriminator.train_on_batch(imgs, valid)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, fakes)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        noise = np.random.normal(0, 1, (batch_size, latent_dim))

        # Inverse y label
        g_loss = GAN.train_on_batch(noise, valid)

        print("******* %d [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss[0]))

        if (epoch % save_interval) == 0:
            save_imgs(epoch)

train(950, batch_size=64, save_interval=200)

******* 948 [D loss: 2.605092, acc: 29.91%] [G loss : 2.606087]
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step
******* 949 [D loss: 2.606057, acc: 29.91%] [G loss : 2.607045]
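
After training, a single digit can be sampled straight from the generator. The sketch below is not part of the original notebook; it just mirrors the rescaling already done in save_imgs:

# Optional sketch: sample one image from the trained generator and display it.
noise = np.random.normal(0, 1, (1, latent_dim))
img = generator.predict(noise)   # tanh output, values in [-1, 1]
img = 0.5 * img + 0.5            # rescale to [0, 1] for display
plt.imshow(img[0, :, :, 0], cmap='gray')
plt.axis('off')
plt.show()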

# Display a single image using the epoch number
# def display_image(epoch_no):
#     return PIL.Image.open('generated_images/%.8f.png' % epoch_no)

anim_file = 'dcgan.gif'

with imageio.get_writer(anim_file, mode='I') as writer:
    filenames = glob.glob('generated_images/*.png')
    filenames = sorted(filenames)
    for filename in filenames:
        image = imageio.imread(filename)
        writer.append_data(image)
    # Append the last frame once more so the animation lingers on the final result
    image = imageio.imread(filename)
    writer.append_data(image)

<ipython-input-8-5d911b6b7554>:11: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of
image = imageio.imread(filename)
<ipython-input-8-5d911b6b7554>:13: DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of
image = imageio.imread(filename)
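
To preview the finished dcgan.gif inline in Colab, IPython's display helper can be used (an optional sketch, not part of the original output):

# Optional sketch: render the GIF in the notebook output cell.
from IPython.display import Image as IPImage
IPImage(filename=anim_file)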
