Using the code below, I need help with the following three things:
1) Write Python code to plot the images from the first epoch, and take a screenshot of those images.
2) Write Python code to plot the images from the last epoch, and take a screenshot of those images.
3) Comment on the network performance.
A sketch addressing items 1) and 2) follows the code listing.
#Step 1: Import the required Python libraries:
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers import LeakyReLU
from keras.layers import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.datasets import cifar10
#Step 2: Load the data.
#Loading the CIFAR10 data
(X, y), (_, _) = keras.datasets.cifar10.load_data()
#Selecting a single class of images
#The class was chosen arbitrarily; any CIFAR-10 label
#between 0 and 9 can be used (label 8 is the ship class)
X = X[y.flatten() == 8]
#Step 3: Define parameters to be used in later processes.
#Defining the Input shape
image_shape = (32, 32, 3)
latent_dimensions = 100
#Step 4: Define a utility function to build the generator.
def build_generator():
    model = Sequential()
    #Building the input layer
    model.add(Dense(128 * 8 * 8, activation="relu",
                    input_dim=latent_dimensions))
    model.add(Reshape((8, 8, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))
    model.add(Conv2D(3, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    #Generating the output image
    noise = Input(shape=(latent_dimensions,))
    image = model(noise)
    return Model(noise, image)
#Step 5: Define a utility function to build the discriminator.
def build_discriminator():
    #Building the convolutional layers
    #to classify whether an image is real or fake
    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, strides=2,
                     input_shape=image_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))
    #Building the output layer
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    image = Input(shape=image_shape)
    validity = model(image)
    return Model(image, validity)
#Step 6: Define a utility function to display the generated images.
def display_images():
    r, c = 4, 4
    noise = np.random.normal(0, 1, (r * c, latent_dimensions))
    generated_images = generator.predict(noise)
    #Rescaling the generated images from [-1, 1] to [0, 1] for display
    generated_images = 0.5 * generated_images + 0.5
    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(generated_images[count, :, :, :])
            axs[i, j].axis('off')
            count += 1
    plt.show()
    plt.close()
#Step 7: Build the GAN.
# Building and compiling the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(0.0002, 0.5),
                      metrics=['accuracy'])
#Making the discriminator untrainable
#so that the generator can learn from fixed gradient
discriminator.trainable = False
# Building the generator
generator = build_generator()
#Defining the input for the generator
#and generating the images
z = Input(shape=(latent_dimensions,))
image = generator(z)
#Checking the validity of the generated image
valid = discriminator(image)
#Defining the combined model of the generator and the discriminator
combined_network = Model(z, valid)
combined_network.compile(loss='binary_crossentropy',
                         optimizer=Adam(0.0002, 0.5))
#Step 8: Train the network.
num_epochs=15000
batch_size=32
display_interval=2500
losses=[]
#Normalizing the input
X = (X / 127.5) - 1.
#Defining the Adversarial ground truths
valid = np.ones((batch_size, 1))
#Adding some noise
valid += 0.05 * np.random.random(valid.shape)
fake = np.zeros((batch_size, 1))
fake += 0.05 * np.random.random(fake.shape)
for epoch in range(num_epochs):
    #Training the discriminator
    #Sampling a random batch of real images
    index = np.random.randint(0, X.shape[0], batch_size)
    images = X[index]
    #Sampling noise and generating a batch of new images
    noise = np.random.normal(0, 1, (batch_size, latent_dimensions))
    generated_images = generator.predict(noise)
    #Training the discriminator to detect more accurately
    #whether a generated image is real or fake
    discm_loss_real = discriminator.train_on_batch(images, valid)
    discm_loss_fake = discriminator.train_on_batch(generated_images, fake)
    discm_loss = 0.5 * np.add(discm_loss_real, discm_loss_fake)
    #Training the generator to generate images
    #that pass the authenticity test
    genr_loss = combined_network.train_on_batch(noise, valid)
    #Tracking the progress
    if epoch % display_interval == 0:
        display_images()
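
A minimal sketch for items 1) and 2), assuming the objects defined above (generator, latent_dimensions, num_epochs, display_interval) are in scope. It adds a save_images() helper, which is not part of the original code, that draws the same 4x4 grid as display_images() but also writes it to a PNG, so the grids from the first epoch (epoch 0) and the last epoch (num_epochs - 1) can be screenshotted after training; the file name pattern is illustrative.

#Sketch (assumption): a variant of display_images() that also saves the grid
def save_images(epoch, r=4, c=4):
    noise = np.random.normal(0, 1, (r * c, latent_dimensions))
    generated_images = generator.predict(noise)
    #Rescaling the generator output from [-1, 1] to [0, 1] for display
    generated_images = 0.5 * generated_images + 0.5
    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(generated_images[count])
            axs[i, j].axis('off')
            count += 1
    fig.suptitle("Generated images, epoch %d" % epoch)
    fig.savefig("gan_images_epoch_%d.png" % epoch)  #illustrative file name
    plt.show()
    plt.close(fig)

#In the training loop, the display call could then become:
#    if epoch == 0 or epoch == num_epochs - 1 or epoch % display_interval == 0:
#        save_images(epoch)
#The figure saved at epoch 0 answers item 1), and the one saved at
#epoch num_epochs - 1 answers item 2).

For item 3), the comment should be based on the two saved grids and on the losses rather than on anything stated here: at epoch 0 the generator is untrained, so its grid is expected to look like coloured noise, while by the last epoch the grid should show rough ship-like shapes and colours if training made progress. Blurry or near-identical images late in training (mode collapse), or no visible improvement over the early grid, would indicate weak performance. Appending discm_loss and genr_loss to the currently unused losses list on each iteration and plotting them after training is one way to support that judgement.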