import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Reshape
from tensorflow.keras.models import Sequential
# Load and preprocess the MNIST dataset
(x_train, _), (_, _) = mnist.load_data()
x_train = x_train.astype(np.float32) / 255.0  # normalize pixel values to [0, 1]
# No channel axis is added: the Dense-based models below work on (28, 28) images
# Define the generator model
def build_generator(latent_dim):
    model = Sequential([
        Dense(128, activation='relu', input_dim=latent_dim),
        Dense(784, activation='sigmoid'),  # 784 = 28 * 28 output pixels
        Reshape((28, 28))
    ])
    return model
# Define the discriminator model
def build_discriminator():
    model = Sequential([
        Flatten(input_shape=(28, 28)),
        Dense(128, activation='relu'),
        Dense(1, activation='sigmoid')  # probability that the input image is real
    ])
    return model
# Compile the discriminator model
discriminator = build_discriminator()
discriminator.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Build and compile the combined GAN model with the discriminator frozen.
# Freezing only affects the combined model: the discriminator was already
# compiled above, so its own train_on_batch calls still update its weights.
discriminator.trainable = False
generator = build_generator(latent_dim=100)
gan_input = tf.keras.Input(shape=(100,))
gan_output = discriminator(generator(gan_input))
gan = tf.keras.Model(gan_input, gan_output)
gan.compile(optimizer='adam', loss='binary_crossentropy')
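
# Optional sanity check (illustrative, not required for training): the
# combined model should map a (batch, 100) latent vector to a single
# real/fake probability per sample, i.e. output shape (None, 1).
gan.summary()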
# Training the GAN
epochs = 10000
batch_size = 128
for epoch in range(epochs):
    # Train the discriminator on one batch of real and one batch of fake images
    idx = np.random.randint(0, x_train.shape[0], batch_size)
    real_imgs = x_train[idx]
    noise = np.random.normal(0, 1, (batch_size, 100))
    generated_imgs = generator.predict(noise, verbose=0)
    real_labels = np.ones((batch_size, 1))
    fake_labels = np.zeros((batch_size, 1))
    d_loss_real = discriminator.train_on_batch(real_imgs, real_labels)
    d_loss_fake = discriminator.train_on_batch(generated_imgs, fake_labels)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)  # average loss/accuracy over real and fake batches
    # Train the generator: push the (frozen) discriminator toward labeling fakes as real
    noise = np.random.normal(0, 1, (batch_size, 100))
    valid_labels = np.ones((batch_size, 1))
    g_loss = gan.train_on_batch(noise, valid_labels)
    # Print progress
    print(f"{epoch}/{epochs} [D loss: {d_loss[0]:.4f} | D accuracy: {d_loss[1]:.4f}] [G loss: {g_loss:.4f}]")
    # Save a sample image at regular intervals
    if epoch % 1000 == 0:
        generated_img = np.expand_dims(generated_imgs[0] * 255, axis=-1)  # save_img expects a channel axis
        tf.keras.preprocessing.image.save_img(f'generated_img_{epoch}.png', generated_img)
print("Training Complete!")

# A single perceptron: a weighted sum of inputs passed through a step activation
import numpy as np

def activation_function(x):
    # A simple step function as the activation, for illustrative purposes
    return 1 if x >= 0 else 0
def perceptron(inputs, weights):
    # Sum up the weighted inputs
    sum_ = np.dot(inputs, weights)
    # Apply the activation function
    return activation_function(sum_)
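
# Example usage (weights chosen by hand, an illustrative assumption): with a
# constant 1 appended as a bias input, weights [1, 1, -1.5] make the
# perceptron compute a logical AND.
for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    out = perceptron(np.array([x1, x2, 1]), np.array([1, 1, -1.5]))
    print(f"AND({x1}, {x2}) = {out}")  # prints 1 only for (1, 1)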

# A two-layer feedforward pass with sigmoid activations
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def feedforward(inputs, weights_input_hidden, weights_hidden_output):
    # Input layer to hidden layer
    hidden_input = np.dot(inputs, weights_input_hidden)
    hidden_output = sigmoid(hidden_input)
    # Hidden layer to output layer
    final_input = np.dot(hidden_output, weights_hidden_output)
    final_output = sigmoid(final_input)
    return final_output
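
# Example forward pass (illustrative): a 2-input, 3-hidden-unit, 1-output
# network with randomly initialized weights.
rng = np.random.default_rng(0)
weights_input_hidden = rng.normal(size=(2, 3))
weights_hidden_output = rng.normal(size=(3, 1))
output = feedforward(np.array([0.5, -0.2]), weights_input_hidden, weights_hidden_output)
print(output)  # a single value in (0, 1), squashed by the output sigmoid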

def sigmoid_derivative(x):
    # Derivative of the sigmoid; assumes x is already a sigmoid output,
    # i.e. x = sigmoid(z), so the derivative simplifies to x * (1 - x)
    return x * (1 - x)