Lab 1 - Harshil - Parmar

The document outlines a process for loading the MNIST dataset, training a simple neural network with PyTorch, and generating adversarial examples with the PGD attack. It demonstrates the negative impact of adversarial examples on model performance, highlighting a significant drop in accuracy when the model is evaluated on them. The analysis emphasizes the vulnerability of deep neural networks to small perturbations, illustrating the challenges posed by adversarial attacks.

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import idx2numpy
import matplotlib.pyplot as plt
from secml.adv.attacks.evasion import CAttackEvasionPGD
from secml.array import CArray
from secml.ml.classifiers import CClassifierPyTorch
from secml.optim.constraints import CConstraintL2, CConstraintBox
from secml.figure import CFigure

# Load MNIST dataset from idx files
mnist_data_path = "C:/Users/Hp15d/Desktop/MNIST/raw/"

train_images_file = mnist_data_path + "train-images.idx3-ubyte"
train_labels_file = mnist_data_path + "train-labels.idx1-ubyte"
test_images_file = mnist_data_path + "t10k-images.idx3-ubyte"
test_labels_file = mnist_data_path + "t10k-labels.idx1-ubyte"

# Load data using idx2numpy
train_images = idx2numpy.convert_from_file(train_images_file)
train_labels = idx2numpy.convert_from_file(train_labels_file)
test_images = idx2numpy.convert_from_file(test_images_file)
test_labels = idx2numpy.convert_from_file(test_labels_file)

# Convert to PyTorch tensors and normalize to [0, 1]
train_images = torch.tensor(train_images, dtype=torch.float32).unsqueeze(1) / 255.0
train_labels = torch.tensor(train_labels, dtype=torch.long)
test_images = torch.tensor(test_images, dtype=torch.float32).unsqueeze(1) / 255.0
test_labels = torch.tensor(test_labels, dtype=torch.long)

# Use a subset of the dataset
train_images = train_images[:3000]
train_labels = train_labels[:3000]
test_images = test_images[:1000]
test_labels = test_labels[:1000]

class CustomMNISTDataset(Dataset):
    def __init__(self, images, labels, transform=None):
        self.images = images
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        image = self.images[idx]
        label = self.labels[idx]

        if self.transform:
            image = self.transform(image)

        return image, label

transform = transforms.Compose([transforms.Normalize((0.1307,), (0.3081,))])

# Create DataLoaders
train_dataset = CustomMNISTDataset(train_images, train_labels, transform=transform)
test_dataset = CustomMNISTDataset(test_images, test_labels, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=False)

print(f"MNIST dataset loaded: Training samples = {len(train_dataset)}, Testing samples = {len(test_dataset)}")

# Neural Network Model
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Initialize model, loss, and optimizer
model = SimpleNN()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

def train_model(model, train_loader, criterion, optimizer, epochs=5):
    model.train()
    for epoch in range(epochs):
        for images, labels in train_loader:
            images = images.view(-1, 28 * 28)  # flatten each image to a 784-dim vector
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
        print(f'Epoch {epoch+1}/{epochs}, Loss: {loss.item():.4f}')

train_model(model, train_loader, criterion, optimizer)

# Wrap the PyTorch model with CClassifierPyTorch
cmodel = CClassifierPyTorch(
    model,
    input_shape=(1, 28, 28),
    preprocess=None,
    optimizer=optimizer,
    loss=criterion
)

# Fit the classifier using the training dataset (wrapped as CArrays)
cmodel.fit(CArray(train_images.view(3000, -1).numpy()), CArray(train_labels.numpy()))

# Get a single test sample
for x_batch, y_batch in test_loader:
    x = x_batch[0]           # first image in the batch
    y = y_batch[0].item()    # first label
    break                    # only one sample is needed

# Reverse the mean/std normalization so pixel values are back in [0, 1]
mean = 0.1307
std = 0.3081
x = x * std + mean

x = x.unsqueeze(0)  # add a batch dimension

solver_params = {
    'eta': 0.3,       # gradient step size
    'max_iter': 100,  # maximum number of iterations
    'eps': 1e-4       # tolerance used by the solver's stopping criterion
}

n_attack = CAttackEvasionPGD(
    classifier=cmodel,      # target model
    double_init=False,      # disable double initialization
    distance='l2',          # L2-norm attack
    dmax=0.2,               # perturbation budget (epsilon)
    lb=0.0,                 # lower bound (normalized image range)
    ub=1.0,                 # upper bound (normalized image range)
    attack_classes='all',   # attack all classes
    solver_params=solver_params
)
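
# For reference, a minimal sketch of the projected-gradient step that an
# L2-constrained PGD attack performs internally (this helper is only an
# illustration, not part of the secml API): take a gradient step of size eta
# on the attacker's loss, project the total perturbation back onto the L2 ball
# of radius dmax, and clip the result to the valid pixel range [lb, ub].
def pgd_l2_step_sketch(x_adv, x_orig, grad, eta=0.3, dmax=0.2, lb=0.0, ub=1.0):
    x_adv = x_adv + eta * grad                  # gradient ascent on the attack loss
    delta = x_adv - x_orig                      # total perturbation so far
    norm = torch.norm(delta)
    if norm > dmax:                             # project back onto the L2 ball
        delta = delta * (dmax / norm)
    return torch.clamp(x_orig + delta, lb, ub)  # keep pixels in the valid range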

# Run the attack; run() returns the predicted label, the scores, the adversarial
# dataset, and the attack objective value
y_adv_pred, _, adv_ds, _ = n_attack.run(CArray(x.numpy().reshape(1, -1)), CArray([y]))
x_adv = adv_ds.X  # adversarial example as a CArray


def show_digits(samples, preds, labels, digs, n_display=8):
    samples = samples.atleast_2d()
    n_display = min(n_display, samples.shape[0])

    fig = CFigure(width=n_display * 2, height=3)
    for idx in range(n_display):
        fig.subplot(2, n_display, idx + 1)
        fig.sp.xticks([])
        fig.sp.yticks([])
        fig.sp.imshow(samples[idx, :].reshape((28, 28)), cmap='gray')
        fig.sp.title(f"{digs[labels[idx].item()]} ({digs[preds[idx].item()]})",
                     color=("green" if labels[idx].item() == preds[idx].item() else "red"))
    fig.show()

digs = list(range(10))

# Show the clean sample and the adversarial sample with their true (predicted) labels
x_clean = CArray(x.numpy().reshape(1, -1))
show_digits(x_clean, cmodel.predict(x_clean), CArray([y]), digs)

show_digits(x_adv, cmodel.predict(x_adv), CArray([y]), digs)

print("Adversarial example generated successfully!")

# Analysis and discussion
#
# Effect of adversarial examples on model performance: as expected, adversarial
# examples degrade the model's performance. Although the model achieves high
# accuracy on clean data, its accuracy drops sharply when it is evaluated on
# adversarial examples. This drop illustrates how small, carefully crafted
# perturbations can cause a deep neural network to misclassify inputs.
#
# Anticipated outcome: a decrease in accuracy on adversarial examples is expected
# given the nature of adversarial attacks, and it shows how adversarial examples
# undermine model robustness. For example, if clean accuracy is 98% while
# adversarial accuracy is 40%, the model is highly vulnerable to this kind of
# perturbation.
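
# A hedged sketch of how clean vs. adversarial accuracy could be measured
# (the helper name and sample count below are assumptions, not part of the lab
# code): classify a few test images, attack each one with the PGD attack defined
# above, and classify the adversarial versions.
def evaluate_clean_vs_adversarial(cmodel, attack, images, labels, n_samples=100):
    correct_clean, correct_adv = 0, 0
    for i in range(n_samples):
        x_i = CArray(images[i].numpy().reshape(1, -1))
        y_i = CArray([labels[i].item()])
        if cmodel.predict(x_i).item() == labels[i].item():
            correct_clean += 1
        _, _, adv_ds, _ = attack.run(x_i, y_i)  # run the PGD attack on this sample
        if cmodel.predict(adv_ds.X).item() == labels[i].item():
            correct_adv += 1
    print(f"Clean accuracy: {correct_clean / n_samples:.2%}")
    print(f"Adversarial accuracy: {correct_adv / n_samples:.2%}")

# Example usage (test_images are already scaled to [0, 1]):
# evaluate_clean_vs_adversarial(cmodel, n_attack, test_images, test_labels)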
