Shakib

The document contains multiple code snippets demonstrating various programming concepts including basic variables, arithmetic operations, control structures (if-else statements, loops), function definitions, and graph traversal algorithms (BFS and DFS). It also showcases search algorithms like Greedy Best First Search and A* Search, as well as a simple implementation of a perceptron and a genetic algorithm. Additionally, it includes a section on preprocessing data for a Naive Bayes model using the scikit-learn library.

CODE:

Code 1: Basic Variables

name = "Shahadat Hossain Shakib"

age = 21
print(f"Name: {name}")

print(f"Age: {age}")

Output:

Code 2: Arithmetic Operation

a = 10
b = 5
print("Sum:", a + b)
print("Difference:", a - b)
print("Multiplication:", a * b)
print("Quotient:", a / b)
print("Remainder:", a % b)
print("Power:", a ** b)

Output:
CODE:

Code 1: If-else Statement

number = -5
if number > 0:
    print("The number is positive.")
else:
    print("The number is not positive.")

Output:

Code 2: Loops

count1 = 0
while count1 < 5:
    print(f"Count: {count1}")
    count1 += 1

Output:

Code 3: Defining Function

def greet(name):
    return f"Hello, {name}!"

message = greet("Shakib")
print(message)
Output:
CODE:

Code 1: BFS

from collections import deque

def bfs(graph, start):
    visited = set()
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node not in visited:
            print(node, end=" ")
            visited.add(node)
            queue.extend(graph[node])  # enqueue neighbours; the visited check filters repeats

graph = {
    'A': ['L', 'M'],
    'L': ['A', 'C', 'R'],
    'M': ['A', 'P', 'T'],
    'C': ['L'],
    'R': ['L', 'K'],
    'P': ['M', 'G'],
    'T': ['M', 'Y'],
    'K': ['R'],
    'G': ['P'],
    'Y': ['T']
}

bfs(graph, 'A')
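
With this adjacency list, the queue expands the graph level by level from 'A', so the expected printout is: A L M C R P T K G Y.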

Output:
Code 2: DFS

def dfs(graph, start):
    visited = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            print(node, end=" ")
            visited.add(node)
            stack.extend(reversed(graph[node]))  # reversed so neighbours are popped in listed order

graph = {
    'A': ['L', 'M'],
    'L': ['A', 'C', 'R'],
    'M': ['A', 'P', 'T'],
    'C': ['L'],
    'R': ['L', 'K'],
    'P': ['M', 'G'],
    'T': ['M', 'Y'],
    'K': ['R'],
    'G': ['P'],
    'Y': ['T']
}

dfs(graph, 'A')
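
Because neighbours are pushed in reverse, the stack pops them in their listed order, so the expected depth-first traversal from 'A' is: A L C R K M P G T Y.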

Output:
CODE:

Code 1: Greedy Best First Search

from queue import PriorityQueue

graph = {
    'A': [('D', 3), ('E', 5)],
    'D': [('A', 3), ('L', 3), ('M', 7)],
    'L': [('D', 3), ('M', 2)],
    'M': [('D', 7), ('L', 2), ('Y', 1)],
    'E': [('A', 5), ('K', 11), ('T', 2)],
    'K': [('E', 11), ('T', 5), ('Y', 2)],
    'T': [('K', 5), ('Y', 12), ('E', 2)],
    'Y': [('M', 1), ('K', 2), ('T', 12)]
}

heuristic = {
    'A': 15,
    'D': 12,
    'E': 14,
    'L': 10,
    'M': 8,
    'K': 9,
    'T': 6,
    'Y': 0
}

def best_first_search(start, goal):
    visited = set()
    pq = PriorityQueue()
    pq.put((heuristic[start], start))
    while not pq.empty():
        _, current = pq.get()
        if current in visited:
            continue
        print("visiting:", current)
        visited.add(current)
        if current == goal:
            print("Goal Found")
            return
        for neighbor, _ in graph[current]:
            if neighbor not in visited:
                pq.put((heuristic[neighbor], neighbor))  # rank the frontier by heuristic only
    print("Goal not found")

best_first_search('A', 'Y')
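
Greedy best-first search orders the frontier by the heuristic alone and ignores edge costs, so with the values above the expected visiting order is A, D, M, Y; it reaches the goal quickly but gives no guarantee that the implied route is the cheapest.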

Output:

Code 2: A* Search

from queue import PriorityQueue

graph = {
    'A': [('D', 3), ('E', 5)],
    'D': [('A', 3), ('L', 3), ('M', 7)],
    'L': [('D', 3), ('M', 2)],
    'M': [('D', 7), ('L', 2), ('Y', 1)],
    'E': [('A', 5), ('K', 11), ('T', 2)],
    'K': [('E', 11), ('T', 5), ('Y', 2)],
    'T': [('K', 5), ('Y', 12), ('E', 2)],
    'Y': [('M', 1), ('K', 2), ('T', 12)]
}

heuristic = {
    'A': 15,
    'D': 12,
    'E': 14,
    'L': 10,
    'M': 8,
    'K': 9,
    'T': 6,
    'Y': 0
}

def a_star_search(start, goal):
    pq = PriorityQueue()
    pq.put((heuristic[start], 0, start, [start]))  # entries are (f, g, node, path)
    visited = set()
    while not pq.empty():
        f, g, current, path = pq.get()
        if current in visited:
            continue
        print("Visiting:", current)
        visited.add(current)
        if current == goal:
            print("Path found", path)
            return
        for neighbor, cost in graph[current]:
            new_g = g + cost
            new_f = new_g + heuristic[neighbor]  # f = g + h
            new_path = path + [neighbor]
            pq.put((new_f, new_g, neighbor, new_path))
    print("Goal not found")

a_star_search('A', 'Y')
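
As a quick check of the f = g + h bookkeeping: expanding 'A' puts D on the frontier with f = 3 + 12 = 15 and E with f = 5 + 14 = 19, so D is expanded first. Following the lowest estimates, the search should report the path ['A', 'D', 'L', 'M', 'Y'], whose total edge cost is 3 + 3 + 2 + 1 = 9.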

Output:
CODE:

def step_function(x):
    if x >= 0:
        return 1
    else:
        return 0

class Perceptron:
    def __init__(self, learning_rate=0.4, epochs=5):
        self.weights = [0, 0]
        self.bias = 0
        self.learning_rate = learning_rate
        self.epochs = epochs

    def predict(self, inputs):
        total = inputs[0] * self.weights[0] + inputs[1] * self.weights[1] + self.bias
        return step_function(total)

    def train(self, training_inputs, labels):
        for epoch in range(self.epochs):
            print(f"Epochs: {epoch+1}")
            for inputs, label in zip(training_inputs, labels):
                prediction = self.predict(inputs)
                error = label - prediction
                self.weights[0] += self.learning_rate * error * inputs[0]
                self.weights[1] += self.learning_rate * error * inputs[1]
                self.bias += self.learning_rate * error
                print(f"Inputs: {inputs}, Target: {label}, Predict: {prediction}, Error: {error}")
            print(f"Weights: {self.weights}, bias: {self.bias}")

training_inputs = [
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
]

labels = [0, 0, 0, 1]

p = Perceptron()
p.train(training_inputs, labels)

print("Testing Trained perceptron:")
for inputs in training_inputs:
    print(f"Input:{inputs} => Output: {p.predict(inputs)}")

Output:
CODE:

import random

# Step 1: Fitness function
def fitness(x):
    return 15 * x - x ** 2

# Step 2: Convert binary string to decimal
def decode(chromosome):
    return int(chromosome, 2)

# Step 3: Create random chromosome
def random_chromosome():
    return ''.join(random.choice(['0', '1']) for _ in range(4))

# Step 4: Create initial population
def create_population(size):
    return [random_chromosome() for _ in range(size)]

# Step 5: Selection (roulette wheel method)
def select_parent(population, fitness_values):
    total = sum(fitness_values)
    pick = random.uniform(0, total)
    current = 0
    for i in range(len(population)):
        current += fitness_values[i]
        if current > pick:
            return population[i]
    return population[-1]  # fallback for rounding edge cases so a parent is always returned

# Step 6: One-point crossover
def crossover(parent1, parent2):
    point = random.randint(1, 3)
    return parent1[:point] + parent2[point:], parent2[:point] + parent1[point:]

# Step 7: Mutation (bit flip)
def mutate(chromosome, mutation_rate=0.01):
    new_chrom = ''
    for bit in chromosome:
        if random.random() < mutation_rate:
            new_chrom += '1' if bit == '0' else '0'
        else:
            new_chrom += bit
    return new_chrom

# Step 8: Main GA function
def genetic_algorithm(pop_size=6, generations=4):
    population = create_population(pop_size)
    for gen in range(generations):
        print(f"Generation {gen}:")
        fitness_values = [fitness(decode(c)) for c in population]
        for i in range(pop_size):
            print(f" {population[i]} (x={decode(population[i])}) → f(x)={fitness_values[i]}")

        new_population = []
        # Elitism: keep best 1
        best = max(population, key=lambda c: fitness(decode(c)))
        new_population.append(best)

        while len(new_population) < pop_size:
            parent1 = select_parent(population, fitness_values)
            parent2 = select_parent(population, fitness_values)
            child1, child2 = crossover(parent1, parent2)
            new_population.append(mutate(child1))
            if len(new_population) < pop_size:
                new_population.append(mutate(child2))

        population = new_population

    # Final result
    best = max(population, key=lambda c: fitness(decode(c)))
    print(f"\nBest solution: {best} (x={decode(best)}) → f(x)={fitness(decode(best))}")

# Run the algorithm
genetic_algorithm()
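
Because a 4-bit chromosome can only encode x = 0..15, the optimum the GA is searching for can be verified by brute force; a minimal sanity-check sketch (not part of the assignment code) is:

best_x = max(range(16), key=lambda x: 15 * x - x ** 2)  # exhaustive search over all 4-bit values
print(best_x, 15 * best_x - best_x ** 2)                # prints 7 56 (x = 8 ties with the same fitness)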

OUTPUT:
CODE:

from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB

# Raw data
outlook = ['S','S','O','R','R','R','O','S','S','R','S','O','O','R']
temp = ['H','H','H','M','C','C','C','M','C','M','M','M','H','M']
humidity = ['H','H','H','H','N','N','N','H','N','N','N','H','N','H']
windy = ['F','T','F','F','F','T','T','F','F','F','T','T','F','T']
play = ['N','N','P','P','P','N','P','N','P','P','P','P','P','N']

# Label encode each feature
outlook_encoded = preprocessing.LabelEncoder().fit_transform(outlook)
temp_encoded = preprocessing.LabelEncoder().fit_transform(temp)
humidity_encoded = preprocessing.LabelEncoder().fit_transform(humidity)
windy_encoded = preprocessing.LabelEncoder().fit_transform(windy)
play_encoded = preprocessing.LabelEncoder().fit_transform(play)

print("Converted Outlook:\n", outlook_encoded.tolist())
print("Converted Temp:\n", temp_encoded.tolist())
print("Converted Humidity:\n", humidity_encoded.tolist())
print("Converted Windy:\n", windy_encoded.tolist())
print("Converted Play:\n", play_encoded.tolist())

# Forcefully convert NumPy int64 to pure Python int
outlook_encoded = list(map(int, outlook_encoded))
temp_encoded = list(map(int, temp_encoded))
humidity_encoded = list(map(int, humidity_encoded))
windy_encoded = list(map(int, windy_encoded))
play_encoded = list(map(int, play_encoded))

# Combine all features into a list of lists
features = [list(x) for x in zip(outlook_encoded, temp_encoded, humidity_encoded, windy_encoded)]

# Print everything cleanly
print("\nFeatures:\n", features)
print("Converted Play:\n", play_encoded)

# Train Naive Bayes model
agent_model = GaussianNB().fit(features, play_encoded)

# Predict for new input [0, 1, 0, 0]
prediction = agent_model.predict([[0, 1, 0, 0]])
print("\nPredicted Output:\n", prediction)

OUTPUT:
