ML2

Q1)

load('fg.mat')

data = fg(:,2:17);
label = fg(:,1);

function [accuracy1, accuracy2] = baggadabosst(data, label)

% AdaBoost.M1 ensemble of 50 trees and a bagged ensemble of 200 trees
ens = fitensemble(data, label, 'AdaBoostM1', 50, 'Tree');
bag = fitensemble(data, label, 'Bag', 200, 'Tree', ...
    'Type', 'Classification');

YfitS = predict(bag, data);   % bagging predictions on the training data
YfitS1 = predict(ens, data);  % AdaBoost predictions on the training data

% count correct bagging predictions element-wise
% (counter renamed from sum to avoid shadowing the built-in)
correct = 0;
for i = 1:numel(label)
    if YfitS(i) == label(i)
        correct = correct + 1;
    end
end
accuracy1 = correct / numel(label);

% count correct AdaBoost predictions
correct1 = 0;
for i = 1:numel(label)
    if YfitS1(i) == label(i)
        correct1 = correct1 + 1;
    end
end
accuracy2 = correct1 / numel(label);

end
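A minimal driver sketch for the function above (an assumed usage, not part of the original script); both values are resubstitution accuracies, since the ensembles are evaluated on the data they were trained on:

% hypothetical call: resubstitution accuracy of bagging (accuracy1)
% and AdaBoost (accuracy2)
[accuracy1, accuracy2] = baggadabosst(data, label)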
X_tr = fg(1:1000, 2:17);
y_tr = fg(1:1000, 1);
X_te = fg(1001:1576, 2:17);
y_te = fg(1001:1576, 1);
n_trees = 100;

function [train_err, test_err] = AdaBoostFunction(X_tr, y_tr, X_te, y_te, n_trees, option)

% map the labels to {-1, +1}: option 1 is the 1-vs-3 problem,
% option 3 is the 3-vs-5 problem
if option == 1
    y_tr = y_tr - 2;
    y_te = y_te - 2;
elseif option == 3
    y_tr = y_tr - 4;
    y_te = y_te - 4;
else
    error("Wrong input for option");
end

N = size(X_tr, 1);

alpha = zeros(n_trees, 1);

% initialize D_1(n) = 1/N for all n = 1, ..., N
D = ones(N, 1) / N;

% per-tree prediction collections for the train and test sets
training = ones(n_trees, N);
testing = ones(n_trees, size(X_te, 1));

trainSize = size(y_tr, 1);
testSize = size(y_te, 1);

for t = 1:n_trees
    % weak learner: a depth-1 stump trained with the current weights D
    stump = fitctree(X_tr, y_tr, "SplitCriterion", "deviance", ...
        "MaxNumSplits", 1, "Weights", D);

% learn g_t from the stump for both train and test sets
g_t = predict(stump, X_tr);
g_t_test = predict(stump, X_te);

% calculate epsilon_t
errors = g_t ~= y_tr;
products = D .* errors;
epsilon_t = sum(products);

% calculate alpha_t
alpha(t) = 1.0/2.0 * log((1-epsilon_t) / epsilon_t);

% reweight rule
D = D .* exp( (-1) * alpha(t) .* g_t .* y_tr);
% adjusting for normalization constant
Z = sum(D);
D = D ./ Z;

    % collecting weak classifiers
    training(t, :) = g_t;
    testing(t, :) = g_t_test;

    % aggregation rule: G(x) = sign(sum_t alpha_t * g_t(x))
    Gx = sign(sum(alpha .* training));
    Gx_test = sign(sum(alpha .* testing));

    train_err(t) = 1/trainSize * sum(Gx' ~= y_tr);
    test_err(t) = 1/testSize * sum(Gx_test' ~= y_te);
end

figure;
hold on;
plot(1:n_trees, train_err, 'Color', 'red');
plot(1:n_trees, test_err, 'Color', [0 0.39 0]);
ylabel("Training & Testing Error");
xlabel("# of weak hypotheses");
legend("Training Error", "Testing Error");

if option == 1
title("One (1) vs. Three (3) Problem - AdaBoost");
elseif option == 3
title("Three (3) vs. Five (5) Problem - AdaBoost ");
end

% report the last value (what it converges to at n_trees)
train_err = train_err(n_trees);
test_err = test_err(n_trees);

end
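A hedged usage sketch for the function above (the driver variables are the ones defined before the function; option selects the label mapping):

% hypothetical call for the 1-vs-3 problem; returns the errors at n_trees
option = 1;
[final_train_err, final_test_err] = AdaBoostFunction(X_tr, y_tr, X_te, y_te, n_trees, option);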
X = data;
Y = label;
testX = fg(1001:1576, 2:17);
testY = fg(1001:1576, 1);
numBags = 100;
option = 1;

function [ oobErr ] = BaggedTrees( X, Y, numBags, option, testX, testY )

if option == 1
Y = Y - 2;
testY = testY - 2;
elseif option == 3
Y = Y - 4;
testY = testY - 4;
end

baggingSize = size(X,1);
OOB = zeros(numBags, 1);

% initialize variables based on train/test mode:
% testX == 0 is the sentinel for out-of-bag (training) mode
if isequal(testX, 0)
    G = zeros(baggingSize, numBags);
    N = baggingSize;
else
    testSize = size(testX, 1);
    G = zeros(testSize, numBags);
    N = testSize;
end

for t = 1:numBags

    % bootstrap sample (with replacement) of row indices; for a population
    % of 1:baggingSize the sampled values and their positions coincide
    idx = datasample(1:baggingSize, baggingSize);

    if isequal(testX, 0)
        input = X(idx, :);
        output = Y(idx);

        % out-of-bag rows: the ones never drawn in this bootstrap sample
        oobRows = setdiff(1:baggingSize, idx, 'sorted');
        oobX = X(oobRows, :);

        % keeping the cross-val option off so that we get a max-depth tree
        ct = fitctree(input, output);

        % learning hypotheses on the out-of-bag rows only
        G(oobRows, t) = predict(ct, oobX);
    else
        input = testX(idx, :);
        output = testY(idx);

        % keeping the cross-val option off so that we get a max-depth tree
        ct = fitctree(input, output);
        % learning hypotheses on the whole test set
        G(:, t) = predict(ct, testX);
    end

    % aggregate the first t columns of G (grows with t)
    aggrG = G(:, 1:t);
    % aggregated sums across the t trees
    sum_aggrG = sum(aggrG, 2);
    % binary classification error between G(x_n) and y_n
    % (compare against testY in test mode, Y in out-of-bag mode)
    if isequal(testX, 0)
        errors = sign(sum_aggrG) ~= Y;
    else
        errors = sign(sum_aggrG) ~= testY;
    end
    % sum of all errors from n = 1 to N
    errors_sum = sum(errors);
    % average over N, the size of our bootstrap aggregate
    OOB(t) = 1/N * errors_sum;
end

% report out-of-bag error (last element)
oobErr = OOB(numBags);

% plot OOB error as a function of the number of bags from 1 to numBags
if isequal(testX, 0)
    figure;
    hold on;
    scatter(1:numBags, OOB, 10, 'd', 'red', 'filled');
    line(1:numBags, OOB);
    ylabel("Out-Of-Bag Error");
    xlabel("# of bags");

    if option == 1
        title("One (1) vs. Three (3) Problem");
    elseif option == 3
        title("Three (3) vs. Five (5) Problem");
    end
end
end
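A brief call sketch (assumption: passing testX = 0 and testY = 0 selects the out-of-bag mode, per the sentinel check inside the function):

% hypothetical call: OOB error on the training data
oobErr = BaggedTrees(X, Y, numBags, option, 0, 0)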

Q2)

load('fg.mat')
data = fg(:,2:17);
label = fg(:,1);

% fitnet expects observations in columns
xt = data';
yt = label';

[rmse_val, rmse_train] = mlp(xt, yt)
function [rmse_val, rmse_train] = mlp(xt, yt)
% sweep the hidden layer size from 1 to 60, recording train/validation RMSE
for i = 1:60
    hiddenLayerSize = i;
    net = fitnet(hiddenLayerSize);
    net.divideParam.trainRatio = 70/100;
    net.divideParam.valRatio = 30/100;
    net.divideParam.testRatio = 0/100;
    [net, tr] = train(net, xt, yt);
    % exp(.) - 1 inverts an apparent log(1 + y) transform of the targets
    ytrain = exp(net(xt(:, tr.trainInd))) - 1;
    ytrainTrue = exp(yt(tr.trainInd)) - 1;
    yval = exp(net(xt(:, tr.valInd))) - 1;
    yValTrue = exp(yt(tr.valInd)) - 1;
    rmse_val(i) = sqrt(mean((yval - yValTrue).^2));
    rmse_train(i) = sqrt(mean((ytrain - ytrainTrue).^2));
end
end
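A small follow-on sketch (hypothetical, not in the original) for reading off the hidden layer size with the lowest validation RMSE from the sweep above:

% pick the hidden layer size that minimizes validation RMSE
[bestRmse, bestSize] = min(rmse_val);
fprintf('best hidden layer size: %d (validation RMSE %.4f)\n', bestSize, bestRmse);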
Q3)

load('fg.mat')

data = fg(:,2:17);
label = fg(:,1);

ens = fitensemble(data, label, 'AdaBoostM1', 50, 'Tree');
bag = fitensemble(data, label, 'Bag', 200, 'Tree', ...
    'Type', 'Classification');

cvMdl = crossval(ens); % performs stratified 10-fold cross-validation
cvtrainError = kfoldLoss(cvMdl)

ADABOOST:
cvtrainError =
    0.0019
cvtrainAccuracy =
    0.9981

cvMdl2 = crossval(bag); % performs stratified 10-fold cross-validation
cvtrainError2 = kfoldLoss(cvMdl2)

BAGGING:
cvtrainError2 =
    6.3452e-04
cvtrainAccuracy =
    0.9994

MLP error = 0.001
Accuracy: 0.9990
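The reported accuracies follow directly from the k-fold losses as accuracy = 1 - error; a minimal recomputation sketch (assuming cvMdl and cvMdl2 from above are in the workspace):

% accuracy is one minus the 10-fold classification loss
cvtrainAccuracy = 1 - kfoldLoss(cvMdl)    % AdaBoost: 1 - 0.0019 = 0.9981
cvtrainAccuracy2 = 1 - kfoldLoss(cvMdl2)  % bagging: 1 - 6.3452e-04 ~ 0.9994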
