KNN classification

Title: KNN classification
Category: MATLAB
Request description:

Hello,
We have a program in which we want to perform KNN classification, but it does not run: it gives an error along the lines of "function does not exist in this context".
How can we fix it?
The code is as follows:
clc
clear all
close all
load('100m.mat'); % load the ECG signal from the file
ECG=val(1,:);
fs = 360; % Sampling rate
N = length (ECG); % Signal length
t = [0:N-1]/fs; % time index
RR_interval=0; % accumulator for the RR-interval measure computed below

figure(1)
subplot(2,4,[1 2])
plot(t,ECG)
xlabel('second');ylabel('Volts');title('Input ECG Signal')


ECG = ECG - mean (ECG ); % cancel DC components
ECG = ECG/ max( abs(ECG )); % normalize to one
heart_beat=0;
subplot(2,4,3)
plot(t,ECG)
xlabel('second');ylabel('Volts');title(' ECG Signal after DC drift cancellation and normalization')
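
% The filtering stages below (low-pass, high-pass, derivative, squaring and
% moving-window integration) follow a Pan-Tompkins-style QRS detection chain.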
% LPF (1-z^-6)^2/(1-z^-1)^2
b=[1 0 0 0 0 0 -2 0 0 0 0 0 1];
a=[1 -2 1];

h_LP=filter(b,a,[1 zeros(1,12)]); % impulse response of the LPF
x2 = conv (ECG ,h_LP);
x2 = x2/ max( abs(x2 )); % normalize , for convenience .

subplot(2,4,4)
plot([0:length(x2)-1]/fs,x2)
xlabel('second');ylabel('Volts');title(' ECG Signal after LPF')
xlim([0 max(t)])

% HPF = Allpass-(Lowpass) = z^-16-[(1-z^-32)/(1-z^-1)]
b = [-1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 32 -32 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1];
a = [1 -1];

h_HP=filter(b,a,[1 zeros(1,32)]); % impulse response of the HPF

x3 = conv (x2 ,h_HP);
x3 = x3/ max( abs(x3 ));


subplot(2,4,5)
plot([0:length(x3)-1]/fs,x3)
xlabel('second');ylabel('Volts');title(' ECG Signal after HPF')
xlim([0 max(t)])

% Derivative filter impulse response
h = [-1 -2 0 2 1]/8;
% Apply filter
x4 = conv (x3 ,h);
x4 = x4 (2+[1: N]);
x4 = x4/ max( abs(x4 ));


subplot(2,4,6)
plot([0:length(x4)-1]/fs,x4)
xlabel('second');ylabel('Volts');title(' ECG Signal after Derivative')


x5 = x4 .^2;
x5 = x5/ max( abs(x5 ));
subplot(2,4,7)
plot([0:length(x5)-1]/fs,x5)
xlabel('second');ylabel('Volts');title(' ECG Signal after Squaring')


% Moving-window integrator impulse response (31-sample window)
h = ones (1 ,31)/31;
Delay = 15; % Delay in samples (half the window length)

% Apply filter
x6 = conv (x5 ,h);
x6 = x6 (Delay+[1: N]);
x6 = x6/ max( abs(x6 ));

subplot(2,4,8)
plot([0:length(x6)-1]/fs,x6)
xlabel('second');ylabel('Volts');title(' ECG Signal after Averaging')

max_h = max(x6);
thresh = mean (x6 );
poss_reg =(x6>thresh*max_h)'; % candidate QRS regions: samples above the threshold


% sample indices where candidate QRS regions begin (left) and end (right)
left = find(diff(poss_reg')==1);
right = find(diff(poss_reg')==-1);

%left=left-(16+6); % cancel delay because of LP and HP
%right=right-(16+6);% cancel delay because of LP and HP

for i=1:length(left)-1
[R_value(i) R_loc(i)] = max( ECG(left(i):right(i)) );
R_loc(i) = R_loc(i)-1+left(i); % add offset

q_start = max(left(i)-16, 1); % guard against indexing before the start of the signal
[Q_value(i) Q_loc(i)] = min( ECG(q_start:R_loc(i)) );
Q_loc(i) = Q_loc(i)-1+q_start; % add offset

[S_value(i) S_loc(i)] = min( ECG(left(i):right(i)) );
S_loc(i) = S_loc(i)-1+left(i); % add offset

p_start = max(left(i)-48, 1); % guard against indexing before the start of the signal
[P_value(i), P_loc(i)]=max(ECG(p_start:Q_loc(i)));
P_loc(i)=P_loc(i)-1+p_start; % add offset

[T_value(i), T_loc(i)]=max(ECG(S_loc(i):right(i)));
T_loc(i)=T_loc(i)-1+S_loc(i); % add offset (the T search window starts at S_loc(i))

heart_beat=heart_beat+1;

QRS(i)=S_loc(i)-Q_loc(i); % QRS width in samples

amplitude_R(i)=R_value(i);

QRS_area(i)=amplitude_R(i).*QRS(i);


end

for i=2:length(left)-1
pre_RR_interval(i)=(R_loc(i)-R_loc(i-1))/360;

end

for i=2:length(left)-1
post_RR_interval(i)=(pre_RR_interval(i)-pre_RR_interval(i-1))/360;
RR_interval=RR_interval+post_RR_interval(i);
end
Avg=RR_interval/12; % average over the (assumed) 12 beats in the record

% discard any unset (zero) locations
Q_loc=Q_loc(find(Q_loc~=0));
R_loc=R_loc(find(R_loc~=0));
S_loc=S_loc(find(S_loc~=0));

figure
plot (t,ECG/max(ECG) , t(R_loc) ,R_value , 'r^', t(S_loc) ,S_value, '*',t(Q_loc) , Q_value, 'o',t(P_loc) , P_value, '+',t(T_loc) , T_value, '*');
title('ECG Signal with P, Q, R, S and T points'); % title must come after plot, otherwise plot clears it
legend('ECG','R','S','Q','P','T');
xlabel('second');ylabel('Volts');



PR_interval=(QRS(1)-P_loc(1))/360;

QRS_interval=(QRS(1)-QRS(end))/360;

QT_interval=(T_loc(end)-QRS(1))/360;

% PVC detection: the stated criterion is a QRS width greater than 0.11 s;
% here a beat is flagged when its RR-interval difference exceeds the average
for i=1:length(post_RR_interval)
if(post_RR_interval(i)>Avg)
disp('pvc detected')
end
end
% feature vectors for classification (note: the second assignment overwrites the first)
testing = [amplitude_R,QRS_area,RR_interval,S_value];
testing = [PR_interval,QRS_interval,QT_interval];
function [predicted_labels,nn_index,accuracy] = KNN_(k,data,labels,t_data,t_labels)
%KNN_: classification using the k-nearest neighbors algorithm. The nearest
%neighbors are found using the Euclidean distance.
%Usage:
% [predicted_labels,nn_index,accuracy] = KNN_(k,data,labels,t_data,t_labels)
% predicted_labels = KNN_(k,data,labels,t_data)
%Input:
% - k: number of nearest neighbors
% - data: (NxD) training data; N is the number of samples and D is the
% dimensionality of each data point
% - labels: training labels
% - t_data: (MxD) testing data; M is the number of data points and D
% is the dimensionality of each data point
% - t_labels: testing labels (default = [])
%Output:
% - predicted_labels: the predicted labels based on the k-NN
% algorithm
% - nn_index: the index of the nearest training data point for each testing sample (Mx1).
% - accuracy: if the testing labels are supplied, the accuracy of
% the classification is returned, otherwise it is zero.
%Author: Mahmoud Afifi - York University
%checks
if nargin < 4
error('Too few input arguments.')
elseif nargin < 5
t_labels=[];
end
accuracy=0; % remains zero unless testing labels are supplied
if size(data,2)~=size(t_data,2)
error('data should have the same dimensionality');
end
if mod(k,2)==0
error('to reduce the chance of ties, please choose odd k');
end
%initialization
predicted_labels=zeros(size(t_data,1),1);
ed=zeros(size(t_data,1),size(data,1)); %ed: (MxN) euclidean distances
ind=zeros(size(t_data,1),size(data,1)); %corresponding indices (MxN)
k_nn=zeros(size(t_data,1),k); %k-nearest neighbors for testing sample (Mxk)
%calc euclidean distances between each testing data point and the training
%data samples
for test_point=1:size(t_data,1)
for train_point=1:size(data,1)
%calc and store sorted euclidean distances with corresponding indices
ed(test_point,train_point)=sqrt(...
sum((t_data(test_point,:)-data(train_point,:)).^2));
end
[ed(test_point,:),ind(test_point,:)]=sort(ed(test_point,:));
end
%find the nearest k for each data point of the testing data
k_nn=ind(:,1:k);
nn_index=k_nn(:,1);
%get the majority vote
for i=1:size(k_nn,1)
options=unique(labels(k_nn(i,:)'));
max_count=0;
max_label=0;
for j=1:length(options)
L=length(find(labels(k_nn(i,:)')==options(j)));
if L>max_count
max_label=options(j);
max_count=L;
end
end
predicted_labels(i)=max_label;
end
%calculate the classification accuracy
if isempty(t_labels)==0
accuracy=length(find(predicted_labels==t_labels))/size(t_data,1);
end
end
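
For reference, the error presumably comes from defining KNN_ at the bottom of the script: local functions at the end of a script file are only accepted from MATLAB R2016b onward, and only when the whole file is saved as a .m file rather than pasted into the Command Window; on older releases KNN_ has to be saved in its own file KNN_.m on the path. Once that is sorted out, the call is expected to look roughly like the sketch below. The training matrix and its labels are hypothetical placeholders (the script does not build a labelled training set yet), so this is only a minimal sketch, not the final classification step.

% Minimal sketch of the intended call, assuming KNN_ is available as described
% above. 'training' and 'training_labels' are hypothetical placeholders with
% the same number of columns (features) as 'testing'.
k = 3;                                   % KNN_ requires an odd k
training = rand(10, numel(testing));     % placeholder: 10 training samples
training_labels = randi([0 1], 10, 1);   % placeholder class labels (e.g. 0 = normal, 1 = PVC)
[predicted_labels, nn_index] = KNN_(k, training, training_labels, testing);
disp(predicted_labels)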
