clear all;
x = [];
t = [];
x(1) = 0.22;
k = 4;
n = 900;
N = 400;
% generate a logistic sequence and discard the first 100 points
for i = 1:n
    x(i+1) = k*x(i)*(1 - x(i));
end
x(1:800) = x(101:900);
% train the RBF neural network with the first 400 data points of x
for j = 1:N
    P(1:8,j) = x(j:j+7)';
end
T = x(9:408);                 % target data
net1 = newrb(P,T,0.001,1);    % train the RBF neural network
N1 = 300;
% select samples 400-700 of x for the RBF neural network test
for j = 1:N1
    P1(1:8,j) = x(j+400:j+7+400)';
end
T1 = x(409:708);              % target data
% simulation / verification
a = sim(net1,P1);             % prediction on the test data; sim simulates the network
e = T1 - a;                   % one-step prediction error
mse = sqrt(e*e')/size(e,2)    % mean square error
% plot the simulation results
figure(2)
plot(x(1:200));
axis([1 200 -0.1 1.1]);
title('logistic chaotic sequence');
xlabel('t');
ylabel('magnitude');
figure(3)
plot(1:300,T1,'b',1:300,a,'r*');
h = legend('chaotic sequence','RBF neural network one-step prediction value');
axis([1 300 -0.5 1.5]);
title('chaotic sequence and one-step prediction value');
xlabel('t');
ylabel('magnitude');
figure(4)
plot(e,'b-');
axis([1 300 -0.02 0.02]);
title('prediction error e');
xlabel('t');
ylabel('magnitude');
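A note on the toolbox call above: newrb(P,T,0.001,1) builds the radial basis network incrementally, adding hidden neurons until the mean squared error goal (0.001 here) is reached; the fourth argument is the spread of the radial basis functions. Both newrb and sim require MATLAB's Neural Network Toolbox.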
As for K-means-style clustering: the following program is one of the most basic RBF algorithms; the centers are found by a simple clustering scheme and the learning algorithm uses the pseudo-inverse.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
clear all;
close all;
clc
% define the variables used
r = 1;              % neighborhood radius, set manually
nodenum = 0;        % number of hidden layer nodes, determined by the actual data and r
inputdata = [];     % input matrix
inputpath = '';     % path of the original input data
nodeout = [];       % hidden layer output array
netout = [];        % network output array
weight = [];        % output weight matrix, the network's only weights
inputnum = 0;       % input dimension
outputnum = 0;      % output dimension
center = [];        % cluster centers
numtrain = 0;       % number of training samples
row = 0;            % number of training samples
simrow = 0;         % number of all samples
numtest = 0;        % number of generalization (test) samples
strength = 1;       % range used in normalization, usually 1
yout = [];          % expected output values
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% variable initialization
r = 1;
inputnum = 9;
inputpath = 'e:\yinjia\data\yearsun.dat';
outputnum = 1;
simrow = 290;
source = load(inputpath);
% save a copy of the source for later denormalization
copysource = source;
% normalization
source = normalize(source,strength);
yout = source(inputnum+1:simrow+inputnum);
inputdata = phasespace(source,inputnum,simrow);
row = 250;
numtrain = row;
numtest = simrow - row;
% take the first input vector as the initial center;
% after initialization, the position of a center never changes
center = inputdata(1,:)';
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% start a simple clustering.
% Basic idea: given a fixed neighborhood radius, points within the radius of a
% center belong to that center's neighborhood; the first point that falls
% outside the radius of every center becomes a new center.
% A helper function iscenter decides whether a point is a new center.
for step = 2:row
    if iscenter(inputdata(step,:)',center,r)
        center = [center inputdata(step,:)'];
        nodenum = nodenum + 1;
    end   % this end for iscenter(inputdata(step,:)')
end       % this end for step = 2:row
% clustering finished
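Since iscenter is one of the helpers left to the reader (see the end of the listing), here is a minimal sketch of what the loop above assumes: the function returns true exactly when the point lies outside radius r of every existing center. It would go in its own iscenter.m file.

function flag = iscenter(point,center,r)
% ISCENTER  True if POINT lies outside radius R of every column of CENTER,
% i.e. the point should be adopted as a new cluster center.
% (A minimal sketch; the original helper is not shown in the text.)
flag = true;
for i = 1:size(center,2)
    if norm(point - center(:,i)) <= r
        flag = false;    % the point falls inside an existing neighborhood
        return;
    end
end
end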
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% prepare the forward computation of the neural network
[centerrow,nodenum] = size(center);
% network initialization
nodeout = zeros(row,nodenum);
netout = zeros(row,outputnum);
weight = zeros(nodenum,outputnum);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% network computation
for step = 1:row
    for step1 = 1:nodenum
        nodeout(step,step1) = GaussRadialBasisFunction(inputdata(step,:)', ...
            center(:,step1),r);
    end   % this end for step1 = 1:nodenum
end       % this end for step = 1:row
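GaussRadialBasisFunction is likewise left to the reader. A plausible sketch, assuming the common Gaussian kernel with the neighborhood radius r doubling as the width parameter (the exact form used in the original is not shown):

function out = GaussRadialBasisFunction(x,c,r)
% Gaussian radial basis function with center C and width R (assumed form):
%   out = exp(-||x - c||^2 / (2*r^2))
out = exp(-norm(x - c)^2/(2*r^2));
end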
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% use the pseudo-inverse to compute the output weights
weight = pinv(nodeout)*yout(1:250);
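For reference, this pseudo-inverse step is simply the least-squares solution of nodeout*weight = yout(1:250); when nodeout has full column rank, MATLAB's backslash operator gives the same weights more cheaply:

weight = nodeout \ yout(1:250);   % equivalent least-squares solve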
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% simulation over all samples
for step = 1:simrow
    for step1 = 1:nodenum
        nodeout(step,step1) = GaussRadialBasisFunction(inputdata(step,:)', ...
            center(:,step1),r);
    end   % this end for step1 = 1:nodenum
end       % this end for step = 1:simrow
mydata = nodeout*weight;
% denormalization
mydata = unnomal(mydata,copysource,strength);
rldata = copysource(inputnum+1:inputnum+simrow);
plot(rldata); hold on; plot(mydata,'r');
% calculate an evaluation function (test RMSE)
rmsetest = (norm(rldata(numtrain+1:simrow)-mydata(numtrain+1:simrow)) ...
    ^2/(numtest-1))^0.5
A few small helper functions remain (normalize, phasespace and unnomal, in addition to iscenter and GaussRadialBasisFunction sketched above); you should be able to write them yourself.
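For completeness, minimal sketches of the remaining helpers follow, each in its own .m file. The originals are not shown, so the normalization scheme below (linear scaling into [0, strength], inverted by unnomal) and the embedding layout in phasespace are assumptions, chosen only to be consistent with how the main script indexes the data.

function out = normalize(data,strength)
% Scale DATA linearly into [0, STRENGTH] (assumed scheme).
out = strength*(data - min(data))/(max(data) - min(data));
end

function out = phasespace(source,inputnum,simrow)
% Build the delay-embedding input matrix: row j holds the INPUTNUM
% consecutive samples source(j) ... source(j+inputnum-1), so its matching
% target is source(j+inputnum).  SOURCE is assumed to be a column vector.
out = zeros(simrow,inputnum);
for j = 1:simrow
    out(j,:) = source(j:j+inputnum-1)';
end
end

function out = unnomal(data,copysource,strength)
% Invert the normalization above using the saved original series.
out = data/strength*(max(copysource) - min(copysource)) + min(copysource);
end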