EC 313: Adaptive Signal Processing                 Mayank Awasthi (10531005)
MATLAB Assignment – 1                              M.Tech, Communication Systems
                                                   IIT Roorkee

Chapter 5, Ques-20
function []= ques20()
%Given
a = 0.99;            %AR(1) coefficient
uu = 0.05;           %LMS step size (mu)

%Initializing the v and u matrices to zero
v = zeros(100,101);
u = zeros(100,101);
%White noise generation with mean = 0, variance = 0.02
for i = 1:100
    v(i,:) = sqrt(0.02)*randn(1,101);
end

%Generating an AR(1) process with variance approximately 1, considering
%100 independent realizations
for i = 1:100
    u(i,:) = filter(1, [1 a], v(i,:));
end

%LMS one-step predictor: a single weight, reset for each realization
for n = 1:100
    w(1) = 0;
    for i = 1:100
        f(n,i) = u(n,i) - w(i)*u(n,i+1);
        w(i+1) = w(i) + uu*u(n,i+1)*f(n,i);
    end
end

%Squaring the prediction error for each realization
f = f.^2;

%Ensemble averaging the squared error over the 100 independent
%realizations to obtain the learning curve
J = mean(f, 1);
%Plotting the learning curve
plot(J)
end
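As a quick sanity check on the plotted learning curve, small-step-size LMS theory predicts its steady-state floor and convergence speed. A minimal sketch, assuming a single-tap predictor driven by the unit-variance AR(1) process above (this snippet is not part of the original submission):

sigv2 = 0.02;                   %white-noise variance = Jmin of the predictor
sigu2 = sigv2/(1 - 0.99^2);     %AR(1) process variance, approximately 1
mu = 0.05;                      %LMS step size
Jinf = sigv2/(1 - mu*sigu2/2)   %predicted steady-state MSE, approximately 0.0205
tau = 1/(2*mu*sigu2)            %rough MSE time constant, approximately 10 samples

The expression for Jinf is the single-tap case of the small-step-size formulas used in Ques-21 below.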



Output:




Learning curve: ensemble average of the squared prediction error over
100 independent realizations vs. time, 1 ≤ n ≤ 100




Chapter 5, Ques-21
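The noise variance computed below follows from the Yule-Walker equations for an AR(2) process of unit variance; for reference (standard result, with sigma_u^2 = 1):

\[
r(1) = -\frac{a_1}{1+a_2}, \qquad r(2) = \frac{a_1^2}{1+a_2} - a_2,
\]
\[
\sigma_v^2 = r(0) + a_1 r(1) + a_2 r(2) = 1 - a_2^2 - \frac{a_1^2\,(1-a_2)}{1+a_2}.
\]

With a1 = 0.1 and a2 = -0.8 this evaluates to 0.27, matching the printed value varv = 0.2700.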
function []= ques21()
%Given
a1 = 0.1;            %AR(2) coefficients
a2 = -0.8;
uu = 0.05;           %LMS step size (mu)
%Noise variance giving unit process variance (no semicolon, so printed)
varv = 1 - (a1*a1)/(1+a2) - (a2*a2) + (a1*a1*a2)/(1+a2)

v = zeros(1,102);
u = zeros(1,102);
%White noise generation with mean = 0, variance = varv
v = sqrt(varv)*randn(1,102);
%Generating an AR(2) process with variance = 1
u = filter(1, [1 a1 a2], v);

%Applying the LMS algorithm to estimate a1 and a2

w1(1) = 0;
w2(1) = 0;
for i = 1:100
    f(i) = u(i) - w1(i)*u(i+1) - w2(i)*u(i+2);
    w1(i+1) = w1(i) + uu*u(i+1)*f(i);
    w2(i+1) = w2(i) + uu*u(i+2)*f(i);
end
a1_lms = -w1(100)    %LMS estimates of the AR coefficients (printed)
a2_lms = -w2(100)


%Calculating the eigenvalues of the correlation matrix R
eig1 = 1 - a1/(1+a2);
eig2 = 1 + a1/(1+a2);
%Maximum stable step size
uumax = 2/eig2;
%Calculating the correlation matrix R and the cross-correlation vector p
R = [1, -a1/(1+a2); -a1/(1+a2), 1];
r1 = -a1/(1+a2);
r2 = (a1*a1)/(1+a2) - a2;
p = [r1; r2];
%Calculating the optimum (Wiener) weights and the minimum mean-square error
wo = R\p;
Jmin = 1 - transpose(p)*wo;


%Estimating the expected value of the weights and their variance using
%small-step-size theory; with w(0) = 0 we have
v1(1) = -1/sqrt(2)*(a1+a2);
v2(1) = -1/sqrt(2)*(a1-a2);
for n = 1:100
    val = (1-uu*eig1)^n;
    meanv1(n) = v1(1)*val;
    meansqv1(n) = uu*Jmin/(2-uu*eig1) + val^2*(v1(1)^2 - uu*Jmin/(2-uu*eig1));
end

for n = 1:100
    val = (1-uu*eig2)^n;
    meanv2(n) = v2(1)*val;
    meansqv2(n) = uu*Jmin/(2-uu*eig2) + val^2*(v2(1)^2 - uu*Jmin/(2-uu*eig2));
end

for n = 1:100
    w1(n) = -a1 - (meanv1(n)+meanv2(n))/sqrt(2);
    w2(n) = -a2 - (meanv1(n)-meanv2(n))/sqrt(2);
    %J(n) = Jmin + sum_k lambda_k * E[v_k^2(n)]
    theoretical_J(n) = Jmin + eig1*meansqv1(n) + eig2*meansqv2(n);
end

expectedvalue_weights = [w1(100); w2(100)];
a1_ss = -w1(100)     %small-step-size theory estimates (printed)
a2_ss = -w2(100)

%Generating the AR(2) process over 100 independent realizations
v = zeros(100,102);
u = zeros(100,102);
%White noise generation with mean = 0, variance = 0.02
for i = 1:100
    v(i,:) = sqrt(0.02)*randn(1,102);
    u(i,:) = filter(1, [1 a1 a2], v(i,:));
end

for n = 1:100
    w1(1) = 0;
    w2(1) = 0;
    for i = 1:100
        f(n,i) = u(n,i) - w1(i)*u(n,i+1) - w2(i)*u(n,i+2);
        w1(i+1) = w1(i) + uu*u(n,i+1)*f(n,i);
        w2(i+1) = w2(i) + uu*u(n,i+2)*f(n,i);
    end
end


%Squaring the error and ensemble averaging over the 100 independent
%realizations to obtain the measured learning curve
f = f.^2;
J = mean(f, 1);
%Plotting the measured learning curve
subplot(2,1,1);
plot(J)

%Plotting the theoretical learning curve
subplot(2,1,2);
plot(theoretical_J);

end
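For reference, these are the small-step-size learning-theory expressions that the code above implements, written in the rotated weight-error coordinates v_k (lambda_k are the eigenvalues of R; standard LMS statistical theory):

\[
\mathbb{E}[v_k(n)] = (1-\mu\lambda_k)^n\, v_k(0), \qquad
\mathbb{E}[v_k^2(n)] = \frac{\mu J_{\min}}{2-\mu\lambda_k}
  + (1-\mu\lambda_k)^{2n}\left(v_k^2(0) - \frac{\mu J_{\min}}{2-\mu\lambda_k}\right),
\]
\[
J(n) = J_{\min} + \sum_{k} \lambda_k\, \mathbb{E}\big[v_k^2(n)\big].
\]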

Output

>> ques21 ()

varv = 0.2700

a1_lms = 0.980

a2_lms = -0.7591

a1_ss = 0.1276

a2_ss = -0.7720




Comparison of the theoretical learning curve and the measured result of part (d).




Chapter 5, Ques-21, Part (c)

function []= ques21()
%Given
a1 = 0.1;            %AR(2) coefficients
a2 = -0.8;
uu = 0.05;           %LMS step size (mu)

varv = 1 - (a1*a1)/(1+a2) - (a2*a2) + (a1*a1*a2)/(1+a2);
v = zeros(1,102);
u = zeros(1,102);
%White noise generation with mean = 0, variance = varv
v = sqrt(varv)*randn(1,102);
%Generating an AR(2) process with variance = 1
u = filter(1, [1 a1 a2], v);

w1(1) = 0;
w2(1) = 0;
for i = 1:100
    f(i) = u(i) - w1(i)*u(i+1) - w2(i)*u(i+2);
    w1(i+1) = w1(i) + uu*u(i+1)*f(i);
    w2(i+1) = w2(i) + uu*u(i+2)*f(i);
    e1(i) = -a1 - w1(i);     %weight error of the first tap
    e2(i) = -a2 - w2(i);     %weight error of the second tap
end

Fs = 32e3;

figure;
Pxx = periodogram(f);
Hpsd = dspdata.psd(Pxx,'Fs',Fs);  %Create a PSD data object.
plot(Hpsd);                       %Plot the PSD of f(n).

figure;
Pxx = periodogram(e1);
Hpsd = dspdata.psd(Pxx,'Fs',Fs);
plot(Hpsd);                       %Plot the PSD of e1(n).

figure;
Pxx = periodogram(e2);
Hpsd = dspdata.psd(Pxx,'Fs',Fs);
plot(Hpsd);                       %Plot the PSD of e2(n).

end
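On newer MATLAB releases, where the dspdata.psd object may be unavailable, an equivalent plot can be obtained from periodogram directly. A minimal sketch under that assumption (shown for f; e1 and e2 are analogous, and not part of the original submission):

Fs = 32e3;
[Pxx,F] = periodogram(f,[],[],Fs);   %PSD estimate of the error signal f(n)
figure;
plot(F,10*log10(Pxx));
xlabel('Frequency (Hz)');
ylabel('Power/frequency (dB/Hz)');
title('Periodogram PSD estimate of f(n)');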




PSD Plot of f(n)




PSD Plot of e1(n)




PSD Plot of e2(n)




Chapter 5, Ques-22
function []= ques22(h1,h2,h3)

iter = 500;          %no. of iterations
M = 21;              %no. of tap inputs
P = 100;             %no. of independent experiments
uu = 0.001;          %learning parameter (step size)
h = [h1 h2 h3];      %channel impulse response
D1 = 1;              %channel impulse response is symmetric about n = 1
D2 = (M-1)/2;
D = D1 + D2;         %total delay

for j = 1:P
    %Generating a Bernoulli (+/-1) sequence
    x = sign(rand(1,530) - 0.5);
    %Channel output
    u1 = conv(x,h);
    %Adding AWGN to the channel output
    v = sqrt(0.01)*randn(1,length(u1));
    u = u1 + v;

    %Applying the LMS algorithm
    w = zeros(1,M);          %initializing the weight vector
    for i = 1:iter
        f(j,i) = x(D+i) - w*transpose(u(i:M-1+i));
        w = w + uu*u(i:M-1+i)*f(j,i);
    end
end
subplot(2,2,3);
stem(w);             %Plotting the final equalizer weight vector
subplot(2,2,4);
stem(h);             %Plotting the impulse response of the channel

%Squaring the error and ensemble averaging over the P independent
%realizations to obtain the learning curve
f = f.^2;
J = mean(f, 1);

%Plotting the learning curve
subplot(2,2,1:2);
plot(J);
end
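The three channel configurations reported below can be reproduced by calling the function directly:

ques22(0.25, 1, 0.25);     %case (i)
ques22(0.25, 1, -0.25);    %case (ii)
ques22(-0.25, 1, 0.25);    %case (iii)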




Output:

   (i)    For h1 = 0.25, h2 = 1, h3 = 0.25




          a. Learning curve of the equalizer, obtained by averaging the squared
             error signal over an ensemble of 100 independent trials
          b. Impulse response of the optimum transversal equalizer
          c. Impulse response of the channel




(ii)   For h1 = 0.25, h2 = 1, h3 = -0.25




       a. Learning curve of the equalizer, obtained by averaging the squared
          error signal over an ensemble of 100 independent trials
       b. Impulse response of the optimum transversal equalizer
       c. Impulse response of the channel




(iii)   For h1 = -0.25, h2 = 1, h3 = 0.25




        a. Learning curve of the equalizer, obtained by averaging the squared
           error signal over an ensemble of 100 independent trials
        b. Impulse response of the optimum transversal equalizer
        c. Impulse response of the channel




