File:Steepest descent.png
Summary
Description |
English: Developed according to [1]. |
Date | |
Source | Own work |
Author | Kirlf |
PNG development | |
Source code | Python code
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz
def convmtx(h, n):
    # Full-convolution (Toeplitz) matrix of the filter h acting on a length-n input
    return toeplitz(np.hstack([h, np.zeros(n-1)]), np.hstack([h[0], np.zeros(n-1)]))
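# Example (for illustration): convmtx(np.array([1., 2.]), 3) returns the 4-by-3 matrix
# [[1., 0., 0.],
#  [2., 1., 0.],
#  [0., 2., 1.],
#  [0., 0., 2.]]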
def MSE_calc(sigmaS, R, p, w):
    # Error surface: J(w) = sigmaS - w^H p - p^H w + w^H R w
    w = w.reshape(w.shape[0], 1)
    wH = np.conj(w).reshape(1, w.shape[0])
    p = p.reshape(p.shape[0], 1)
    pH = np.conj(p).reshape(1, p.shape[0])
    MSE = sigmaS - np.dot(wH, p) - np.dot(pH, w) + np.dot(np.dot(wH, R), w)
    return MSE[0, 0]
def mu_opt_calc(gamma, R):
    # Optimal step size: mu_opt = (gamma^H gamma) / (gamma^H R gamma)
    gamma = gamma.reshape(gamma.shape[0], 1)
    gammaH = np.conj(gamma).reshape(1, gamma.shape[0])
    mu_opt = np.dot(gammaH, gamma) / np.dot(np.dot(gammaH, R), gamma)
    return mu_opt[0, 0]
M = 5 # number of sensors
h = np.array([0.722-1j*0.779, -0.257-1j*0.722, -0.789-1j*1.862])
L = len(h)-1 # number of signal sources
H = convmtx(h,M-L)
sigmaS = 1 # the desired signal's (s(n)) power
sigmaN = 0.01 # the noise's (n(n)) power
# The correlation matrix of the received signal:
# Rxx = E{x(n)x(n)^H}, where ^H denotes the Hermitian (conjugate) transpose
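# (Model assumption, implicit in the next line: x(n) = H s(n) + v(n) with a white,
# unit-power source s(n) and white noise v(n) independent of it, so that
# Rxx = sigmaS*H H^H + sigmaN*I.)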
Rxx = sigmaS*np.dot(H, np.conj(H).T) + sigmaN*np.identity(M)
# The cross-correlation vector between the tap-input vector x(n) and the desired response s(n):
p = (sigmaS)*H[:,0]
p = p.reshape((len(p), 1))
# Solution of the Wiener-Hopf equation:
wopt = np.dot(np.linalg.inv(Rxx), p)
MSEopt = MSE_calc(sigmaS, Rxx, p, wopt)
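# Note: at the optimum the error surface reaches its minimum, and MSEopt equals
# sigmaS - p^H wopt; this is the MSE floor plotted below as the Wiener solution.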
# Steepest descent algorithm testing:
coeff = np.array([1, 0.9, 0.5, 0.2, 0.1])
lambda_max = max(np.linalg.eigvalsh(Rxx))  # Rxx is Hermitian, so its eigenvalues are real
mus = 2/lambda_max*coeff  # different step sizes, as fractions of mu_max = 2/lambda_max
N_steps = 100
MSE = np.empty((len(mus), N_steps), dtype=complex)
for mu_idx, mu in enumerate(mus):
    w = np.zeros((M, 1), dtype=complex)
    for N_i in range(N_steps):
        # Steepest-descent update: w <- w + mu*(p - Rxx w)
        w = w - mu*(np.dot(Rxx, w) - p)
        MSE[mu_idx, N_i] = MSE_calc(sigmaS, Rxx, p, w)
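# Note on coeff[0] = 1: with mu = mu_max = 2/lambda_max the mode belonging to
# lambda_max has multiplier |1 - mu*lambda_max| = 1, so that curve levels off
# above the Wiener MSE instead of converging; the smaller fractions of mu_max
# satisfy 0 < mu < 2/lambda_max and converge.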
MSEoptmu = np.empty((1, N_steps), dtype=complex)
w = np.zeros((M,1), dtype=complex)
for N_i in range(N_steps):
    gamma = p - np.dot(Rxx, w)  # steepest-descent direction: gamma = p - Rxx w
    mu_opt = mu_opt_calc(gamma, Rxx)
    w = w - mu_opt*(np.dot(Rxx, w) - p)
    MSEoptmu[:, N_i] = MSE_calc(sigmaS, Rxx, p, w)
x = [i for i in range(1, N_steps+1)]
plt.figure(figsize=(5, 4), dpi=300)
for idx, item in enumerate(coeff):
    if item == 1:
        item = ''
    plt.loglog(x, np.abs(MSE[idx, :]), label=r'$\mu = '+str(item)+r'\mu_{max}$')
plt.loglog(x, np.abs(MSEoptmu[0, :]), label=r'$\mu = \mu_{opt}$')
plt.loglog(x, np.abs(MSEopt*np.ones((len(x), 1), dtype=complex)), label='Wiener solution')
plt.grid(True)
plt.xlabel('Number of steps')
plt.ylabel('Mean-Square Error')
plt.title('Steepest descent')
plt.legend(loc='best')
plt.minorticks_on()
plt.grid(which='major')
plt.grid(which='minor', linestyle=':')
plt.savefig('SD.png')
|
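Algorithm summary
The script compares the steepest-descent recursion for several fixed step sizes against the per-step optimal step size and the Wiener solution. For reference (standard results in the notation of [1], matching the code above):

w(n+1) = w(n) + \mu \left[ p - R_{xx} w(n) \right], \qquad 0 < \mu < \frac{2}{\lambda_{\max}}

w_{\mathrm{opt}} = R_{xx}^{-1} p \quad \text{(Wiener–Hopf equation)}

\mu_{\mathrm{opt}}(n) = \frac{\gamma(n)^{H} \gamma(n)}{\gamma(n)^{H} R_{xx}\, \gamma(n)}, \qquad \gamma(n) = p - R_{xx} w(n)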
Licensing
I, the copyright holder of this work, hereby publish it under the following license:
This file is licensed under the Creative Commons Attribution-Share Alike 4.0 International license.
- You are free:
- to share – to copy, distribute and transmit the work
- to remix – to adapt the work
- Under the following conditions:
- attribution – You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
- share alike – If you remix, transform, or build upon the material, you must distribute your contributions under the same or compatible license as the original.
↑ Haykin, Simon S. Adaptive Filter Theory. Pearson Education India, 2008, pp. 108–142, 217–242.