Commit 66f3bab2 authored by Fredrik Bagge Carlson

Initial commit

parent 3fce4fbf
""" Performs PCA PCA(W,doplot=false)"""
function PCA(W,doplot=false)
W0 = mean(W,1);
W = W-repmat(W0,size(W,1),1);
(score,latent,C) = svd(W)
score = score*diagm(latent)
if doplot
newplot(cumsum(latent)./sum(latent),"o")
title("Fraction of variance explained")
ylim([0,1])
end
C,score,latent,W0
end
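
# A hedged usage sketch (names are illustrative): project 100 samples of five
# correlated variables onto their principal directions.
#
#     Wdemo = randn(100,5); Wdemo[:,2] = Wdemo[:,1] + 0.1*randn(100)
#     C,score,latent,W0 = PCA(Wdemo)
#     score[:,1:2] # coordinates along the two strongest directions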
module SysId
if !isdefined(:DEBUG); DEBUG = false; end
export
Model,
Network,
AR,
ARX,
RBFARX,
FitStatistics,
FitResult
## Fit Methods =================
# The available fit methods are referred to by symbol:
:LS
:LM
## Types =======================
abstract Model
abstract LinearModel <: Model
abstract NonLinearModel <: Model
abstract Network <: NonLinearModel
"""
`a::Vector{Float64}`: The polynomial coeffs A(z) (not including the first 1)\n
`na::Int`\n
`bias::Bool`\n
`λ::Float64`\n
"""
type AR <: Model
    a::Vector{Float64}
    na::Int
    bias::Bool
    λ::Float64
end
"""
`a::Vector{Float64}`: The polynomial coeffs A(z) (not including the first 1)\n
`b::VecOrMat{Float64}`: The polynomial coeffs B(z)\n
`na::Int`\n
`nb::Vector{Int}`\n
`bias::Bool`\n
`λ::Float64`\n
"""
type ARX <: Model
    a::Vector{Float64}
    b::VecOrMat{Float64}
    na::Int
    nb::Vector{Int}
    bias::Bool
    λ::Float64
end
type RBFARX <: Network
    na::Int
    n_centers::Int
    n_state::Int
    outputnet::Bool
    normalized::Bool
end
type FitStatistics # name must match the export `FitStatistics` above
    RMS::Float64
    FIT::Float64
    AIC::Float64
    FitStatistics(e::Vector{Float64}) = new(rms(e),fit(e),aic(e))
end
type FitResult # name must match the export `FitResult` above
    error::VecOrMat{Float64}
    fit::FitStatistics
    method::Symbol
end
type Iddata
    y::VecOrMat{Float64}
    u::VecOrMat{Float64}
    Ts::Float64
    Iddata(y::VecOrMat{Float64}) = new(y,Float64[],0)
    Iddata(y::VecOrMat{Float64}, Ts::Float64) = new(y,Float64[],Ts)
    Iddata(y::VecOrMat{Float64}, u::VecOrMat{Float64}) = new(y,u,0)
end
end
"""`ar(y, na; λ = 0, doplot=false)`"""
function ar(y, na; λ = 0, doplot=false, bias=false)
y_train, A = getARRegressor(y,na;bias=bias)
n_points = size(A,1)
if λ == 0
w = A\y_train
else
w = (A'A + λeye(na+1))\A'y_train
end
prediction = A*w
error = y_train - prediction
if doplot
newplot(y_train,"k")
plot(prediction,"b")
plot(error,"r"); title("Fitresult, AR, na: $na, RMSE: $(round(rms(error),4)) Fit = $(round(fit(y_train,prediction),4))")
# Exit ===============================================
println("AR done. na: $na + 1 bias, RMSE: $(round(rms(error),4))")
end
return w, error
end
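
# Example (a sketch; note that `getARRegressor` is defined further down, so run
# this only after the whole file has been loaded):
#
#     ydemo = cumsum(randn(500))    # a random-walk test signal
#     w, e  = ar(ydemo, 4)          # least-squares fit of a 4th-order AR model
#     w, e  = ar(ydemo, 4, λ = 0.1) # the same fit with ridge regularization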
"""`arx(y, u, na, nb; λ = 0, doplot=false, bias=false)`"""
function arx(y, u, na, nb; λ = 0, doplot=false, bias=false)
    y_train, A = getARXRegressor(y,u, na, nb; bias=bias)
    n_points = size(A,1)
    if λ == 0
        w = A\y_train
    else
        w = (A'A + λ*eye(size(A,2)))\A'y_train # ridge regression; the regularizer must match the number of regressor columns
    end
    prediction = A*w
    error = y_train - prediction
    if doplot
        newplot(y_train,"k")
        plot(prediction,"b")
        plot(error,"r"); title("Fitresult, ARX, na: $na, nb: $nb, $(bias ? "+ 1 bias," : "") RMSE: $(round(rms(error),4)) Fit = $(round(fit(y_train,prediction),4))")
        # Exit ===============================================
        println("ARX done. na: $na, nb: $nb, $(bias ? "+ 1 bias," : "") RMSE: $(round(rms(error),4))")
    end
    return w, error
end
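
# Example (sketch): identify a model with two output lags and one input lag.
# `filt` was available in Base at the time; the system below is illustrative.
#
#     udemo = randn(500)
#     ydemo = filt([0.5], [1.0, -0.9], udemo) # a first-order test system
#     w, e  = arx(ydemo, udemo, 2, [1])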
function getARRegressor(y,na;bias=false)
    A = toeplitz(y[na+1:end],y[na+1:-1:1]) # first column holds the targets, the remaining columns the lagged values
    y = copy(A[:,1])
    A = A[:,2:end]
    n_points = size(A,1)
    if bias
        A = [A ones(n_points)]
    end
    return y,A
end
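
# To see the regressor layout: with y = [1.,2.,3.,4.,5.] and na = 2,
#
#     y_t, A = getARRegressor([1.,2.,3.,4.,5.], 2)
#     # y_t == [3.0,4.0,5.0] and each row of A holds the two preceding samples:
#     # A == [2.0 1.0; 3.0 2.0; 4.0 3.0]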
function getARXRegressor(y,u, na, nb; bias=false)
    @assert length(nb) == size(u,2)
    m    = max(na+1,maximum(nb)) # longest lag window needed
    n    = length(y) - m+1       # number of usable data points
    offs = m-na-1
    A    = toeplitz(y[offs+na+1:n+na+offs],y[offs+na+1:-1:1])
    y    = copy(A[:,1])
    A    = A[:,2:end]
    for i = 1:length(nb)
        offs = m-nb[i]
        A = [A toeplitz(u[nb[i]+offs:n+nb[i]+offs-1,i],u[nb[i]+offs:-1:1+offs,i])]
    end
    if bias
        A = [A ones(n)]
    end
    return y,A
end
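
# Dimension sketch: for na = 2, nb = [2] and bias = false the regressor has
# na + sum(nb) = 4 columns.
#
#     y_t, A = getARXRegressor(cumsum(randn(100)), randn(100,1), 2, [2])
#     # size(A,2) == 4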
"""Plots the RMSE and AIC for model orders up to `n`. Useful for model selection"""
function findNa(y,n)
error = zeros(n,2)
for i = 1:n
w,e = ar(y,i)
error[i,1] = rms(e)
error[i,2] = aic(e,i+1)
print(i,", ")
end
println("Done")
plotsub(error,"-o")
show()
end
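
# Usage sketch (assumes the helpers `rms`, `aic` and `plotsub` are loaded):
#
#     findNa(cumsum(randn(1000)), 20) # scan AR orders 1..20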
function kalman(R1,R2,theta, y, A, P)
    # This is really a Kalman filter, not RLS
    ATP = A'*P
    K   = (P*A)/(R2+ATP*A)
    P   = P - (P*A*ATP)./(R2 + ATP*A) + R1
    yp  = A'*theta
    e   = (y-yp)[1]
    red = 1
    # if abs(e) > 0.025
    #     red = 0.2
    # end
    theta = theta + K*e*red
    theta, P, e, yp[1]
end
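
# A hedged sketch of the intended recursive use: estimate the parameters of
# y_t = 0.9y_{t-1} + 0.5u_t one sample at a time (tuning values illustrative).
#
#     N = 200; u = randn(N); y = zeros(N)
#     for t = 2:N; y[t] = 0.9*y[t-1] + 0.5*u[t]; end
#     theta = zeros(2); P = 100*eye(2)
#     R1 = 1e-6*eye(2); R2 = 1.0 # process and measurement noise covariances
#     for t = 2:N
#         phi = [y[t-1]; u[t]]   # regressor vector at time t
#         theta, P, e, yp = kalman(R1, R2, theta, y[t], phi, P)
#     end
#     # theta ≈ [0.9, 0.5]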
function kernelPCA(X; α=1.0)
    # GaussianKernel/kernelmatrix are assumed to be provided by a kernel
    # package such as MLKernels.jl
    κ = GaussianKernel(α)
    K = kernelmatrix(κ,X)
    N = size(K)[1]
    In = ones(N,N)./N
    K = K-In*K - K*In + In*K*In # double centering of the kernel matrix
    (D,V) = eig(K)
    Kpc = K*V
    Kpc,D,V
end
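
# Usage sketch (assumes a kernel package providing the two functions above):
#
#     Kpc, D, V = kernelPCA(randn(50,3), α = 2.0)
#     # columns of Kpc hold the nonlinear principal components of the 50 samples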
function toeplitz{T}(c::Array{T},r::Array{T})
    # Build a Toeplitz matrix with first column `c` and first row `r`
    # (c[1] is assumed to equal r[1])
    nc = length(c)
    nr = length(r)
    A  = zeros(T, nc, nr)
    A[:,1] = c
    A[1,:] = r
    for i in 2:nr
        A[2:end,i] = A[1:end-1,i-1] # each column is the previous one shifted down
    end
    A
end
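
# Example: toeplitz([1.,2.,3.], [1.,4.,5.]) builds
#
#     1.0  4.0  5.0
#     2.0  1.0  4.0
#     3.0  2.0  1.0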