Commit 40f9982f authored by Fredrik Bagge Carlson

Updated types and removed bias from ar and arx

parent c7a5d448
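
In short: the `Polynom`/`PolynomMatrix` aliases are widened to `Union` types, the `bias` field and keyword are dropped from the `AR`/`ARX` types, the `ar`/`arx` estimators and the regressor builders, and the `ARX` constructor call now splits the weight vector into its `a` and `b` parts. A minimal sketch of how call sites change, using only the signatures visible in the diff below; the data here is made up for illustration:

```julia
# Illustrative only -- made-up data, signatures taken from the diff below.
y = cumsum(randn(100))        # some output signal
u = randn(100)                # some input signal

# Before this commit an intercept column could be requested:
#   model, result = ar(y, 2, bias=true)

# After this commit there is no bias keyword; the regressor holds only
# lagged outputs (and lagged inputs, for arx):
model, result   = ar(y, 2)          # AR model of order na = 2
model2, result2 = arx(y, u, 2, [2]) # ARX with na = 2, nb = [2] (one input column)
```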
@@ -24,21 +24,19 @@ abstract Model
 abstract LinearModel <: Model
 abstract NonLinearModel <: Model
 abstract Network <: NonLinearModel
-typealias Polynom{T<:Real} Array{T,1}
-typealias PolynomMatrix Array{Polynom,1}
+typealias Polynom{T<:Real} Union(Array{T,1} , T)
+typealias PolynomMatrix{T} Union(Array{Polynom{T},1},Polynom{T}, T)
 """
 `a::Vector{Float64}`: The polynomial coeffs A(z) (not including the first 1)\n
 `na::Int`\n
-`bias::Bool`\n
 `λ::Float64`\n
 """
 type AR <: LinearModel
     a::Polynom
     na::Int
-    bias::Bool
     λ::Float64
 end
@@ -47,15 +45,13 @@ end
 `b::VecOrMat{Float64}`: The polynomial coeffs B(z)\n
 `na::Int`\n
 `nb::Vector{Int}`\n
-`bias::Bool`\n
 `λ::Float64`\n
 """
 type ARX <: LinearModel
     a::Polynom
     b::PolynomMatrix
     na::Int
-    nb::Vector{Int}
-    bias::Bool
+    nb::Polynom{Int}
     λ::Float64
 end
...
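
The widened aliases above let a coefficient field hold either a single value or a vector. A quick check of what the new definitions accept, assuming the Julia 0.3/0.4-era `Union(...)` syntax used in the diff:

```julia
# Re-declaring the alias from the diff, just to probe it in isolation.
typealias Polynom{T<:Real} Union(Array{T,1}, T)

isa([0.5, -0.2], Polynom{Float64})  # true: a coefficient vector
isa(0.5, Polynom{Float64})          # true: a lone coefficient now also qualifies
```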
"""`ar(y, na; λ = 0, doplot=false, bias=false)`""" """`ar(y, na; λ = 0, doplot=false)`"""
function ar(y::Vector{Float64}, na; λ = 0, bias=false) function ar(y::Vector{Float64}, na; λ = 0)
y_train, A = getARregressor(y,na;bias=bias) y_train, A = getARregressor(y,na)
n_points = size(A,1) n_points = size(A,1)
if λ == 0 if λ == 0
w = A\y_train w = A\y_train
...@@ -9,16 +9,15 @@ function ar(y::Vector{Float64}, na; λ = 0, bias=false) ...@@ -9,16 +9,15 @@ function ar(y::Vector{Float64}, na; λ = 0, bias=false)
end end
prediction = A*w prediction = A*w
error = y_train - prediction error = y_train - prediction
model = AR(w,na,bias,λ) model = AR(w,na,λ)
na = bias ? na+1:na
result = FitResult(y_train,prediction,na, λ>0?(:LS_reg) :(:LS)) result = FitResult(y_train,prediction,na, λ>0?(:LS_reg) :(:LS))
return model, result return model, result
end end
ar(iddata::IdData, na; λ = 0, doplot=false, bias=false) = ar(iddata.y, na; λ = 0, bias=bias) ar(iddata::IdData, na; λ = 0, doplot=false) = ar(iddata.y, na; λ = 0)
"""arx(y, u, na, nb; λ = 0, doplot=false, bias=false)""" """arx(y, u, na, nb; λ = 0, doplot=false)"""
function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=false, bias=false) function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=false)
y_train, A = getARXregressor(y,u, na, nb; bias=bias) y_train, A = getARXregressor(y,u, na, nb)
n_points = size(A,1) n_points = size(A,1)
if λ == 0 if λ == 0
w = A\y_train w = A\y_train
...@@ -30,33 +29,29 @@ function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=fa ...@@ -30,33 +29,29 @@ function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=fa
si = na+1 si = na+1
b = Polynom[Polynom(w[si:si+nb[1]-1])] b = Polynom[w[si:si+nb[1]-1]]
si += nb[1] si += nb[1]
for i = 2:length(nb) for i = 2:length(nb)
push!(b,Polynom(w[si:si+nb[i]-1])) push!(b,w[si:si+nb[i]-1])
si += nb[i] si += nb[i]
end end
model = ARX(w,na,nb,bias,λ) model = ARX(w[1:na],b,na,nb,λ)
na = bias ? na+1:na
result = FitResult(y_train,prediction,na, λ>0?(:LS_reg) :(:LS)) result = FitResult(y_train,prediction,na, λ>0?(:LS_reg) :(:LS))
return model, result return model, result
end end
arx(iddata::IdData, na, nb; λ = 0, bias=false) = arx(iddata.y, iddata.u, na, nb; λ = 0, bias=bias) arx(iddata::IdData, na, nb; λ = 0) = arx(iddata.y, iddata.u, na, nb; λ = 0)
function getARregressor(y::Vector{Float64},na;bias=false) function getARregressor(y::Vector{Float64},na)
A = toeplitz(y[na+1:end],y[na+1:-1:1]) A = toeplitz(y[na+1:end],y[na+1:-1:1])
y = copy(A[:,1]) y = copy(A[:,1])
A = A[:,2:end] A = A[:,2:end]
n_points = size(A,1) n_points = size(A,1)
if bias
A = [A ones(n_points)]
end
return y,A return y,A
end end
getARregressor(iddata::IdData, na, bias=false) = getARXregressor(iddata.y, na, bias=bias) getARregressor(iddata::IdData, na) = getARXregressor(iddata.y, na)
function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb; bias=false) function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb)
assert(length(nb) == size(u,2)) assert(length(nb) == size(u,2))
m = max(na+1,maximum(nb)) m = max(na+1,maximum(nb))
n = length(y) - m+1 n = length(y) - m+1
...@@ -70,12 +65,10 @@ function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb; bias=f ...@@ -70,12 +65,10 @@ function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb; bias=f
A = [A toeplitz(u[nb[i]+offs:n+nb[i]+offs-1,i],u[nb[i]+offs:-1:1+offs,i])] A = [A toeplitz(u[nb[i]+offs:n+nb[i]+offs-1,i],u[nb[i]+offs:-1:1+offs,i])]
end end
if bias
A = [A ones(n)]
end
return y,A return y,A
end end
getARXregressor(iddata::IdData, na, nb; bias=false) = getARXregressor(iddata.y,iddata.u, na, nb, bias=bias) getARXregressor(iddata::IdData, na, nb) = getARXregressor(iddata.y,iddata.u, na, nb)
"""Plots the RMSE and AIC for model orders up to `n`. Useful for model selection""" """Plots the RMSE and AIC for model orders up to `n`. Useful for model selection"""
...@@ -84,7 +77,7 @@ function find_na(y::Vector{Float64},n::Int) ...@@ -84,7 +77,7 @@ function find_na(y::Vector{Float64},n::Int)
for i = 1:n for i = 1:n
w,e = ar(y,i) w,e = ar(y,i)
error[i,1] = rms(e) error[i,1] = rms(e)
error[i,2] = aic(e,i+1) error[i,2] = aic(e,i)
print(i,", ") print(i,", ")
end end
println("Done") println("Done")
...
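
The last hunk also lowers the parameter count passed to `aic` from `i+1` to `i`, consistent with dropping the bias column from the regressor. A hedged usage sketch for the order-selection helper, assuming `find_na` keeps the `(y, n)` signature shown in the hunk header:

```julia
# Illustrative only: sweep AR orders 1..10 on made-up data; per its docstring,
# find_na plots RMSE and AIC for each order to guide the choice of na.
y = cumsum(randn(500))   # made-up signal
find_na(y, 10)           # prints "1, 2, ..., Done" while fitting each order
```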
@@ -51,24 +51,24 @@ function run_tests()
     end
     @test "AR " begin
-        model, result = ar(collect(1:5.0),1,bias=true)
+        model, result = ar(collect(1:5.0),2)
         fit = result.fit
         @tassert isa(model,AR)
         @tassert isa(result,FitResult)
         @tassert isa(fit,FitStatistics)
-        @tassert model.a [1;1]
+        @tassert model.a [2;-1]
         @tassert fit.FIT 100.0
         @tassert isapprox(fit.RMS, 0.0, atol=1e-10)
         @tassert result.method == :LS
     end
     @test "ARX " begin
-        model, result = arx(collect(1:5.0),collect(1:5.0),1,1,bias=true)
+        model, result = arx(collect(1:5.0),collect(1:5.0),1,1)
         fit = result.fit
         @tassert isa(model,ARX)
         @tassert isa(result,FitResult)
         @tassert isa(fit,FitStatistics)
-        @tassert model.a [1;1]
+        @tassert model.a [2;-1]
         @tassert fit.FIT 100.0
         @tassert isapprox(fit.RMS, 0.0, atol=1e-10)
         @tassert result.method == :LS
...