SystemIdentification, commit d90732ab
Authored Sep 13, 2016 by Fredrik Bagge Carlson
Commit message: Clean up
Parent: 9702b42f
Changes: 11 files
src/MCMC/testStan.jl (deleted, 100644 → 0)

######### Stan program example ###########
# module Tmp
const σw0 = 1.0
const σw = 1
const σv = 1.0
const theta0 = [0.5, 25, 8]
s2piσv = log(sqrt(2*pi)*σv)
ProjDir = pwd()

function f_sample(x::Vector, t::Int64)
    c = 8*cos(1.2*(t-1))
    @inbounds for i = 1:length(x)
        x[i] = 0.5*x[i] + 25*x[i]./(1+x[i]^2) + c + σw*randn()
    end
    return x
end
f_sample(x::Float64, t::Int64) = 0.5*x + 25*x/(1+x^2) + 8*cos(1.2*(t-1)) + σw*randn()

T = 100
M = 1
x = Array(Float64,T)
y = Array(Float64,T)
x0 = 0
x[1] = σw*randn()
y[1] = σv*randn()
for t = 1:T-1
    x[t+1] = f_sample(x[t],t)
    y[t+1] = 0.05x[t+1]^2 + σv*randn()
end
# t

using Stan, Mamba

odemodel = "
data {
    int<lower=1> T;
    real y[T];
}
parameters {
    real theta[3];
    real sigma[2];
    real x[T];
}
model {
    sigma[1] ~ cauchy(1,1);
    sigma[2] ~ cauchy(1,1);
    theta[1] ~ cauchy(0.5,0.5);
    theta[2] ~ cauchy(25,25);
    theta[3] ~ cauchy(8,8);
    x[1] ~ normal(0,1);
    y[1] ~ normal(0.05*x[1]*x[1], sigma);
    for (t in 1:(T-1)){
        x[t+1] ~ normal(theta[1]*x[t] + theta[2]*x[t]/(1+x[t]*x[t]) + theta[3]*cos(1.2*(t-1)),sigma[1]);
        y[t+1] ~ normal(0.05*x[t+1]*x[t+1], sigma[2]);
    }
}
"

odedict = Dict("T" => T, "y" => y)
stanmodel = Stanmodel(name = "ode", model = odemodel, nchains = 4, update = 10000)
@time sim1 = stan(stanmodel, [odedict], ProjDir, diagnostics = false, CmdStanDir = CMDSTAN_HOME)

## Subset Sampler Output to variables suitable for describe().
monitor = ["lp__", "accept_stat__"]
sim = sim1[1:size(sim1,1), monitor, 1:size(sim1,3)]
describe(sim)
println()
p = plot(sim, [:trace, :mean, :density, :autocor], legend = true)
draw(p, ncol = 4, filename = "$(stanmodel.name)-infoplot", fmt = :pdf)

## Subset Sampler Output to variables suitable for describe().
monitor = ["theta.1", "theta.2", "theta.3"]
sim = sim1[1:size(sim1,1), monitor, 1:size(sim1,3)]
describe(sim)
println()
p = plot(sim, [:trace, :mean, :density, :autocor], legend = true)
draw(p, ncol = 4, filename = "$(stanmodel.name)-thetaplot", fmt = :pdf)

## Subset Sampler Output to variables suitable for describe().
monitor = ["sigma.1", "sigma.2"]
sim = sim1[:, monitor, :]
describe(sim)
println()
p = plot(sim, [:trace, :mean, :density, :autocor], legend = true)
draw(p, nrow = 4, ncol = 4, filename = "$(stanmodel.name)-sigmaplot", fmt = :pdf)
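
For reference, the script above simulates T = 100 samples from a well-known nonlinear state-space benchmark, using the constants it hard-codes (theta0 = [0.5, 25, 8], noise scales σw and σv):

x[t+1] = 0.5*x[t] + 25*x[t]/(1 + x[t]^2) + 8*cos(1.2*(t-1)) + w[t],   w[t] ~ N(0, σw^2)
y[t]   = 0.05*x[t]^2 + v[t],                                          v[t] ~ N(0, σv^2)

The Stan program then treats theta[1:3], the two noise scales and the state trajectory x as unknowns and samples them jointly from the simulated measurements y.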
src/PCA.jl

...
@@ -6,3 +6,17 @@ function PCA(W)
     score = score*diagm(latent)
     C,score,latent,W0
 end
+
+using MLKernels
+function kernelPCA(X; α = 1.0)
+    κ   = GaussianKernel(α)
+    K   = kernelmatrix(κ,X)
+    N   = size(K)[1]
+    In  = fill(1/N,(N,N))
+    K   = K - In*K - K*In + In*K*In # Make sure mean is zero
+    (D,V) = eig(K)
+    Kpc = K*V
+    Kpc, D, V
+end
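
A minimal usage sketch of the `kernelPCA` added above; the data layout (one observation per row) and the bandwidth value `α = 1.0` are assumptions, not something this diff specifies:

using MLKernels

# Toy data: two noisy clusters, one observation per row (assumed layout).
X = vcat(randn(50, 2) .- 2, randn(50, 2) .+ 2)

# Gaussian-kernel PCA as defined above: centred kernel matrix,
# eigendecomposition, and projection Kpc = K*V.
Kpc, D, V = kernelPCA(X; α = 1.0)

# eig does not guarantee any eigenvalue ordering here, so sort explicitly and
# keep the two components with the largest eigenvalues.
order = sortperm(real(D), rev = true)
pc12  = Kpc[:, order[1:2]]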
src/SystemIdentification.jl

...
@@ -13,7 +13,7 @@ FitResult,
 IdData,
 # Functions
 ar, arx, getARregressor, getARXregressor, find_na,
-toeplitz, kalman, kalman_smoother, forward_kalman, PCA
+toeplitz, kalman, kalman_smoother, forward_kalman, PCA, plotmodel
 ## Fit Methods =================
 :LS
...
src/armax.jl

...
@@ -93,7 +93,7 @@ getARXregressor(iddata::IdData, na, nb) = getARXregressor(iddata.y,iddata.u, na,
 """Plots the RMSE and AIC for model orders up to `n`. Useful for model selection"""
-function find_na(y::Vector{Float64},n::Int)
+function find_na(y::Vector,n::Int)
     error = zeros(n,2)
     for i = 1:n
         w,e = ar(y,i)
...
@@ -102,18 +102,21 @@ function find_na(y::Vector{Float64},n::Int)
         print(i,", ")
     end
     println("Done")
-    plotsub(error,"-o")
-    show()
+    scatter(error, show=true)
 end

-import PyPlot
-function PyPlot.plot(y,m::AR)
+"""
+plotmodel(y,m::AR)
+Plots a signal `y` and the output of the model `m`
+"""
+function plotmodel(y,m::AR)
     na = length(m.a)
     y,A = getARregressor(y,na)
     yh = A*m.a
     error = y-yh
-    plot(y,"k")
-    plot(yh,"b")
-    plot(error,"r")
-    title("Fitresult, AR, n_a: $na, RMSE = $(rms(error)) Fit = $(fit(y,yh))")
+    plot(y,c=:black)
+    plot(yh,c=:b)
+    plot(error,c=:r,title="Fitresult, AR, n_a: $na, RMSE = $(rms(error)) Fit = $(fit(y,yh))")
 end
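
A hypothetical usage sketch of the two functions in this hunk. The way `find_na` unpacks `ar(y,i)` suggests that `ar` returns a fitted model plus prediction errors, but that is an assumption here, as is the synthetic signal:

# Synthetic AR(2) signal, purely illustrative.
N = 500
y = zeros(N)
for t = 3:N
    y[t] = 1.5*y[t-1] - 0.7*y[t-2] + 0.1*randn()
end

find_na(y, 10)       # plots RMSE and AIC for model orders 1..10 (model selection)

m, e = ar(y, 2)      # assumed: fitted AR model and prediction errors
plotmodel(y, m)      # signal, model output and error, with RMSE and fit in the title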
src/cuckooSearch.jl
View file @
d90732ab
using
Devectorize
using
Devectorize
"""
"""
`cuckoo_search(f,X0;Lb=-convert(Float64,Inf),Ub=convert(Float64,Inf),n=25,pa=0.25, Tol=1.0e-5, max_iter = 1e5, timeout = Inf)`
\n
`cuckoo_search(f,X0;Lb=-convert(Float64,Inf),Ub=convert(Float64,Inf),n=25,pa=0.25, Tol=1.0e-5, max_iter = 1e5, timeout = Inf)`
\n
`n` = Number of nests (or different solutions)
`n` = Number of nests (or different solutions)
`pa=0.25` Discovery rate of alien eggs/solutions
`pa=0.25` Discovery rate of alien eggs/solutions
Change this if you want to get better results
Change this if you want to get better results
...
...
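
The docstring spells out the keyword interface; a hypothetical call minimizing the 2-D Rosenbrock function follows. The layout of `X0` and the exact return value are not visible in this hunk, so both are assumptions:

# Rosenbrock: minimum 0 at [1, 1].
rosenbrock(x) = (1 - x[1])^2 + 100*(x[2] - x[1]^2)^2

X0 = [-1.0, 2.0]                                  # initial guess (layout assumed)
best = cuckoo_search(rosenbrock, X0;
                     Lb = -5.0, Ub = 5.0,         # box constraints
                     n = 25, pa = 0.25,           # nests and discovery rate, as documented
                     Tol = 1e-6, max_iter = 1e4)  # stopping criteria from the docstring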
src/idinput.jl

+"""
+idinput(N, class = "prbs"; band = 1)
+Create an input signal for a sys.id. experiment, currently only supports prbs signals.
+"""
 function idinput(N, class = "prbs"; band = 1)
     output = zeros(N)
     if lowercase(class) == "prbs"
...
@@ -18,4 +23,3 @@ function idinput(N, class = "prbs"; band = 1)
     return output
 end
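
A short sketch of how the documented `idinput` would be used to excite a system for identification; the filter below is a hypothetical plant, not part of the package:

using DSP

u = idinput(1000)                              # 1000-sample PRBS with the documented defaults
y = filt([0.0, 0.5, 0.25], [1.0, -0.8], u)     # hypothetical system producing the output to identify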
src/kernelPCA.jl (deleted, 100644 → 0)

using MLKernels
function kernelPCA(X; α = 1.0)
    κ   = GaussianKernel(α)
    K   = kernelmatrix(κ,X)
    N   = size(K)[1]
    In  = fill(1/N,(N,N))
    K   = K - In*K - K*In + In*K*In # Make sure mean is zero
    (D,V) = eig(K)
    Kpc = K*V
    Kpc, D, V
end
src/least_squares.jl

...
@@ -36,9 +36,13 @@ function dsvd(A,b,λ)
     dsvd(A,U,S,V,b,λ)
 end

+"""
+Lcurve(normE, normX, λ)
+Plots the L-curve. normE and normX are obtained from, e.g., `dsvd`
+"""
 function Lcurve(normE,normX,λ)
-    plot(normE,normX,xscale=:log10,yscale=:log10,m=:o)
+    plot(normE,normX,xscale=:log10,yscale=:log10,m=:o,xlabel="RMSE",ylabel="||k||",title="L-curve")
     annotations = [(normE[i],normX[i],"λ=$(round(λ[i],8))") for i in 1:length(λ)]
     annotate!(annotations)
-    xlabel!("RMSE"); ylabel!("||k||"); title!("L-curve")
 end
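
The visible hunk does not show what `dsvd` returns, so this sketch builds `normE` and `normX` directly from damped-SVD solutions over a grid of λ and hands them to `Lcurve`; the filter factors σ/(σ+λ) are the textbook damped-SVD choice and only assumed to match what `dsvd` computes:

A = randn(100, 10)
b = A*randn(10) + 0.1*randn(100)         # toy regularized least-squares problem
U, S, V = svd(A)

λs    = logspace(-6, 2, 40)
normE = zeros(length(λs))
normX = zeros(length(λs))
for (i, λ) in enumerate(λs)
    f        = S ./ (S .+ λ)             # damped-SVD filter factors (assumed)
    x        = V * (f .* (U'*b) ./ S)    # filtered solution for this λ
    normE[i] = norm(A*x - b)             # residual norm
    normX[i] = norm(x)                   # solution norm
end
Lcurve(normE, normX, λs)                 # the corner of the log-log curve suggests a good λ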
src/sobol.jl

...
@@ -194,21 +194,18 @@ function InitSobol(dim::Int64)
 end
 """
-`using Winston`
 Plots the first 4 outputs of sobel(2,512)
 """
 function test_sobol()
     X,nextseed,MeM = sobol(2,512)
-    figure(); pp = plot(X[:,1],X[:,2],".b"); hold(true)
+    plot(X[:,1],X[:,2],c=:b)
     X,nextseed,MeM = sobol(X,nextseed,MeM)
-    plot(X[:,1],X[:,2],".g")
+    plot(X[:,1],X[:,2],c=:g)
     X,nextseed,MeM = sobol(X,nextseed,MeM)
-    plot(X[:,1],X[:,2],".r")
+    plot(X[:,1],X[:,2],c=:r)
     X,nextseed,MeM = sobol(X,nextseed,MeM)
-    plot(X[:,1],X[:,2],".m")
+    plot(X[:,1],X[:,2],c=:m)
-    display(pp)
 end
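
The calls inside `test_sobol` show the two ways `sobol` is invoked; a minimal sketch of that pattern without the plotting (what `nextseed` and `MeM` contain is internal generator state, inferred from the calls above):

# First call: dimension and number of points, plus the state needed to continue.
X, nextseed, MeM = sobol(2, 512)

# Subsequent calls continue the low-discrepancy sequence from where it stopped.
X, nextseed, MeM = sobol(X, nextseed, MeM)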
src/toeplitz.jl (deleted, 100644 → 0)

function toeplitz{T}(c::Array{T},r::Array{T})
    nc = length(c)
    nr = length(r)
    A  = zeros(T,nc,nr)
    A[:,1] = c
    A[1,:] = r
    for i in 2:nr
        A[2:end,i] = A[1:end-1,i-1]
    end
    A
end
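
A small worked example of the deleted constructor (`toeplitz` is still in the export list of src/SystemIdentification.jl above, so it presumably lives elsewhere after this commit); the result follows directly from the assignments `A[:,1] = c`, `A[1,:] = r` and the shifted columns:

c = [1, 2, 3]        # first column
r = [1, 4, 5]        # first row; r[1] is expected to agree with c[1]
A = toeplitz(c, r)
# 3x3 Array{Int64,2}:
#  1  4  5
#  2  1  4
#  3  2  1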
src/transfer_functions.jl

 using DSP
+"""
+tfest(y,u)
+Estimate a transfer function model using the Correlogram approach
+H = Syu/Suu
+"""
 function tfest(y,u)
     Cyu = xcorr(y,u)
     Cuu = xcorr(u,u)
...
@@ -9,11 +15,3 @@ function tfest(y,u)
 end
 tfest(iddata::IdData) = tfest(iddata.y,iddata.u)
-N = 200000;
-u = randn(N);
-y = filt(ones(5),5,u);
-H = tfest(y,u);
-semilogy(H.F, abs(H.P))
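
To make the added docstring's `H = Syu/Suu` concrete: the correlogram (Blackman-Tukey) estimate divides the cross-spectrum by the input auto-spectrum, both obtained by Fourier-transforming the correlation estimates that `xcorr` produces:

Ĥ(e^{iω}) = Ŝ_yu(ω) / Ŝ_uu(ω),   Ŝ_yu(ω) = Σ_τ R̂_yu(τ) e^{-iωτ},   Ŝ_uu(ω) = Σ_τ R̂_uu(τ) e^{-iωτ}

The lines removed at the bottom of the file were a quick self-test of exactly this: filter white noise through a length-5 moving average and plot the estimated magnitude response.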