diff --git a/src/SystemIdentification.jl b/src/SystemIdentification.jl
index a484b8e91131ad8bcb8e9f29f15545f879203281..8d47ab1f3785127c68cf2bdcd2aa70e22d0aaee8 100644
--- a/src/SystemIdentification.jl
+++ b/src/SystemIdentification.jl
@@ -24,21 +24,19 @@ abstract Model
 abstract LinearModel <: Model
 abstract NonLinearModel <: Model
 abstract Network <: NonLinearModel
-typealias Polynom{T<:Real} Array{T,1}
-typealias PolynomMatrix Array{Polynom,1}
+typealias Polynom{T<:Real} Union(Array{T,1} , T)
+typealias PolynomMatrix{T} Union(Array{Polynom{T},1},Polynom{T}, T)
 
 
 
 """
 `a::Vector{Float64}`: The polynomial coeffs A(z) (not including the first 1)\n
 `na::Int`\n
-`bias::Bool`\n
 `λ::Float64`\n
 """
 type AR <: LinearModel
     a::Polynom
     na::Int
-    bias::Bool
     λ::Float64
 end
 
@@ -47,15 +45,13 @@ end
 `b::VecOrMat{Float64}`: The polynomial coeffs B(z)\n
 `na::Int`\n
 `nb::Vector{Int}`\n
-`bias::Bool`\n
 `λ::Float64`\n
 """
 type ARX <: LinearModel
     a::Polynom
     b::PolynomMatrix
     na::Int
-    nb::Vector{Int}
-    bias::Bool
+    nb::Polynom{Int}
     λ::Float64
 end
 
diff --git a/src/armax.jl b/src/armax.jl
index 9c2775110ffa60707076d5167261d61e7af98baa..cb23ca164c2a4a9a3021af8e73d82bf601b35613 100644
--- a/src/armax.jl
+++ b/src/armax.jl
@@ -1,6 +1,6 @@
-"""`ar(y, na; λ = 0, doplot=false, bias=false)`"""
-function ar(y::Vector{Float64}, na; λ = 0, bias=false)
-    y_train, A = getARregressor(y,na;bias=bias)
+"""`ar(y, na; λ = 0, doplot=false)`"""
+function ar(y::Vector{Float64}, na; λ = 0)
+    y_train, A = getARregressor(y,na)
     n_points = size(A,1)
     if λ == 0
         w = A\y_train
@@ -9,16 +9,15 @@ function ar(y::Vector{Float64}, na; λ = 0, bias=false)
     end
     prediction = A*w
     error = y_train - prediction
-    model = AR(w,na,bias,λ)
-    na = bias ? na+1:na
+    model = AR(w,na,λ)
     result = FitResult(y_train,prediction,na, λ>0?(:LS_reg) :(:LS))
     return model, result
 end
-ar(iddata::IdData, na; λ = 0, doplot=false, bias=false) = ar(iddata.y, na; λ = 0, bias=bias)
+ar(iddata::IdData, na; λ = 0, doplot=false) = ar(iddata.y, na; λ = λ)
 
-"""arx(y, u, na, nb; λ = 0, doplot=false, bias=false)"""
-function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=false, bias=false)
-    y_train, A = getARXregressor(y,u, na, nb; bias=bias)
+"""arx(y, u, na, nb; λ = 0, doplot=false)"""
+function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=false)
+    y_train, A = getARXregressor(y,u, na, nb)
     n_points = size(A,1)
     if λ == 0
         w = A\y_train
@@ -30,33 +29,29 @@ function arx(y::Vector{Float64}, u::VecOrMat{Float64}, na, nb; λ = 0, doplot=fa
 
 
     si = na+1
-    b = Polynom[Polynom(w[si:si+nb[1]-1])]
+    b = Polynom[w[si:si+nb[1]-1]]
     si += nb[1]
     for i = 2:length(nb)
-        push!(b,Polynom(w[si:si+nb[i]-1]))
+        push!(b,w[si:si+nb[i]-1])
         si += nb[i]
     end
-    model = ARX(w,na,nb,bias,λ)
-    na = bias ? na+1:na
+    model = ARX(w[1:na],b,na,nb,λ)
     result = FitResult(y_train,prediction,na, λ>0?(:LS_reg) :(:LS))
     return model, result
 end
-arx(iddata::IdData, na, nb; λ = 0,  bias=false) = arx(iddata.y, iddata.u, na, nb; λ = 0, bias=bias)
+arx(iddata::IdData, na, nb; λ = 0) = arx(iddata.y, iddata.u, na, nb; λ = λ)
 
 
-function getARregressor(y::Vector{Float64},na;bias=false)
+function getARregressor(y::Vector{Float64},na)
     A = toeplitz(y[na+1:end],y[na+1:-1:1])
     y = copy(A[:,1])
     A = A[:,2:end]
     n_points = size(A,1)
-    if bias
-        A = [A ones(n_points)]
-    end
     return y,A
 end
-getARregressor(iddata::IdData, na, bias=false) = getARXregressor(iddata.y, na, bias=bias)
+getARregressor(iddata::IdData, na) = getARregressor(iddata.y, na)
 
-function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb; bias=false)
+function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb)
     assert(length(nb) == size(u,2))
     m = max(na+1,maximum(nb))
     n = length(y) - m+1
@@ -70,12 +65,10 @@ function getARXregressor(y::Vector{Float64},u::VecOrMat{Float64}, na, nb; bias=f
         A = [A toeplitz(u[nb[i]+offs:n+nb[i]+offs-1,i],u[nb[i]+offs:-1:1+offs,i])]
     end
 
-    if bias
-        A = [A ones(n)]
-    end
+
     return y,A
 end
-getARXregressor(iddata::IdData, na, nb; bias=false) = getARXregressor(iddata.y,iddata.u, na, nb, bias=bias)
+getARXregressor(iddata::IdData, na, nb) = getARXregressor(iddata.y,iddata.u, na, nb)
 
 
 """Plots the RMSE and AIC for model orders up to `n`. Useful for model selection"""
@@ -84,7 +77,7 @@ function find_na(y::Vector{Float64},n::Int)
     for i = 1:n
         w,e = ar(y,i)
         error[i,1] = rms(e)
-        error[i,2] = aic(e,i+1)
+        error[i,2] = aic(e,i)
         print(i,", ")
     end
     println("Done")
diff --git a/test/tests.jl b/test/tests.jl
index b769bdc4396ff73aaf9987f7541b24c827105be1..34f31026f55b2428e8ae5aec0e294248732bf7dd 100644
--- a/test/tests.jl
+++ b/test/tests.jl
@@ -51,24 +51,24 @@ function run_tests()
     end
 
     @test "AR " begin
-        model, result = ar(collect(1:5.0),1,bias=true)
+        model, result = ar(collect(1:5.0),2)
         fit = result.fit
         @tassert isa(model,AR)
         @tassert isa(result,FitResult)
         @tassert isa(fit,FitStatistics)
-        @tassert model.a ≈ [1;1]
+        @tassert model.a ≈ [2;-1]
         @tassert fit.FIT ≈ 100.0
         @tassert isapprox(fit.RMS, 0.0, atol=1e-10)
         @tassert result.method == :LS
     end
 
     @test "ARX " begin
-        model, result = arx(collect(1:5.0),collect(1:5.0),1,1,bias=true)
+        model, result = arx(collect(1:5.0),collect(1:5.0),1,1)
         fit = result.fit
         @tassert isa(model,ARX)
         @tassert isa(result,FitResult)
         @tassert isa(fit,FitStatistics)
-        @tassert model.a ≈ [1;1]
+        @tassert model.a ≈ [2;-1]
         @tassert fit.FIT ≈ 100.0
         @tassert isapprox(fit.RMS, 0.0, atol=1e-10)
         @tassert result.method == :LS