diff --git a/examples/training.jl b/examples/training.jl
index 763e4b715de6a9b6561018d271237b1742d14415..7a10074d26f767681a9305c15b00d22685423318 100644
--- a/examples/training.jl
+++ b/examples/training.jl
@@ -7,13 +7,22 @@
 #       extension: .jl
 #       format_name: percent
 #       format_version: '1.3'
-#       jupytext_version: 1.14.1
+#       jupytext_version: 1.14.5
 #   kernelspec:
-#     display_name: Julia 1.8.1
+#     display_name: Julia 1.8.4
 #     language: julia
 #     name: julia-1.8
 # ---
 
+# %% [markdown]
+# # Aim and Scope 
+
+# %% [markdown]
+# This notebook contains the initial examples used while implementing the gradient computations.
+
+# %% [markdown]
+# # Prelude
+
 # %%
 import Pkg
 Pkg.activate("..")
@@ -25,6 +34,7 @@ using Revise
 using IQC
 
 # %%
+using ControlSystems
 using LinearAlgebra
 import Hypatia
 import ControlSystems
@@ -33,14 +43,31 @@ import Zygote
 import Symbolics
 import Random
 import Flux
-import ProgressMeter: Progress, next!
+import ProgressMeter: Progress, next!, @showprogress
 import ProgressMeter
 using Printf
 using Plots
 
+# %%
+using Logging
+
+# %%
+Logging.disable_logging(Logging.Warn)
+
 # %%
 ProgressMeter.ijulia_behavior(:clear)
 
+# %% [markdown]
+# # Static NN Gain Bound
+
+# %% [markdown]
+# We train a randomly initialized network to reproduce the identity function by minimizing the gain bound $\gamma^2$ in
+# $$(y-x)^2 \le \gamma^2 x^2,$$
+# where $x$ and $y$ are the input and output, respectively.
+#
+# The weights are randomly initialized, but with the "correct" sign to simplify the problem.
+#
+
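+# %% [markdown]
+# As a rough sanity check outside the IQC machinery, a static scalar map $f$ satisfies the bound exactly when $\gamma \ge \sup_{x \ne 0} |f(x)-x|/|x|$, so sampling that ratio gives an empirical lower bound on the achievable $\gamma$. The sketch below uses a hypothetical single-hidden-unit ReLU map with illustrative weights, not the $W_1,\dots,W_4$ trained below.
+
+# %%
+# Hypothetical single-hidden-unit ReLU map; purely illustrative, not the network trained below.
+toy(x; w1=1.3, w2=0.8) = w2 * max(w1 * x, 0.0)
+
+# Empirical lower bound on γ: sample inputs and take the worst ratio |y - x| / |x|.
+xs = range(-2, 2, length=401)
+γ_lb = maximum(abs(toy(x) - x) / abs(x) for x in xs if x != 0)
+@show γ_lb;
+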
 # %%
 Random.seed!(17)
 
@@ -79,13 +106,13 @@ end
 IQC.gradient(sys, γ²)
 
 # %%
-n_iters = 100
+n_iters = 1000
 γ²s = []
 pm = Progress(n_iters)
 
 
 for iter in 1:n_iters
-    λ = 1/iter
+    λ = 1e-1/sqrt(iter)
     grads = IQC.gradient(sys, γ²)
     for W in [W1, W2, W3, W4]
         IQC.set_param!(sys, W, IQC.value(sys, W) - λ*grads[W])
@@ -95,17 +122,23 @@ for iter in 1:n_iters
     next!(pm, showvalues=[(:γ², IQC.value(sys, γ²)), (:W1, IQC.value(sys, W1)), (:W2, IQC.value(sys, W2)), (:W3, IQC.value(sys, W3)), (:W4, IQC.value(sys, W4))])
 end
 
-# %% [raw]
-# plot(γ²s, label = "", yaxis = (:log10, "γ²"), xaxis = "Steps") |> display
+# %%
+@show minimum(γ²s) maximum(γ²s);
+
+# %%
+plot(clamp.(γ²s, 1e-10, Inf), label = "", yaxis = (:log10, "γ²"), xaxis = "Steps")
 
 # %%
 @show IQC.value(sys, γ²);
 
+# %% [markdown]
+# # Convergence Rate of P-Controlled First-Order System
+
 # %%
 s = ControlSystems.tf("s")
 
 # %%
-function margin_grads(a_val, k_val)
+function conv(a_val, k_val)
     sys = DynamicSystem(opt = Hypatia.Optimizer)
     k = @IQC.parameter sys k k_val
     a = @IQC.parameter sys a a_val
@@ -127,32 +160,23 @@ function margin_grads(a_val, k_val)
         x^2 <= 0
     end
 
-    (success, P, margin, feas_prob) = solve!(sys, max_margin=true, req_PSD_P=true, silent=true)
-    @assert success
-
-    (; A, B, C, Pi, u, y) = feas_prob
-
-    grads = IQC.margin_gradient(sys, P)
-    (grads[a], grads[k])
+    IQC.convergence_rate(sys)
 end
 
 # %%
-margin_grads(0.0, 0.0)
+conv(1.0, 0.0)
 
 # %%
-margin_grads(1.0, 2.0)
+conv(0.0, 1.0)
 
 # %%
-margin_grads(1.0, 0.0)
+conv(1.0, 2.0)
 
 # %%
-margin_grads(0.0, 1.0)
-
-# %% [markdown]
-# The derivatives of the margin do not represent any stability measure, so it seems not to be helpful here.
+conv(2.0, 1.0)
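+
+# %% [markdown]
+# Closed-form cross-check: the equality $0 = u + kx$ gives $u = -kx$, so the closed loop is $\dot x = (a-k)x$ and, if `IQC.convergence_rate` returns the exponential decay rate, it should be close to $k - a$ whenever $k > a$ (and no certificate should exist otherwise).
+
+# %%
+# Closed-loop pole a - k for the same four (a, k) pairs; the decay rate is k - a when positive.
+[ (a_val, k_val, a_val - k_val) for (a_val, k_val) in [(1.0, 0.0), (0.0, 1.0), (1.0, 2.0), (2.0, 1.0)] ]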
 
 # %%
-function conv(a_val, k_val)
+function conv_grad(a_val, k_val)
     sys = DynamicSystem(opt = Hypatia.Optimizer)
     k = @IQC.parameter sys k k_val
     a = @IQC.parameter sys a a_val
@@ -174,23 +198,52 @@ function conv(a_val, k_val)
         x^2 <= 0
     end
 
-    IQC.convergence_rate(sys)
+    λ = IQC.convergence_rate(sys)
+    (success, P, feas_prob) = solve!(sys, λ = λ, req_PSD_P=true, silent=true)
+    @assert success    
+    
+    grads = IQC.convergence_rate_gradient(sys, λ, P)
+    (λ, grads[a], grads[k])
 end
 
 # %%
-conv(1.0, 0.0)
+conv_grad(1.0, 0.0)
 
 # %%
-conv(0.0, 1.0)
+conv_grad(3.0, 10.0)
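+
+# %% [markdown]
+# A quick consistency check, assuming `conv` and `conv_grad` certify the same rate: compare the analytic gradients with central finite differences of `conv` at the same point. The step $\delta$ has to stay well above the tolerance of the rate computation, so treat the numbers as indicative only.
+
+# %%
+# Central finite differences of the certified rate at (a, k) = (3, 10).
+δ = 1e-2
+fd_a = (conv(3.0 + δ, 10.0) - conv(3.0 - δ, 10.0)) / (2δ)
+fd_k = (conv(3.0, 10.0 + δ) - conv(3.0, 10.0 - δ)) / (2δ)
+@show fd_a fd_k;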
 
 # %%
-conv(1.0, 2.0)
+a = 1
+k = 2
+
+λs = []
+as = []
+ks = []
+n_iters = 10^2
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    (λ, ∂a, ∂k) = conv_grad(a, k)
+    step = 1e-1
+    a += step*∂a
+    k += step*∂k
+    push!(λs, λ)
+    push!(as, a)
+    push!(ks, k)
+    next!(pm, showvalues=[(:λ, λ), (:a, a), (:k, k)])
+end
 
 # %%
-λ = conv(2.0, 1.0)
+plt_a = plot(λs, label = "", yaxis = "conv_rate", xaxis = "steps")
+plt_b = plot(as, label = "", yaxis = "a", xaxis = "steps")
+plt_c = plot(ks, label = "", yaxis = "k", xaxis = "steps")
+plt_d = plot(ks, as, label = "", yaxis = "a", xaxis = "k")
+plot(plt_a, plt_b, plt_c, plt_d)
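+
+# %% [markdown]
+# If the certified rate equals the closed-form $k - a$ (so its gradient with respect to $(a, k)$ is $(-1, +1)$), the ascent above should increase $\lambda$ by $0.2$ per step; a reference trajectory to compare against:
+
+# %%
+# Closed-form reference, assuming λ(a, k) = k - a and gradients (-1, +1) throughout.
+a0, k0, step0 = 1.0, 2.0, 1e-1
+λ_ref = [ (k0 + step0*(t-1)) - (a0 - step0*(t-1)) for t in 1:n_iters ]
+plot(λs, label = "certified rate", xaxis = "steps")
+plot!(λ_ref, label = "k - a reference", linestyle = :dash)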
+
+# %% [markdown]
+# # Gain Bound of P-Controlled First-Order System
 
 # %%
-function conv_grad(a_val, k_val)
+function _gain_grad(a_val, k_val; R=0.0)
     sys = DynamicSystem(opt = Hypatia.Optimizer)
     k = @IQC.parameter sys k k_val
     a = @IQC.parameter sys a a_val
@@ -198,6 +251,7 @@ function conv_grad(a_val, k_val)
     @signals sys begin
         x, 1
         u, 1
+        w, 1
     end
 
     @equalities sys begin
@@ -205,24 +259,445 @@ function conv_grad(a_val, k_val)
     end
 
     @tfs sys begin
-        x = (1/(s-a))*u
+        x = (1/(s-a))*(u+w)
+    end
+
+    τ = gen_τ(sys, IQC.Scalar(), minimize=true, nonneg=true)
+    @target sys begin
+        x^2 + R*u^2 <= τ*w^2
+    end
+
+    (success, P, feas_prob) = solve!(sys, req_PSD_P=true, silent=true)
+    IQC.set_param!(sys, τ, IQC.value(sys, τ)) 
+    @assert success    
+    
+    grads = IQC.gradient(sys, τ)
+    (IQC.value(sys, τ), grads[a], grads[k])
+end
+
+function gain_grad(a_val, k_val; R=0.0, n_tries=10)
+    n_failed = 0
+    done = false
+    ret_val = nothing
+    while n_failed<n_tries && !done
+        try
+            ret_val = _gain_grad(a_val, k_val, R=R)
+            done = true
+        catch e
+            n_failed += 1
+        end
+    end
+    ret_val
+end
+
+# %%
+gain_grad(1, 2)
+
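+# %% [markdown]
+# Closed form for $R = 0$: with $u = -kx$ the transfer from $w$ to $x$ is $1/(s + (k - a))$, whose $H_\infty$ norm is $1/(k-a)$, so I'd expect $\tau$ to be close to $1/(k-a)^2$ (here $\approx 1$ for $a = 1$, $k = 2$). A cross-check with ControlSystems, assuming `hinfnorm` is available in the loaded version:
+
+# %%
+# Closed-loop transfer from w to x for a = 1, k = 2 is 1/(s + 1); its H∞ norm is 1/(k - a).
+T_wx = 1 / (s + 1.0)
+hinf = ControlSystems.hinfnorm(T_wx)[1]   # assumes hinfnorm from ControlSystems is available
+@show hinf hinf^2;
+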
+# %%
+a = 1
+k = 2
+
+τs = []
+as = []
+ks = []
+n_iters = 10^3
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    (τ, ∂a, ∂k) = gain_grad(a, k)
+    step = 1e-1
+    a -= step*∂a
+    k -= step*∂k
+    push!(τs, τ)
+    push!(as, a)
+    push!(ks, k)
+    next!(pm, showvalues=[(:τ, τ), (:a, a), (:k, k)])
+end
+
+# %%
+plt_a = plot(τs, label = "", yaxis = "gain bound", xaxis = "steps")
+plt_b = plot(as, label = "", yaxis = "a", xaxis = "steps")
+plt_c = plot(ks, label = "", yaxis = "k", xaxis = "steps")
+plt_d = plot(ks, as, label = "", yaxis = "a", xaxis = "k")
+plot(plt_a, plt_b, plt_c, plt_d)
+
+# %%
+a = 1
+k = 2
+
+τs = []
+as = []
+ks = []
+n_iters = 10^3
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    (τ, ∂a, ∂k) = gain_grad(a, k, R=1)
+    step = 1e-1
+    # a -= step*∂a
+    k -= step*∂k
+    push!(τs, τ)
+    push!(as, a)
+    push!(ks, k)
+    next!(pm, showvalues=[(:τ, τ), (:a, a), (:k, k)])
+end
+
+# %%
+plt_a = plot(τs, label = "", yaxis = "gain bound", xaxis = "steps")
+plt_b = plot(as, label = "", yaxis = "a", xaxis = "steps")
+plt_c = plot(ks, label = "", yaxis = "k", xaxis = "steps")
+plt_d = plot(ks, as, label = "", yaxis = "a", xaxis = "k")
+plot(plt_a, plt_b, plt_c, plt_d)
+
+# %%
+ks = range(a+1, 10, length=10)
+@time τs = [ gain_grad(a, k, R=1)[1] for k in ks ]
+
+# %%
+a = 1
+k = 2
+
+τs = []
+as = []
+ks = []
+n_iters = 10^3
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    (τ, ∂a, ∂k) = gain_grad(a, k, R=10)
+    step = 1e-1
+    # a -= step*∂a
+    k -= step*∂k
+    push!(τs, τ)
+    push!(as, a)
+    push!(ks, k)
+    next!(pm, showvalues=[(:τ, τ), (:a, a), (:k, k)])
+end
+
+# %%
+plt_a = plot(τs, label = "", yaxis = "gain bound", xaxis = "steps")
+plt_b = plot(as, label = "", yaxis = "a", xaxis = "steps")
+plt_c = plot(ks, label = "", yaxis = "k", xaxis = "steps")
+plt_d = plot(ks, as, label = "", yaxis = "a", xaxis = "k")
+plot(plt_a, plt_b, plt_c, plt_d)
+
+# %%
+ks = range(a+1, 10, length=10)
+@time τs = [ gain_grad(a, k, R=10)[1] for k in ks ]
+
+# %% [markdown]
+# # Convergence Rate of PD-Controlled Second-Order System
+
+# %%
+function conv_rate(ζ_val, ω_val, kp_val, kd_val)
+    sys = DynamicSystem(opt = Hypatia.Optimizer)
+    kp = @IQC.parameter sys kp kp_val
+    kd = @IQC.parameter sys kd kd_val
+    ζ = @IQC.parameter sys ζ ζ_val
+    ω = @IQC.parameter sys ω ω_val
+
+    @signals sys begin
+        x, 1
+        dx, 1
+        u, 1
+    end
+
+    @equalities sys begin
+        0 = u+kp*x+kd*dx
+    end
+
+    @tfs sys begin
+        x = (1/(s^2+2*ζ*ω*s+ω^2))*u
+        dx = s*x
     end
 
     @target sys begin
         x^2 <= 0
     end
 
-    λ = IQC.convergence_rate(sys)
+    λ = IQC.convergence_rate(sys, ϵ=1e-6)
+    λ
+end
+
+# %%
+function conv_grad(ζ_val, ω_val, kp_val, kd_val)
+    sys = DynamicSystem(opt = Hypatia.Optimizer)
+    kp = @IQC.parameter sys kp kp_val
+    kd = @IQC.parameter sys kd kd_val
+    ζ = @IQC.parameter sys ζ ζ_val
+    ω = @IQC.parameter sys ω ω_val
+
+    @signals sys begin
+        x, 1
+        dx, 1
+        u, 1
+    end
+
+    @equalities sys begin
+        0 = u+kp*x+kd*dx
+    end
+
+    @tfs sys begin
+        x = (1/(s^2+2*ζ*ω*s+ω^2))*u
+        dx = s*x
+    end
+
+    @target sys begin
+        x^2 <= 0
+    end
+
+    λ = IQC.convergence_rate(sys, ϵ=1e-6)
     (success, P, feas_prob) = solve!(sys, λ = λ, req_PSD_P=true, silent=true)
     @assert success    
     
-    IQC.convergence_rate_gradient(sys, λ, P)
+    grads = IQC.convergence_rate_gradient(sys, λ, P)
+    (λ, grads[kp], grads[kd])
 end
 
 # %%
-conv_grad(1.0, 0.0)
+ω = 1
+ζ = .2
+kp = -.1
+kd = -.1
+
+grads = conv_grad(ζ, ω, kp, kd)
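+
+# %% [markdown]
+# Closed-form cross-check: $0 = u + k_p x + k_d \dot x$ closes the loop into $s^2 + (2\zeta\omega + k_d)s + (\omega^2 + k_p) = 0$, so, assuming the certified rate matches the slowest closed-loop mode, it should be close to the negated largest real part of those roots. The helper below is just for this check.
+
+# %%
+# Decay rate from the closed-loop characteristic polynomial s² + (2ζω + kd)s + (ω² + kp).
+function closed_loop_rate(ζ, ω, kp, kd)
+    b = 2*ζ*ω + kd
+    c = ω^2 + kp
+    r = sqrt(Complex(b^2 - 4*c))
+    -maximum(real.([(-b + r) / 2, (-b - r) / 2]))
+end
+
+@show grads[1] closed_loop_rate(ζ, ω, kp, kd);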
 
 # %%
-conv_grad(3.0, 10.0)
+λs = []
+kps = []
+kds = []
+n_iters = 10^2
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    step = 1e-2
+    (λ, ∂kp, ∂kd) = conv_grad(ζ, ω, kp, kd)
+    kp += step*∂kp
+    kd += step*∂kd
+    push!(λs, λ)
+    push!(kps, kp)
+    push!(kds, kd)
+    next!(pm, showvalues=[(:λ, λ), (:kp, kp), (:kd, kd)])
+end
+
+# %%
+plot(λs)
+
+# %%
+plot(kps)
+
+# %%
+plot(kds)
+
+# %%
+function train(kp, kd)
+    λs = []
+    kps = []
+    kds = []
+    n_iters = 10^3
+    # pm = Progress(n_iters)
+    for iter in 1:n_iters
+        (λ, ∂kp, ∂kd) = conv_grad(ζ, ω, kp, kd)
+        step = 1e-1 * 1/sqrt(∂kp^2 + ∂kd^2) * 1/sqrt(iter)
+        kp += step*∂kp
+        kd += step*∂kd
+        push!(λs, λ)
+        push!(kps, kp)
+        push!(kds, kd)
+        # next!(pm, showvalues=[(:λ, λ), (:kp, kp), (:kd, kd)])
+    end
+    (λs, kps, kds)
+end
+
+# %%
+ress = Dict([])
+@time for kp in [1, 0, -1], kd in [1, 0, -1]
+    (λs, kps, kds) = train(kp, kd)
+    ress[(kp,kd)] = (kps, kds)
+    plt_a = plot(λs, label = "", yaxis = "conv_rate", xaxis = "steps")
+    plt_b = plot(kps, label = "", yaxis = "kp", xaxis = "steps")
+    plt_c = plot(kds, label = "", yaxis = "kd", xaxis = "steps")
+    plt_d = plot(kps, kds, label = "", yaxis = "kd", xaxis = "kp")
+    plot!(kps[end:end], kds[end:end], line = nothing, marker = :dot, label = "")
+    plot(plt_a, plt_b, plt_c, plt_d, title = "kp = $(kp); kd = $(kd)") |> display
+end
+
+# %%
+plot()
+for (kps, kds) in values(ress)
+    plot!(kps, kds, color=:blue, label = "")
+    plot!(kps[end:end], kds[end:end], line = nothing, color=:red, marker = :dot, label = "")
+end
+plot!(xaxis = "kp", yaxis = "kd")
+
+# %%
+kps = range(-2, 2, length=25)
+kds = range(-2, 2, length=25)
+@time contourf(kps, kds, (kp, kd) -> conv_rate(.1, 1, kp, kd))
+
+
+for (kps, kds) in values(ress)
+    plot!(kps, kds, color=:blue, label = "")
+    plot!(kps[end:end], kds[end:end], line = nothing, color=:red, marker = :dot, label = "")
+end
+plot!(xaxis = "kp", yaxis = "kd")
+
+# %% [markdown]
+# # Gain Bound of PD-Controlled Second-Order System
+
+# %%
+function _gain_grad(ζ_val, ω_val, kp_val, kd_val; R = 0.0)
+    
+    sys = DynamicSystem(opt = Hypatia.Optimizer)
+    kp = @IQC.parameter sys kp kp_val
+    kd = @IQC.parameter sys kd kd_val
+    ζ = @IQC.parameter sys ζ ζ_val
+    ω = @IQC.parameter sys ω ω_val
+
+    @signals sys begin
+        x, 1
+        dx, 1
+        u, 1
+        w, 1
+    end
+
+    @equalities sys begin
+        0 = u+kp*x+kd*dx
+    end
+
+    @tfs sys begin
+        x = (1/(s^2+2*ζ*ω*s+ω^2))*(u+w)
+        dx = s*x
+    end
+
+    τ = gen_τ(sys, IQC.Scalar(), minimize=true, nonneg=true)
+    @target sys begin
+        x^2 + R*u^2 <= τ*w^2
+    end
+
+    (success, P, feas_prob) = solve!(sys, req_PSD_P=true, silent=true)
+    IQC.set_param!(sys, τ, IQC.value(sys, τ)) 
+    @assert success    
+    
+    grads = IQC.gradient(sys, τ)
+    (IQC.value(sys, τ), grads[kp], grads[kd])
+end
+
+
+function gain_grad(ζ_val, ω_val, kp_val, kd_val; R=0.0, n_tries = 10)
+    n_failed = 0
+    done = false
+    ret_val = nothing
+    while n_failed < n_tries && !done
+        try
+            ret_val = _gain_grad(ζ_val, ω_val, kp_val, kd_val; R=R)
+            done = true
+        catch e
+            n_failed += 1
+        end
+    end
+    ret_val
+end         
+
+# %%
+ζ = .1
+ω = 1
+kp = 0.5
+kd = 0.5
+
+
+for _ in 1:20 
+    try
+        @show _gain_grad(ζ, ω, kp, kd)
+    catch e
+        @show e
+    end
+end
+
+# %%
+ζ = .1
+ω = 1
+kp = 0.0
+kd = 0.0
+
+
+for _ in 1:20 
+    try
+        @show _gain_grad(ζ, ω, kp, kd)
+    catch e
+        @show e
+    end
+end
+
+# %% [markdown]
+# Is there no gain bound here?
+#
+# Due to the damping, I do expect a gain bound, and Matlab should be able to find it with its H-infinity functionality; the quick check below also suggests the bound is finite.
+
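+# %% [markdown]
+# For $kp = kd = 0$ the loop is just the open plant, so the transfer from $w$ to $x$ is $1/(s^2 + 2\zeta\omega s + \omega^2)$, whose peak gain has the textbook closed form $1/(2\zeta\omega^2\sqrt{1-\zeta^2})$ for $\zeta < 1/\sqrt{2}$. A finite bound therefore exists ($\tau$ around $25$ for $\zeta = 0.1$, $\omega = 1$), which suggests the failures above come from the solver rather than from an unbounded gain. Quick check, again assuming `hinfnorm` is available:
+
+# %%
+# Open-loop transfer from w to x and its resonant peak for ζ = 0.1, ω = 1.
+G_wx = 1 / (s^2 + 2*0.1*1.0*s + 1.0)
+peak_formula = 1 / (2*0.1*1.0^2*sqrt(1 - 0.1^2))   # textbook peak for ζ < 1/√2
+peak_num = ControlSystems.hinfnorm(G_wx)[1]        # assumes hinfnorm from ControlSystems is available
+@show peak_formula peak_num peak_num^2;
+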
+# %%
+ζ = .1
+ω = 1
+kp = .5
+kd = .5
+∂kp = 0
+∂kd = 0
+τ = 0
+
+τs = []
+kps = []
+kds = []
+n_iters = 10^2
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    (τ, ∂kp, ∂kd) = gain_grad(ζ, ω, kp, kd)
+    step = 1e-1/sqrt(iter)
+    kp -= step*∂kp
+    kd -= step*∂kd
+    push!(τs, τ)
+    push!(kps, kp)
+    push!(kds, kd)
+    next!(pm, showvalues=[(:τ, τ), (:kp, kp), (:kd, kd)])
+end
+
+# %%
+plot(τs, yaxis = ("Gain Bound", :log10), xaxis = "Gradient Steps")
+
+# %%
+plot(kps, kds, xaxis = "kp", yaxis = "kd", label = "")
+plot!(kps[end:end], kds[end:end], line=nothing, marker=:dot, label = "")
+
+# %%
+ζ = .1
+ω = 1
+kp = .5
+kd = .5
+∂kp = 0
+∂kd = 0
+τ = 0
+
+τs = []
+kps = []
+kds = []
+n_iters = 10^2
+pm = Progress(n_iters)
+for iter in 1:n_iters
+    (τ, ∂kp, ∂kd) = gain_grad(ζ, ω, kp, kd, R = 1.0)
+    step = 1e-1/sqrt(iter)
+    kp -= step*sign(∂kp)
+    kd -= step*sign(∂kd)
+    push!(τs, τ)
+    push!(kps, kp)
+    push!(kds, kd)
+    next!(pm, showvalues=[(:τ, τ), (:kp, kp), (:kd, kd)])
+end
+
+# %%
+plot(τs, yaxis = ("Gain Bound", :log10), xaxis = "Gradient Steps")
+
+# %%
+plot(plot(kps),plot(kds))
+
+# %%
+plot(kps, kds, xaxis = "kp", yaxis = "kd", label = "")
+plot!(kps[end:end], kds[end:end], line=nothing, marker=:dot, label = "")
+
+# %% [markdown]
+# For sufficiently large $R$, I expect the optimal $kp$ to be finite, but the iteration above still drives $kp$ to large values. Matlab should be able to tell me what the answer is, and then I should try to interpret the result; the sweep below is a first attempt at probing this directly.
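+
+# %% [markdown]
+# One way to probe this without Matlab: sweep $kp$ at fixed $kd$ and plot the $R$-weighted bound directly. This reuses the notebook's own `gain_grad` (with its silent retries), so it is exploratory rather than a verified answer.
+
+# %%
+# Exploratory sweep of the R-weighted gain bound over kp at fixed ζ = 0.1, ω = 1, kd = 0.5.
+kp_sweep = range(0.5, 20, length=20)
+τ_sweep = [ gain_grad(0.1, 1.0, kp_i, 0.5, R = 1.0)[1] for kp_i in kp_sweep ]
+plot(kp_sweep, τ_sweep, xaxis = "kp", yaxis = "gain bound (R = 1)", label = "")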
 
 # %%