diff --git a/paper/figures/fig_simple_example.tex b/paper/figures/fig_simple_example.tex
index f03a3181e966f6f5626c9aeabc9e2e1c533ae704..6bad29a4261ccea34c85ad71d93a09a233e19642 100644
--- a/paper/figures/fig_simple_example.tex
+++ b/paper/figures/fig_simple_example.tex
@@ -8,7 +8,7 @@ group style = {group size = 3 by 1,
                yticklabels at=edge left,
                xticklabels at=edge bottom,
                horizontal sep=10mm},
-height = 0.22\textwidth,
+height = 0.26\textwidth,
 width = 0.38\textwidth,
 xmin=1,
 xmax=2,
diff --git a/paper/main.bib b/paper/main.bib
index 16535c75f824a9ce8f59574549756bc56e3a4fa0..e8a1c4648f4a88149ec8e998d0c58bc5238c6a95 100755
--- a/paper/main.bib
+++ b/paper/main.bib
@@ -1,3 +1,52 @@
+@article{gaid2008design,
+  title={A design methodology for weakly-hard real-time control},
+  author={Gaid, Mongi Ben and Simon, Daniel and Sename, Olivier},
+  journal={IFAC Proceedings Volumes},
+  volume={41},
+  number={2},
+  pages={10258--10264},
+  year={2008},
+  publisher={Elsevier}
+}
+
+@inproceedings{lincoln2002jitterbug,
+  title={Jitterbug: A tool for analysis of real-time control performance},
+  author={Lincoln, Bo and Cervin, Anton},
+  booktitle={Proceedings of the 41st IEEE Conference on Decision and Control},
+  volume={2},
+  pages={1319--1324},
+  year={2002},
+  organization={IEEE}
+}
+
+@inproceedings{aminifar2012designing,
+  title={Designing high-quality embedded control systems with guaranteed stability},
+  author={Aminifar, Amir and Samii, Soheil and Eles, Petru and Peng, Zebo and Cervin, Anton},
+  booktitle={2012 IEEE 33rd Real-Time Systems Symposium},
+  pages={283--292},
+  year={2012},
+  organization={IEEE}
+}
+
+@article{biondi2018selecting,
+  title={Selecting the transition speeds of engine control tasks to optimize the performance},
+  author={Biondi, Alessandro and Di Natale, Marco and Buttazzo, Giorgio C. and Pazzaglia, Paolo},
+  journal={ACM Transactions on Cyber-Physical Systems},
+  volume={2},
+  number={1},
+  pages={1},
+  year={2018},
+  publisher={ACM}
+}
+
+@inproceedings{pazzaglia2017framework,
+  title={A framework for the co-simulation of engine controls and task scheduling},
+  author={Pazzaglia, Paolo and Di Natale, Marco and Buttazzo, Giorgio and Secchiari, Matteo},
+  booktitle={International Conference on Software Engineering and Formal Methods},
+  pages={438--452},
+  year={2017},
+  organization={Springer}
+}
+
 @book{sun2006switched,
   title={Switched linear systems: control and design},
   author={Sun, Zhendong},
diff --git a/paper/main.tex b/paper/main.tex
index 9d850a72f472a3103448ed9e868cd6d28dbfb97d..731f47f870b51c39d62abe4c4b59eecb786e47da 100755
--- a/paper/main.tex
+++ b/paper/main.tex
@@ -82,6 +82,18 @@
 </ccs2012>
 \end{CCSXML}
 
+\EventEditors{Sophie Quinton}
+\EventNoEds{1}
+\EventLongTitle{31st Euromicro Conference on Real-Time Systems (ECRTS 2019)}
+\EventShortTitle{ECRTS 2019}
+\EventAcronym{ECRTS}
+\EventYear{2019}
+\EventDate{July 9--12, 2019}
+\EventLocation{Stuttgart, Germany}
+\EventLogo{}
+\SeriesVolume{133}
+\ArticleNo{17}
+
 \ccsdesc[500]{Computing methodologies~Computational control theory}
 \ccsdesc[500]{Computer systems organization~Embedded software}
 \ccsdesc[500]{Software and its engineering~Real-time systems software}
diff --git a/paper/sec/analysis.tex b/paper/sec/analysis.tex
index 234fbf3d05a46439d1b1b6b99248b28dc9caf1b4..883c10314915ecec0d0ffe10ea3ce9fd536a4d4d 100755
--- a/paper/sec/analysis.tex
+++ b/paper/sec/analysis.tex
@@ -18,8 +18,9 @@ pessimistic~\cite{chen2017probabilistic} or have a high computational
 complexity~\cite{von2018efficiently}. This limits the applicability
 of these techniques in non-trivial cases. Moreover, there are few
 works dealing with joint probabilities of consecutive jobs,
-like~\cite{tanasa2015probabilistic}, but they still \st{suffer from limited} 
-\textcolor{red}{lack of (SE RIFORMULATO COSI' POSSIAMO RISPARMIARE UNA RIGA)}scalability.
+like~\cite{tanasa2015probabilistic}, but they still 
+%suffer from limited 
+\textcolor{red}{lack} scalability.
 
 To handle the scalability issue, we adopt a simulation-based
 approach, backed up by the \emph{scenario
@@ -27,11 +28,13 @@ theory}~\cite{calafiore2006scenario}, that \emph{empirically}
 performs the uncertainty characterization, and provides
 \emph{formal guarantees} on the robustness of the resulting
 estimation. The scenario theory \textcolor{red}{allows us to exploit}
-\st{is capable of exploiting} the fact that simulating the taskset 
+%\st{is capable of exploiting} 
+the fact that simulating the taskset 
 execution (with statistical significance) is less computationally 
 expensive than an analytical approach that runs into the problem of combinatorial explosion of the different possible uncertainty 
-realizations. In practice, this means that we: (i) \st{randomly 
-extract} \textcolor{red}{sample the} execution times from the 
+realizations. In practice, this means that we: (i) 
+%\st{randomly extract}
+\textcolor{red}{sample the} execution times from the 
 probability distributions specified for each 
 task, $f_i^{\mathcal{C}}(c)$, (ii) schedule the tasks, checking the 
 resulting set of sequences $\Omega$, and (iii) find the worst-case 
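To make steps (i)-(iii) concrete, here is a minimal Python sketch of the simulation loop, assuming task objects that expose name, period, and a discrete execution-time distribution (values, probs), plus caller-supplied simulate_schedule and j_seq callables; these names are hypothetical placeholders for illustration, not the tooling used in the paper.

    import random

    def worst_case_sequence(taskset, simulate_schedule, j_seq, n_sim, horizon):
        # Steps (i)-(iii): sample execution times, simulate the schedule,
        # and keep the sequence that maximizes the cost J_seq.
        worst, worst_cost = None, float("-inf")
        for _ in range(n_sim):
            # (i) draw one execution time per job from each task's distribution
            samples = {task.name: [random.choices(task.values, task.probs)[0]
                                   for _ in range(horizon // task.period)]
                       for task in taskset}
            # (ii) schedule the taskset with the sampled times -> sequence omega
            omega = simulate_schedule(taskset, samples, horizon)
            # (iii) keep the worst case according to the chosen cost
            cost = j_seq(omega)
            if cost > worst_cost:
                worst, worst_cost = omega, cost
        return worst, worst_cost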
@@ -49,13 +52,16 @@ process $1-\beta$ \textcolor{red}{according to the number of extracted samples}.
 The Scenario Theory has been developed in the field of robust
 control~\cite{calafiore2006scenario} to provide robustness guarantees
 for convex optimization problems in the presence of probabilistic
-uncertainty. A characteristic of these problems is that accounting
+uncertainty. 
+In these problems,
+%A characteristic of these problems is that 
+accounting
 for all the possible uncertainty realizations might be achieved
 analytically, but is computationally too heavy or results in
 pessimistic bounds. The Scenario Theory proposes an empirical method
 in which samples are drawn from the possible realizations of
-uncertainty. \textcolor{red}{By providing a lower bound on the number of 
-samples to be drawn from the uncetainty space} it provides statistical 
+uncertainty, \textcolor{red}{and gives a lower bound on the number of 
+samples to be drawn}. It provides statistical 
 guarantees \textcolor{red}{on the value of the cost function} with 
 respect to the general case, provided that the sources of uncertainty 
 are the same. 
@@ -80,8 +86,8 @@ $J_{seq}(\omega)$, that determines when we consider a sequence worse
 than another (from the perspective of the controller execution).
 Denoting with $\mu_{\text{tot}}(\omega)$ the total number of job
 skips and deadline misses that the control task experienced in
-$\omega$, and with $\mu_{\text{seq}}(\omega)$ the \st{total} 
-\textcolor{red}{maximum} number of consecutive deadline misses or 
+$\omega$, and with $\mu_{\text{seq}}(\omega)$ the 
+maximum number of consecutive deadline misses or 
 skipped jobs in $\omega$, we use as a cost function
 \begin{equation}\label{eq:Jseq}
   J_{seq}(\omega) = \mu_{\text{tot}}(\omega)\,\mu_{\text{seq}}(\omega)
@@ -92,10 +98,10 @@ of simulated sequences $\Omega = \{ \omega_1, \dots
 \text{arg}\,\max\limits_{\omega \in \Omega}J_{seq}(\omega)$. The
 number of simulations, $n_{\text{sim}}$, is selected based on the
 scenario approach, and provides probabilistic bounds on the
-uncertainty realization, giving us \st{some} formal guarantees on the
-design \textcolor{red}{according to the chosen cost function}.
+uncertainty realization, giving us formal guarantees on the
+design according to the chosen cost function.
 \textcolor{red}{
-The choice of the cost function is anyhow not-univocal. For instance the 
+The choice of the cost function is, however, not unique. For instance, the 
 number of sub-sequences of a given length with at least a given number of 
 deadline misses, or the shortest subsequence with more than a given number of 
 deadline misses would be other viable choices.
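As a concrete reading of the cost J_seq defined above, a short Python sketch follows, assuming the sequence omega is encoded simply as a list of booleans where True marks a missed or skipped job (an assumption of this illustration, not the paper's data structure).

    def j_seq(misses):
        # J_seq = mu_tot * mu_seq: total misses/skips times the longest
        # run of consecutive misses/skips in the sequence.
        mu_tot = sum(misses)
        mu_seq, run = 0, 0
        for missed in misses:
            run = run + 1 if missed else 0
            mu_seq = max(mu_seq, run)
        return mu_tot * mu_seq

    # Example: j_seq([0, 1, 1, 0, 1, 0, 0, 1, 1, 1]) == 6 * 3 == 18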
@@ -148,7 +154,7 @@ interferences between the tasks). In practice, we want to be able to
 detect cascaded effects \textcolor{red}{that might happen due to the 
+probabilistic nature of the execution times of the tasks. Some sampled
+execution times could in fact make the utilization of instances of the taskset greater 
-than one. For this reason} \st{, so} simulations that include several 
+than one. For this reason} simulations that include several 
 hyperperiods should be performed. On top of that, significance with 
 respect to the control of the physical system is required \textcolor{red}{(since the existence of the hyperperiod is not always guaranteed)}, hence 
 the length of the simulated sequences should cover its dynamics.
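One simple way to pick such a length, sketched below in Python under the assumption of integer periods in milliseconds, is to cover a few hyperperiods (when one exists) and the settling time of the plant; the factor of three and the helper names are illustrative, not values from the paper.

    from math import lcm

    def simulation_length(periods_ms, settling_time_ms, n_hyper=3):
        # Cover a few hyperperiods (LCM of the integer task periods) and the
        # time the plant needs to settle; both choices are illustrative.
        hyperperiod = lcm(*periods_ms)
        return max(n_hyper * hyperperiod, settling_time_ms)

    # Example: simulation_length([100, 250, 400], settling_time_ms=5000) -> 6000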
diff --git a/paper/sec/experiments.tex b/paper/sec/experiments.tex
index 9b140732c06c9273fa956ff9edb1a7c292a7d369..21f6c85afd0aad7f53351580211cab621703d824 100644
--- a/paper/sec/experiments.tex
+++ b/paper/sec/experiments.tex
@@ -24,15 +24,14 @@ distributions, our experimental evaluation follows this procedure:
 %
 \item Using the UUnifast algorithm~\cite{bini2005measuring}, we
 generate an initial taskset $\Gamma'$, composed of $N_T-1$ tasks and
-having utilization $U_{\Gamma'}$. We order the tasks following the
-Rate Monotonic priority ordering. In the following, we show examples
+having utilization $U_{\Gamma'}$. We order the tasks using
+Rate Monotonic priority. In the following, we show examples
 where $N_T \in \{5, 10, 20 \}$, and $U_{\Gamma'} \in \{0.70, 0.80
-\}$. The tasks periods are chosen randomly from a bucket of values,
+\}$. Task periods are chosen randomly from a bucket of values,
 ranging between $100\,$ms and $1000\,$ms, with steps of $10\,$ms. The
 execution times generated by the UUnifast algorithm are set as the
-WCETs of the tasks. We accept the generated taskset if all tasks
-respect their (hard) deadlines using the WCET values, and recompute
-the taskset otherwise.
+WCETs of the tasks. All tasks in the generated taskset must
+respect their (hard) deadlines using the WCET values.
 %
 \item For each generated taskset, we build a control task $\tau_d$.
 The control task is set to have the lowest priority in the set. We
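For illustration, a minimal Python sketch of the taskset-generation step described above, assuming the classic UUnifast utilization split and a standard response-time test under Rate Monotonic priorities; the helper names and the regeneration loop are an interpretation of the procedure, not the authors' scripts.

    import math, random

    def uunifast(n_tasks, total_util):
        # Classic UUnifast: split total_util into n_tasks random utilizations.
        utils, remaining = [], total_util
        for i in range(1, n_tasks):
            next_remaining = remaining * random.random() ** (1.0 / (n_tasks - i))
            utils.append(remaining - next_remaining)
            remaining = next_remaining
        utils.append(remaining)
        return utils

    def response_time(i, wcets, periods):
        # Iterative response-time analysis under fixed priorities (index 0 = highest).
        r = wcets[i]
        while True:
            r_next = wcets[i] + sum(math.ceil(r / periods[j]) * wcets[j]
                                    for j in range(i))
            if r_next == r or r_next > periods[i]:
                return r_next
            r = r_next

    def generate_taskset(n_tasks, total_util):
        # Periods drawn from {100, 110, ..., 1000} ms; WCET_i = U_i * T_i.
        # The taskset is accepted only if every task meets its (implicit) deadline.
        while True:
            periods = sorted(random.randrange(100, 1001, 10) for _ in range(n_tasks))
            wcets = [u * t for u, t in zip(uunifast(n_tasks, total_util), periods)]
            if all(response_time(i, wcets, periods) <= periods[i]
                   for i in range(n_tasks)):
                return list(zip(periods, wcets))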
@@ -102,7 +101,7 @@ Finally, the performance of the controlled system, where the control
 update is driven by the sequence of delays and holds from the task
 schedule, is computed using \emph{JitterTime}~\cite{cervin2019jittertime}, a
 simulation-based tool for analysis of control systems performance
-inspired by \emph{Jitterbug} and
+inspired by \emph{Jitterbug}~\cite{lincoln2002jitterbug} and
 \emph{TrueTime}~\cite{cervin2003does}. This new tool, built
 entirely in Matlab, is able to model transitions between different
 states with variable and conditional probabilities (overcoming some
@@ -167,11 +166,11 @@ tasks. More generally, however, the Kill strategy is dominated by
 both the Skip-Next and the Queue(1) performance, which allow the
 design to reach shorter periods and to lower the cost function. The
 Kill strategy, which achieved very good performance when tested
-in isolation (with a single control task), does not handle additional
+with a single control task, does not handle additional
 load well. In fact, the failure of the Kill strategy is due to
 cascaded effects -- killing subsequent jobs due to interference
-introduce long delays for the control signal computation, while
-allowing the job to terminate in the next period (as the other two
+introduces long delays for the control signal, while
+allowing the job to terminate anyway (as the other two
 strategies do) leads to better performance. 
 The Queue(1) and Skip-Next strategies behave similarly for both
 low values of $R_d^B$ and high number of tasks. However, the Queue(1)
diff --git a/paper/sec/method.tex b/paper/sec/method.tex
index 818624e64a1c758124b4bd2eda9614c13011665e..efb970bfb0a51040d0a82e9ee07ba0c8194446bc 100755
--- a/paper/sec/method.tex
+++ b/paper/sec/method.tex
@@ -64,7 +64,7 @@ behavior from simulations of a certain number of control jobs.
 
 \begin{figure}[t]
 \centering
-\begin{tikzpicture}
+\begin{tikzpicture}[scale=0.92,>=latex]
 % ---------------------------------------------- BLOCKS
 \draw[ultra thick] (1.5, 3.5) rectangle +(2, 1) % scenario theory
   node[midway, align=center] {Scenario\\Theory};
@@ -133,7 +133,7 @@ behavior from simulations of a certain number of control jobs.
     {$\xi$: Strategy for\\Deadline Miss} -- (7, 1.6);
 \draw[->, thick] (7, 2.9) -- (7, 3.4);
 \draw[->, thick] (7, 5)
-  node[above] {$\{\Gamma, n_\text{job}\}$} -- (7, 4.6);
+  node[right] {$\{\Gamma, n_\text{job}\}$} -- (7, 4.6);
 
 \end{tikzpicture}
 \caption{Approach Overview.}
@@ -181,8 +181,9 @@ when the taskset is executed and the controller is connected to the
 real plant, using a cost function $J_\text{ctl}$, which allows us to
 compare the performance of different deadline management strategies.
 We can then determine the best deadline management strategy and
-control period, based on the specific control task and the additional
-computational load on our platform.
+control period for the system under analysis.
+%, based on the specific control task and the additional
+%computational load on our platform.
 
 As output of our approach we obtain $y$, the evaluation of each tested
 strategy $\xi$ for the specific problem. As a by-product, we also obtain 
@@ -192,7 +193,7 @@ sequences to understand how to improve the control
 performance (for example, by
 optimizing a certain task in the taskset).
 
-\subsection{Paper Organization}
+\paragraph*{Paper Organization}
 \label{sec:method:organization}
 
 In the following, Section~\ref{sec:model} discusses the model used
diff --git a/paper/sec/model.tex b/paper/sec/model.tex
index b76d94c6afaac47f892adaf8fc055bb43ce0821c..b579da36a7d1832f9c0171f222559d694032d430 100644
--- a/paper/sec/model.tex
+++ b/paper/sec/model.tex
@@ -1,13 +1,13 @@
 \section{System Model and Problem Definition}
 \label{sec:model}
 
-%This section introduces the models used in the paper.
-%Section~\ref{sec:model:taskset} describes the model of the taskset
-%executing on the hardware. Section~\ref{sec:model:plant} discusses
-%the models of plant and
-%control task. Finally, Section~\ref{sec:model:dmstrat}
-%introduces the three strategies used to handle deadline
-%misses.
+This section introduces the models used in the paper.
+Section~\ref{sec:model:taskset} describes the model of the taskset
+executing on the hardware. Section~\ref{sec:model:plant} discusses
+the models of the plant and the
+control task. Finally, Section~\ref{sec:model:dmstrat}
+introduces the three strategies used to handle deadline
+misses.
 
 \subsection{Taskset Model}
 \label{sec:model:taskset}
@@ -44,11 +44,12 @@ distribution $\mathcal{C}_i$ with $N_i$ integer values, ranging
 between a Best Case Execution Time (BCET) $C^{\text{min}}_i$ and a
 Worst Case Execution Time (WCET) $C^{\text{max}}_i$. Furthermore, we
 consider tasks that behave well in most cases, i.e., tasks whose
-probability density functions are skewed towards lower values. In
-fact, we want to capture tasks which experience occasional faulty
-conditions. \pp{While, in principle, our approach can be applied 
-to systems with generic probability density functions, this choice}
-%This choice 
+probability density functions are skewed towards lower values. 
+In fact,
+\pp{while our approach can be applied 
+to systems with generic probability density functions,} 
+we want to capture tasks which experience occasional faulty
+conditions. This choice 
 is in agreement with most works that analyze
 execution time distributions for real-time
 tasks~\cite{wilhelm2008worst}. We will generally refer to the utilization 
@@ -79,8 +80,8 @@ that at least one job of $\tau_d$ respects its deadline, i.e. $R_d^B
 \subsection{Plant and Controller Model}
 \label{sec:model:plant}
 
-The plant to be controlled by $\tau_d$ is described as a Linear
-Time Invariant (LTI), Multi-Input Multi-Output (MIMO) system in
+The plant to be controlled by $\tau_d$ is described as a linear
+time-invariant, multi-input multi-output system in
 continuous time. In line with standard assumptions, we assume the plant to be
 controllable and the state to be fully measurable. The plant dynamics is described as
 %by a vector linear stochastic differential equation
@@ -99,7 +100,7 @@ state vector and ${\mathbf{\dot x}}(t)$ its time derivative. The term
 $\mathbf{u_c}(t)$ is the vector that contains the control signals.
 The vector $\mathbf{v_c}(t)$ represents the plant disturbance, modeled as white noise
 with known covariance matrix $R_c$. The goal of the
-control is to minimize a cost function, defined as the mean value of a quadratic function of the state vector and the control vector,
+control is to minimize a cost function, defined as the mean value of a quadratic function of the state vector and the control vector:
 %
 \begin{equation}
 \label{eq:cost}
@@ -116,7 +117,7 @@ as shown in Figure~\ref{fig:pandc}. \pp{The behavior of these devices can be mod
 %
 \begin{figure}[t]
 	\centering
-	\begin{tikzpicture}[scale=0.78,>=latex]
+	\begin{tikzpicture}[scale=0.72,>=latex]
 	\draw[ultra thick] (0,0) rectangle node[midway] {Sampler} (2,1);
 %	\draw[ultra thick] (4,0) rectangle
 %	node[midway, align=center] {Control\\Task} (6,1);
@@ -180,13 +181,13 @@ synthesis.
 
 \begin{remark}
 \pp{
-In this paper, we consider $\tau_d$ as the task 
-with the lowest priority. However, a similar approach can be used
-if other tasks with priority lower than $\tau_d$ do exist. If this is the
-case, the design proposed hereafter is still valid in principle,
-since those tasks cannot interfere with $\tau_d$.
-However, the choice on the values of $T_d$ should always guarantee 
-the schedulability of the lower priority tasks. 
+In this paper, we work under the assumption that $\tau_d$ is the task 
+with the lowest priority. If other tasks with priority lower
+than $\tau_d$ do exist, the design proposed hereafter is still valid 
+in principle, since those tasks cannot interfere with $\tau_d$.
+However, if this is the case, the choice of the values of $T_d$ 
+should be tied to schedulability guarantees for the lower 
+priority tasks. 
 Due to space constraints, we leave the analysis of 
 this more general case as future work. }
 \end{remark}
@@ -218,7 +219,7 @@ designing a control task with periods smaller than its WCRT, i.e.,
 $T_d< R_d^W$, thus greatly extending the design space. We
 remark that, with $T_d\geq R_d^W$ there are no deadline misses, and
 standard approaches for the control design can be used
-\cite{kim1998task,xu2015exploiting}.
+\cite{kim1998task,aminifar2012designing,xu2015exploiting}.
 
 Choosing $T_d< R_d^W$ implies the risk that the control task will miss some deadlines.
 A deadline miss is a timing violation that can produce unbounded
@@ -284,7 +285,7 @@ next one is put in the ready queue.
 
 \begin{figure}[t]
   \centering
-  \begin{RTGrid}[nonumbers=1,nosymbols=1,width=8cm]{3}{20}
+  \begin{RTGrid}[nonumbers=1,nosymbols=1,width=7cm]{3}{20}
 
   \Label{1.5}{0}{$a_{d,1}$}
   \Label{1.5}{6}{$a_{d,2}$}
diff --git a/paper/sec/related.tex b/paper/sec/related.tex
index 0fb35301fd9e9c95d73456d9976ac8f9d07ad104..15dd70738e1716e35fcb045b2d4e7331a48783ba 100755
--- a/paper/sec/related.tex
+++ b/paper/sec/related.tex
@@ -30,23 +30,25 @@ input-output delay experienced by the control flow comes from the
 interference of higher priority tasks due to limited computational
 resources, which may even cause some jobs to miss their deadlines.
 Unforeseen delays may be caused, for example, by overload
-activations~\cite{hammadeh2014extending, xu2015improved}, or longer
-executions due to cache misses~\cite{davis2013analysis,
-altmeyer2014correctness}. In recent works, systems that experience
+activations~\cite{hammadeh2014extending, xu2015improved}, cache misses~\cite{davis2013analysis,
+altmeyer2014correctness}, or complex interactions between scheduling and
+system state~\cite{biondi2018selecting}. In recent works, systems that experience
 deadline misses are described using the so-called weakly-hard
 model~\cite{bernat2001weakly}. In this model, the possibility of
 missing a deadline is upper-bounded by a constraint $(m,K)$, which
 gives the maximum number of deadline misses $m$ that may happen every $K$
-activation of a task. This model has proved being particularly
+activations of a task. This model has proved to be
 suitable for studying the effects of missed deadlines on the
 performance of control tasks and
 scheduling~\cite{ramanathan1999overload, frehse2014formal}. A
 detailed modeling of the control performance considering different
-deadline miss handling strategies and hit and miss sequences is
-presented in~\cite{pazzaglia2018beyond}. Other works faced the
-co-design problem in overloaded systems by using complex scheduling
+deadline miss handling strategies is
+presented in~\cite{pazzaglia2018beyond}. The effects of missed deadlines
+on system performance have also been studied using co-simulation~\cite{pazzaglia2017framework}. 
+Other works have addressed the
+co-design problem in overloaded systems by using complex
 mechanisms that take into account system stability and processor
-load~\cite{yoshimoto2011optimal, chwa2018closing}.
+load~\cite{gaid2008design, yoshimoto2011optimal, chwa2018closing}.
 
 In this paper, we study the effects of missed deadlines on the
 control performance by describing miss and hit events in a