diff --git a/Assignments/A4/Assig4.pdf b/Assignments/A4/Assig4.pdf
index f77f47bb1b2e6e70f5da3b844e602816df79174b..f25204883944d625193b4c4081abbdfac6cb2daf 100644
Binary files a/Assignments/A4/Assig4.pdf and b/Assignments/A4/Assig4.pdf differ
diff --git a/Assignments/A4/Assig4.tex b/Assignments/A4/Assig4.tex
index 9073aadec368ff3202ac7dc9913825479163fa6a..21dfe789259646f297b0dc18bdce103543110e7a 100644
--- a/Assignments/A4/Assig4.tex
+++ b/Assignments/A4/Assig4.tex
@@ -43,8 +43,8 @@ Bonus marks are available for students that both implement and test their specif
 \section *{Deadlines}
 
 \begin{itemize}
-\item Specification: due 11:59 pm Apr 3
-\item Code and Test Suite (Bonus): due 11:59 pm Apr 3
+\item Specification: due 11:59 pm Apr 5
+\item Code and Test Suite (Bonus): due 11:59 pm Apr 5
 \end{itemize}
 
 \section *{Step \refstepcounter{stepnum} \thestepnum}
diff --git a/Lectures/L35_Analysis/Analysis.pdf b/Lectures/L35_Analysis/Analysis.pdf
index 7ff09d632110a55b62430abc3be110ba7f757b67..7b958622c82827918494bd69080b03c82fc1c181 100644
Binary files a/Lectures/L35_Analysis/Analysis.pdf and b/Lectures/L35_Analysis/Analysis.pdf differ
diff --git a/Lectures/L35_Analysis/Analysis.tex b/Lectures/L35_Analysis/Analysis.tex
index bf8aad776e221704f6a0d688afa906fd7c807135..b2b6ea3cedc6c21c7467c523c257b6fb6bf13429 100755
--- a/Lectures/L35_Analysis/Analysis.tex
+++ b/Lectures/L35_Analysis/Analysis.tex
@@ -61,7 +61,6 @@
 \item Model checking
 \end{itemize}
 \item Debugging
-\item Verifying performance and reliability
 \end{itemize}
 
 \end{frame}
@@ -79,7 +78,7 @@
 
 \item A4
 \bi
-\item Due April 3 at 11:59 pm
+\item \structure{Due April 5 at 11:59 pm (new deadline)}
 \item \structure{Advice}
 \bi
 \item \structure{Identify the module secrets, write the MIS syntax, start
@@ -541,120 +540,5 @@ View all of these as complementary
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
-\begin{frame}
-\frametitle{Verifying Performance}
-
-\begin{itemize}
-\item Worst case analysis versus average behaviour
-\item For worst case focus on proving that the system response time is bounded
-  by some function of the external requests
-\item Standard deviation
-\item Analytical versus experimental approaches
-\item Consider verifying the performance of a pacemaker
-\item Visualize performance via
-\bi
-\item Identify a measure of performance (time, storage, FLOPS, accuracy, etc.)
-\item Identify an independent variable (problem size, number of processors,
-  condition number, etc.)
-\ei
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{frame}
-\frametitle{Verifying Reliability}
-
-\begin{itemize}
-\item There are approaches to measuring reliability on a probabilistic basis, as
-  in other engineering fields
-\item Unfortunately there are some difficulties with this approach
-\item Independence of failures does not hold for software
-\item Reliability is concerned with measuring the probability of the occurrence
-  of failure
-\item Meaningful parameters include
-\begin{itemize}
-\item Average total number of failures observed at time $t$: $AF(t)$
-\item Failure intensity: $FI(T)=AF'(t)$
-\item Mean time to failure at time $t$: $MTTF(t) = 1/FI(t)$
-\end{itemize}
-\item Time in the model can be execution or clock or calendar time
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{frame}
-\frametitle{Verifying Subjective Qualities}
-
-\begin{itemize}
-\item \structure{What do you think is meant by empirical software engineering?}
-\item \structure{What problems might be studied by empirical software
-    engineering?}
-\item \structure{Does the usual engineering analogy hold for empirical software
-    engineering?}
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{frame}
-\frametitle{Verifying Subjective Qualities}
-
-\begin{itemize}
-\item Consider notions like simplicity, reusability, understandability …
-\item Software science (due to Halstead) has been an attempt
-\item Tries to measure some software qualities, such as
-abstraction level, effort, …
-\item by measuring some quantities on code, such as
-\bi
-\item $\eta_1$, number of distinct operators in the program
-\item $\eta_2$, number of distinct operands in the program
-\item $N_1$, number of occurrences of operators in the program
-\item $N_2$, number of occurrences of operands in the program
-\ei
-\item Extract information from repo, including number of commits, issues etc.
-\item Empirical software engineering
-\item Appropriate analogy switches from engineering to medicine
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{frame}
-\frametitle{Source Code Metric}
-
-\begin{itemize}
-\item \structure{What are the consequences of complex code?}
-\item \structure{How might you measure code complexity?}
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{frame}
-\frametitle{McCabe's Source Code Metric}
-
-\begin{itemize}
-\item Cyclomatic complexity of the control graph
-\bi
-\item $C = e - n + 2 p$
-\item $e$ is number of edges, $n$ is number of nodes, and $p$ is number of
-  connected components
-\ei
-\item McCabe contends that well-structured modules have $C$ in range $3 .. 7$,
-  and $C = 10$ is a reasonable upper limit for the complexity of a single module
-\item Confirmed by empirical evidence 
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
 \end{document}
 
diff --git a/Lectures/L36_DesignPatterns/DesignPatterns.pdf b/Lectures/L36_DesignPatterns/DesignPatterns.pdf
index dce7a90cd5746053c6547703d8f689a4b6a39bad..bf0e5228f73a942f952fca9b6efc1c8f273add14 100644
Binary files a/Lectures/L36_DesignPatterns/DesignPatterns.pdf and b/Lectures/L36_DesignPatterns/DesignPatterns.pdf differ
diff --git a/Lectures/L36_DesignPatterns/DesignPatterns.tex b/Lectures/L36_DesignPatterns/DesignPatterns.tex
index 60dcd1329866f1fd195b25c341b75368d1b1f666..1619649c532038645364f1c5c31ea3a813264f91 100755
--- a/Lectures/L36_DesignPatterns/DesignPatterns.tex
+++ b/Lectures/L36_DesignPatterns/DesignPatterns.tex
@@ -30,7 +30,7 @@
 
 \input{../def-beamer}
 
-\newcommand{\topic}{36 Design Patterns DRAFT}
+\newcommand{\topic}{36 Design Patterns}
 
 \input{../titlepage}
 
@@ -48,7 +48,7 @@
 \begin{itemize}
 \item Administrative details
 \item Debugging
-\item Verifying other qualities
+\item Verifying performance and reliability
 \item Design patterns
 \end{itemize}
 
@@ -63,24 +63,26 @@
 
 \item Today's slides are partially based on slides by Dr.\ Wassyng and on van Vliet (2000)
 
-\end{itemize}
-
-\end{frame}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\begin{frame}
-\frametitle{Debugging}
+\item A4
+\bi
+\item Due April 5 at 11:59 pm
+\ei
+
+\item Course evaluations
+\bi
+\item CS 2ME3: 42\%
+\item SE 2AA4: 56\%
+\item \href{https://evals.mcmaster.ca/login.php}{https://evals.mcmaster.ca/}
+\item Closes: 11:59 pm, Monday, April 10
+\ei
+
+\item CAS poster and demo competition
+\bi
+\item April 6, 4--6 pm
+\item \structure{ITB/201}
+\item 12 graduate student submissions
+\ei
 
-\begin{itemize}
-\item The activity of locating and correcting errors
-\item It can start once a failure has been detected
-\item The goal is closing the gap between a fault and a failure
-\begin{itemize}
-\item Memory dumps, watch points
-\item Intermediate assertions can help
-\item Tools like gdb, valgrind, etc.
-\end{itemize}
 \end{itemize}
 
 \end{frame}
@@ -92,11 +94,17 @@
 
 \begin{itemize}
 \item Worst case analysis versus average behaviour
-\item For worst case focus on proving that the system response time is bounded by some function of the external
-requests
+\item For worst case focus on proving that the system response time is bounded
+  by some function of the external requests
 \item Standard deviation
 \item Analytical versus experimental approaches
 \item Consider verifying the performance of a pacemaker
+\item Visualize performance (see the sketch on the next slide) via
+\bi
+\item Identify a measure of performance (time, storage, FLOPS, accuracy, etc.)
+\item Identify an independent variable (problem size, number of processors,
+  condition number, etc.)
+\ei
 \end{itemize}
 
 \end{frame}
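+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% A minimal measurement sketch, assuming Python; the workload f and the
+% chosen sizes are hypothetical placeholders, not part of the course code
+\begin{frame}[fragile]
+\frametitle{Verifying Performance: Measurement Sketch}
+
+\begin{verbatim}
+import time
+
+def measure(f, sizes):
+    # measure of performance: wall-clock time
+    # independent variable: problem size n
+    results = []
+    for n in sizes:
+        data = list(range(n))  # hypothetical workload
+        t0 = time.perf_counter()
+        f(data)
+        results.append((n, time.perf_counter() - t0))
+    return results
+
+# e.g. measure(sorted, [10**3, 10**4, 10**5]);
+# plotting time against n on a log-log scale
+# estimates the empirical order of growth
+\end{verbatim}
+
+\end{frame}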
@@ -107,10 +115,12 @@ requests
 \frametitle{Verifying Reliability}
 
 \begin{itemize}
-\item There are approaches to measuring reliability on a probabilistic basis, as in other engineering fields
+\item There are approaches to measuring reliability on a probabilistic basis, as
+  in other engineering fields
 \item Unfortunately there are some difficulties with this approach
 \item Independence of failures does not hold for software
-\item Reliability is concerned with measuring the probability of the occurrence of failure
+\item Reliability is concerned with measuring the probability of the occurrence
+  of failure
 \item Meaningful parameters include
 \begin{itemize}
 \item Average total number of failures observed at time $t$: $AF(t)$
@@ -124,6 +134,76 @@ requests
 
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
+\begin{frame}
+\frametitle{Verifying Subjective Qualities}
+
+\begin{itemize}
+\item \structure{What do you think is meant by empirical software engineering?}
+\item \structure{What problems might be studied by empirical software
+    engineering?}
+\item \structure{Does the usual engineering analogy hold for empirical software
+    engineering?}
+\end{itemize}
+
+\end{frame}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{frame}
+\frametitle{Verifying Subjective Qualities}
+
+\begin{itemize}
+\item Consider notions like simplicity, reusability, understandability …
+\item Software science (due to Halstead) is one attempt to quantify such
+  qualities
+\item It tries to measure software qualities, such as abstraction level,
+  effort, …
+\item by counting quantities in the code (combined into derived measures on
+  the next slide), such as
+\bi
+\item $\eta_1$, number of distinct operators in the program
+\item $\eta_2$, number of distinct operands in the program
+\item $N_1$, number of occurrences of operators in the program
+\item $N_2$, number of occurrences of operands in the program
+\ei
+\item Extract information from the repo, including the number of commits,
+  issues, etc.
+\item This is the domain of empirical software engineering
+\item The appropriate analogy switches from engineering to medicine
+\end{itemize}
+
+\end{frame}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
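+
+% A sketch of how Halstead's counts combine into derived measures; these
+% standard formulas and the tiny worked example are added for illustration
+\begin{frame}
+\frametitle{Halstead's Derived Measures: Sketch}
+
+\begin{itemize}
+\item Vocabulary $\eta = \eta_1 + \eta_2$ and length $N = N_1 + N_2$
+\item Volume: $V = N \log_2 \eta$
+\item Difficulty: $D = \frac{\eta_1}{2} \cdot \frac{N_2}{\eta_2}$
+\item Effort: $E = D \cdot V$
+\item For example, for \texttt{x = x + 1}: operators $\{=, +\}$ give
+  $\eta_1 = 2$, $N_1 = 2$; operands $\{x, 1\}$ give $\eta_2 = 2$, $N_2 = 3$;
+  so $V = 5 \log_2 4 = 10$
+\end{itemize}
+
+\end{frame}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%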
+
+\begin{frame}
+\frametitle{Source Code Metric}
+
+\begin{itemize}
+\item \structure{What are the consequences of complex code?}
+\item \structure{How might you measure code complexity?}
+\end{itemize}
+
+\end{frame}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{frame}
+\frametitle{McCabe's Source Code Metric}
+
+\begin{itemize}
+\item Cyclomatic complexity of the control-flow graph (worked example on the
+  next slide)
+\bi
+\item $C = e - n + 2 p$
+\item $e$ is number of edges, $n$ is number of nodes, and $p$ is number of
+  connected components
+\ei
+\item McCabe contends that well-structured modules have $C$ in the range
+  $3..7$, and that $C = 10$ is a reasonable upper limit for the complexity of
+  a single module
+\item These limits are supported by empirical evidence
+\end{itemize}
+
+\end{frame}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
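+
+% A worked example of the formula above, assuming Python; the function f is
+% a hypothetical illustration, not from the course materials
+\begin{frame}[fragile]
+\frametitle{McCabe's Metric: Worked Example}
+
+\begin{verbatim}
+def f(x):
+    while x > 10:        # decision 1
+        if x % 2 == 0:   # decision 2
+            x = x // 2
+        else:
+            x = x - 1
+    return x
+\end{verbatim}
+
+\begin{itemize}
+\item Control-flow graph: $e = 7$ edges, $n = 6$ nodes, $p = 1$ component
+\item $C = e - n + 2p = 7 - 6 + 2 = 3$
+\item Equivalently, the number of decisions plus one
+\end{itemize}
+
+\end{frame}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%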
+
 \begin{frame}
 \frametitle{Design Patterns}