\documentclass[11pt]{article}
\usepackage{fullpage}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{graphicx}
\usepackage[]{algorithm2e}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{array}
\usepackage{multirow}
%, amsthm, amssymb, bm, natbib, longtable}
\input{6140ml-style.tex}
\newcommand\MyBox[2]{
\fbox{\lower0.75cm
\vbox to 1.7cm{\vfil
\hbox to 2.5cm{\hfil\parbox{5.0cm}{#1\\#2}\hfil}
\vfil}%
}%
}
\DeclareMathOperator*{\argmin}{arg\,min}
\begin{document}
% first argument is lecture number
% 2nd argument is lecture date
% 3rd argument is scribe name
\myheader{5}{March 16}{Bilal Ahmed \& Virgil Pavlu}{The Perceptron Algorithm\footnote{These lecture notes are intended for in-class use only.}}
\section{Introduction}
Consider the problem of binary classification where we have $N$ data points as shown in Figure-\ref{fig:linseparableDataset}. We can observe that the decision boundary between the two classes (blue and red points) is a straight line. Datasets that can be separated by a straight line are known as linearly separable datasets. Generalizing the example from two to $m$ dimensions, a linearly separable dataset is one for which the decision boundary between the two classes is a linear function of the features $x$. Figure-\ref{fig:nonseparableDataset} shows an example where the two classes are not linearly separable.
\begin{figure}[!h]
\centering
\begin{subfigure}[b]{0.49\textwidth}
\centering
\includegraphics[scale=0.7]{linearDataset.jpg}
\caption{}
\label{fig:linseparableDataset}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.49\textwidth}
\centering
\includegraphics[scale=0.7]{nonlinearDataset.png}
\caption{}
\label{fig:nonseparableDataset}
\end{subfigure}
\caption{Linear (a) and non-linear (b) separability for binary classification in two-dimensions. The blue points represent the positive class and the red points belong to the negative class.}
\label{fig:separabilityPlots}
\end{figure}
More formally, for a data set having $N$ instances $(x_{i},y_{i})$, where $x_{i} \in \mathbb{R}^{m}, y_{i} \in \{-1,1\}$, a weight vector $w \in \mathbb{R}^m$ and a threshold $\theta$, if the following conditions are satisfied:
\[
\begin{cases}
\mathbf{w}^{T}\mathbf{x}_{i} > \theta \ ; & \text{if}\ y_{i}=1 \\
\mathbf{w}^{T}\mathbf{x}_{i} < \theta \ ; & \text{if}\ y_{i}=-1
\end{cases}
\]
then the dataset is linearly separable. Here $\mathbf{w}$ defines the normal vector for the hyperplane that separates the two classes in the $m$-dimensional feature space. The learning task in such a setting would be to learn the $m+1$ parameters corresponding to the weight vector $\mathbf{w}$ and the threshold $\theta$. We can absorb the threshold into the weight vector by observing that the above conditions can be written more compactly as $\mathbf{w}^{T}\mathbf{x}_{i} - \theta > 0$, so that we can define an augmented weight vector $\tilde{\mathbf{w}}=[ w_{0} \ w_{1} \ldots w_{m}]^{T}$ (note that $w_{0}=\theta$) and also augment our feature set with a constant feature that is always equal to one so that every instance is $\tilde{\mathbf{x}}=[ x_{0} \ x_{1} \ldots x_{m}]^{T}$, where $x_{0}=1$. The above conditions can now be stated as:
\begin{equation}
\begin{cases}
\mathbf{\tilde{w}}^{T}\mathbf{\tilde{x}}_{i} > 0 \ ; & \text{if}\ y_{i}=1 \\
\mathbf{\tilde{w}}^{T}\mathbf{\tilde{x}}_{i} < 0 \ ; & \text{if}\ y_{i}=-1
\end{cases}
\label{eqn:perceptronClassificationRule}
\end{equation}
It is more convenient to express the output of the perceptron in terms of the $\text{sgn}(.)$ function, so that $l_{i}=\text{sgn}(\mathbf{\tilde{w}}^{T}\mathbf{\tilde{x}}_{i})$, where $l_{i}$ is the label predicted by the perceptron for the $i^{th}$ instance and the $\text{sgn}(.)$ function is defined as:
\[
\text{sgn}(x)=
\begin{cases}
1 & \ ; x>0\\
-1 & \ ; \text{otherwise}
\end{cases}
\]
In the rest of the discussion we will assume that we are working with augmented features and to keep the notation as clear as possible we are going to revert back to $\mathbf{w}$ and $\mathbf{x}$.
\section{The Perceptron}
A perceptron takes as input a set of $m$ real-valued features and calculates their linear combination. If the linear combination is above a pre-set threshold it outputs a $1$ otherwise it outputs a $-1$ as per Equation-\ref{eqn:perceptronClassificationRule}, which is also called the perceptron classification rule. We can use the perceptron training algorithm to learn the decision boundary for linearly separable datasets. Algorithm-\ref{alg:ptronTraining} shows the perceptron training algorithm.
\begin{algorithm}[!b]
\KwData{Training Data:$(x_{i},y_{i});\ \forall i \in \{1,2, \ldots,N\}$, Learning Rate: $\eta$}
\KwResult{Separating Hyper-plane coefficients : $\mathbf{w}^{*}$}
Initialize $\mathbf{w} \leftarrow \mathbf{0}$\;
\Repeat{convergence}{
get example ($x_{i},y_{i}$)\;
$\hat{y}_{i} \leftarrow w^{T}x_{i}$ \;
\uIf {$\hat{y}_{i}y_{i} \leq 0$} {$w \leftarrow w + \eta y_{i} x_{i}$}
}
\caption{The perceptron training algorithm.}
\label{alg:ptronTraining}
\end{algorithm}
\subsection{Example: Learning the boolean AND function for two variables}
Consider the task of learning the AND function for two boolean variables $x_{1}$ and $x_{2}$. We can easily generate the data as there are only four possible instances, as shown in Table-\ref{tableAND}. These instances along with their labels are plotted in Figure-\ref{fig:perceptron0}. In order to apply the perceptron algorithm we will map an output of $0$ to $-1$ for the AND function.
\begin{table}[!t]
\caption{Data for learning the boolean AND function.}
\label{tableAND}
\begin{center}
\begin{tabular}{ |c|c|c|c|c|}
\hline
i & $x_{0}$ & $x_{1}$ & $x_{2}$ & AND($x_{1},x_{2}$)\\
\hline
1. & 1 & 0 & 0 & 0 \\
2. & 1 & 0 & 1 & 0\\
3. & 1 & 1 & 0 & 0\\
4. & 1 & 1 & 1 & 1\\
\hline
\end{tabular}
\end{center}
\end{table}
Below we show the updates to the weight vector as it makes its first pass through the training data processing one instance at a time. In neural network nomenclature, a complete pass through the training data is known as an \emph{epoch}. During an epoch the updates made to the weight vector are dependent on the order in which the instances are presented to the algorithm. The first epoch of the perceptron training algorithm on the training data in Table-\ref{tableAND} (in the same order and with a learning rate $\eta$ of 1) is:
\begin{enumerate}
\item $\hat{y}_{1}=\mathbf{w}^{T}\mathbf{x}_{1}=0;$ therefore, $\mathbf{w} \leftarrow \mathbf{w}+ (1)(-1)\mathbf{x}_{1} = [-1 \ 0 \ 0]^{T}$
\item $\hat{y}_{2}=\mathbf{w}^{T}\mathbf{x}_{2}=-1;$ therefore, no update to $\mathbf{w}$
\item $\hat{y}_{3}=\mathbf{w}^{T}\mathbf{x}_{3}=-1;$ therefore, no update to $\mathbf{w}$
\item $\hat{y}_{4}=\mathbf{w}^{T}\mathbf{x}_{4}=-1;$ therefore, $\mathbf{w} \leftarrow \mathbf{w}+ (1)(1)\mathbf{x}_{4} = [0 \ 1 \ 1]^{T}$
\end{enumerate}
The decision boundary corresponding to the weight vector at the end of the first epoch is shown in Figure-\ref{fig:fig:perceptron1}. Figure-\ref{fig:perceptronExamplePlots}(a-d) shows the decision boundary at the end of different epochs in the perceptron training algorithm and the final decision boundary after convergence. The final weight vector in this case is found to be $[-4\ 3\ 2]^{T}$; note that the first weight corresponds to the constant feature and hence defines the threshold $\theta$.
\begin{figure}[!h]
\centering
\begin{subfigure}[b]{0.49\textwidth}
\centering
\includegraphics[scale=0.4]{perc_AND_0.pdf}
\caption{}
\label{fig:perceptron0}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.49\textwidth}
\centering
\includegraphics[scale=0.4]{perc_AND_1.pdf}
\caption{}
\label{fig:fig:perceptron1}
\end{subfigure} \\
\begin{subfigure}[b]{0.49\textwidth}
\centering
\includegraphics[scale=0.4]{perc_AND_2.pdf}
\caption{}
\label{fig:fig:perceptron2}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.49\textwidth}
\centering
\includegraphics[scale=0.4]{perc_AND_Final.pdf}
\caption{}
\label{fig:fig:perceptronFinal}
\end{subfigure}
\caption{Learning the boolean AND function for two variables using the perceptron. (a) shows the initial decision boundary (b) the blue line shows the updated decision boundary after the first pass through the data (first epoch) (c) the green line is the decision boundary after the second epoch and (d) shows the final decision boundary (black line).}
\label{fig:perceptronExamplePlots}
\end{figure}
\subsection{Hypothesis space for the perceptron}
The hypothesis space for the perceptron is $\mathbb{R}^{m}$, and any $\mathbf{w} \in \mathbb{R}^{m}$ constitutes a valid hypothesis. The goal of the learning algorithm is to find the weight-vector $\mathbf{w}^{*} \in \mathbb{R}^{m}$ that separates the two classes. For linearly separable datasets there would be a continuum of weight vectors that fulfill this criterion; the perceptron algorithm is guaranteed to converge to a separating hyperplane, as we show next. The particular weight vector that the perceptron training algorithm converges to is dependent on the learning rate $\eta$ and the initial weight values.
\subsection{Convergence of the perceptron training algorithm}
Consider that we have training data as required by the perceptron algorithm. We are working with datasets that are linearly separable and hence we will assume $\exists \ \mathbf{w}_{opt} \in \mathbb{R}^{m}$ that perfectly separates the data and we will further assume that $\exists \ \gamma \in \mathbb{R}$ such that for any instance in the training set we have:
\begin{equation}
y_{i}\mathbf{w}_{opt}^{T} \mathbf{x}_{i} \geq \gamma \ ;\ \forall i \in \{1,2,\ldots,N\}
\label{eqn:margin}
\end{equation}
Since we have finite data, we can assume without loss of generality that:
\[
\| \mathbf{x}_{i} \| \leq R \ ; \forall i \in \{1,2,\ldots,N\}
\]
The quantity on the left in Equation-\ref{eqn:margin} is proportional to the distance of an instance from the separating hyperplane. For all instances belonging to the positive class i.e., $y_{i}=1$ we have $\mathbf{w}_{opt}^{T}\mathbf{x}_{i} \geq \gamma$ and $\mathbf{w}_{opt}^{T}\mathbf{x}_{i} \leq -\gamma$ for all instances with $y_{i}=-1$. This means that the optimal hyperplane $\mathbf{w}_{opt}$ separates the two classes with at least a distance proportional to $\gamma$.
Let $\mathbf{w}_{k}$ be the weight vector after the perceptron makes the $k^{th}$ mistake on an instance $(\mathbf{x}_{j},y_{j})$, so that
\[
\mathbf{w}_{k} = \mathbf{w}_{k-1} + y_{j}\mathbf{x}_{j}
\]
to show that the perceptron training algorithm will learn the decision boundary within a finite number of steps we will bound the number of mistakes the perceptron makes on the training dataset.
\noindent Consider the inner product between $\mathbf{w}_{opt}$ and $\mathbf{w}_{k}$.
\begin{eqnarray*}
\mathbf{w}_{k}^{T} \mathbf{w}_{opt} &=& (\mathbf{w}_{k-1} + y_{j}\mathbf{x}_{j})^{T} \mathbf{w}_{opt} \\
&=& \mathbf{w}_{k-1}^{T} \mathbf{w}_{opt} + y_{j} \mathbf{x}_{j} ^{T} \mathbf{w}_{opt} \\
&\geq& \mathbf{w}_{k-1}^{T} \mathbf{w}_{opt} + \gamma \ \ (\because y_{j}\mathbf{w}_{opt}^{T} \mathbf{x}_{j} \geq \gamma)
\end{eqnarray*}
since we started with $\mathbf{w}_{0}=\mathbf{0}$, by induction we have that,
\begin{equation}
\mathbf{w}_{k}^{T} \mathbf{w}_{opt} \geq k \gamma
\label{eqn:lowerBound}
\end{equation}
The Cauchy--Schwarz inequality states that:
\begin{equation}
(\mathbf{a}^{T}\mathbf{b})^{2} \leq \|\mathbf{a}\|^{2}\|\mathbf{b}\|^{2} \ \ \ ;\ \forall \mathbf{a},\mathbf{b} \in \mathbb{R}^{m}
\label{eqn:cauchyshwarz}
\end{equation}
Next we will look at the norm of $\mathbf{w}_{k}$:
\begin{eqnarray*}
\|\mathbf{w}_{k}\|^{2} =& (\mathbf{w}_{k-1} + y_{j}\mathbf{x}_{j})^{T} (\mathbf{w}_{k-1} + y_{j}\mathbf{x}_{j})& \\
=& \|\mathbf{w}_{k-1}\|^{2} + 2 y_{j} \mathbf{w}_{k-1}^{T}\mathbf{x}_{j} + \|\mathbf{x}_{j}\|^{2}& \\
\leq& \|\mathbf{w}_{k-1}\|^{2} + R^{2} & \ \ \ (\because y_{j} \mathbf{w}_{k-1}^{T}\mathbf{x}_{j} \leq 0)
\end{eqnarray*}
since we started with $\mathbf{w}_{0}=\mathbf{0}$, by induction we have that,
\begin{equation}
\|\mathbf{w}_{k}\|^{2} \leq kR^{2}
\label{eqn:upperBound}
\end{equation}
Combining Equations \ref{eqn:lowerBound}-\ref{eqn:upperBound}, we get:
\[
k^{2} \gamma^{2} \leq (\mathbf{w}_{k}^{T} \mathbf{w}_{opt})^2 \leq \|\mathbf{w}_{k}\|^{2} \|\mathbf{w}_{opt}\|^{2} \leq kR^{2} \|\mathbf{w}_{opt}\|^{2}
\]
from the above we can see that:
\begin{equation}
k \leq \big(\frac{R}{\gamma}\big)^{2} \|\mathbf{w}_{opt}\|^{2}
\label{eqn:mistakeBound}
\end{equation}
This shows that based on our initial assumptions, the perceptron training algorithm will stop after making a finite number of mistakes (Equation-\ref{eqn:mistakeBound}) irrespective of the sequence in which the samples are presented.
\subsection{Is there a loss function here?}
The loss function for the perceptron is given as:
\begin{equation}
\mathcal{L}_{p}(\mathbf{x},y,\mathbf{w})= \max(0, -y\mathbf{w}^{T}\mathbf{x})
\label{eqn:perceptronLoss}
\end{equation}
which is zero when the instance is classified correctly, and is proportional to the signed distance of the instance from the hyperplane when it is incorrectly classified. Note, that the instance is misclassified and is therefore on the wrong side of the hyperplane. Figure-\ref{fig:perceptronLoss} shows a plot of the perceptron loss function.
\begin{figure}
\centering
\includegraphics[scale=0.5]{percLoss.pdf}
\caption{The perceptron loss function.}
\label{fig:perceptronLoss}
\end{figure}
Perceptron loss is a special case of the Hinge loss, which we will encounter when discussing support vector machines. Also note that the perceptron loss involves the $\gamma$ from Equation-\ref{eqn:margin}, and optimizes the negative $\gamma$ of the misclassified instances.
\section{Inseparable Data}
What happens when the data is not linearly separable? Based on our previous discussion the training algorithm will not halt. We can impose a limit on the number of iterations to make sure that the algorithm stops, but in this case we have no guarantees about the final weight vector. A possible avenue is to change the loss function so that instead of looking for a perfect classification we find the ``best fit'' to the decision boundary. In essence we are looking for a weight vector that does not perfectly separate the data but defines a decision boundary that has the minimum number of misclassifications on the training data.
In order to learn such a decision boundary using the perceptron, we need to first change the loss function, and in this case we can use the squared loss between the true labels and the predictions of the perceptron. We will be working with an unthresholded perceptron i.e., we will compute the output of the perceptron as:
\[
o(\mathbf{x})=\mathbf{w}^{T}\mathbf{x}
\]
as opposed to $\text{sgn}(\mathbf{w}^{T}\mathbf{x})$. This is known as a linear unit.
The squared loss function over the training data is given as:
\begin{equation}
E(\mathbf{w})= \frac{1}{2}\sum_{i=1}^{N}(y_{i}-o_{i})^2
\label{eqn:squaredLoss}
\end{equation}
We can optimize this loss function by using gradient descent. The gradient of $E(.)$ can be calculated as:
\[
\nabla_{\mathbf{w}} E = \sum_{i=1}^{N} (y_{i}-o_{i}) (-\mathbf{x}_{i})
\]
and the gradient descent update to $\mathbf{w}$ is then simply,
\[
\mathbf{w} \leftarrow \mathbf{w} - \eta \nabla E
\]
The gradient descent algorithm is listed in Algorithm-\ref{alg:gradDescSqLoss}.
\begin{algorithm}[!t]
\KwData{Training Data:$(x_{i},y_{i});\ \forall i \in \{1,2, \ldots,N\}$, Learning Rate: $\eta$}
\KwResult{Optimal Hyper-plane coefficients based on squared loss: $\mathbf{w}^{*}$}
Initialize $\mathbf{w} \leftarrow $ random weights \;
\Repeat{convergence}{
calculate $\nabla E$ \;
$w \leftarrow w - \eta \nabla E$}
\caption{Gradient descent for training a linear unit. Note, that the update rule for the weight vector involves the calculation of the loss over the entire training set as compared to the perceptron training algorithm where we update the weight vector using one instance at a time.}
\label{alg:gradDescSqLoss}
\end{algorithm}
As we are calculating the gradient ($\nabla E$) based on the entire training dataset, the resulting gradient descent algorithm is also called the batch gradient descent algorithm. We can modify the batch update to work with single examples in which case the gradient is approximated as:
\[
\nabla_{\mathbf{w}} E(\mathbf{x}_{i}) = (y_{i}-o_{i}) (-\mathbf{x}_{i})
\]
This is also known as stochastic gradient descent where we update the parameters based on a single example. The resulting training algorithm for a linear unit is shown in Algorithm-\ref{alg:stochGDLU}.
\begin{algorithm}[!h]
\KwData{Training Data:$(x_{i},y_{i});\ \forall i \in \{1,2, \ldots,N\}$, Learning Rate: $\eta$}
\KwResult{Optimal Hyper-plane coefficients based on squared loss: $\mathbf{w}^{*}$}
Initialize $\mathbf{w} \leftarrow $ random weights \;
\Repeat{convergence}{
get example ($x_{i},y_{i}$)\;
$o_{i} \leftarrow w^{T}x_{i}$ \;
$w \leftarrow w + \eta (y_{i}-o_{i}) \mathbf{x}_{i}$
}
\caption{Stochastic gradient descent algorithm for training a linear unit.}
\label{alg:stochGDLU}
\end{algorithm}
\noindent Stochastic gradient descent works by processing a single example each time as opposed to the batch gradient descent that needs to process the entire dataset to carry out a single update. This makes the stochastic gradient descent algorithm faster than the conventional batch gradient descent.
\section{References:}
\begin{enumerate}
\item{Andrew Ng's notes on perceptron. (cs229.stanford.edu/notes/cs229-notes6.pdf)}
\item{Roni Khardon's notes. (www.cs.tufts.edu/\textasciitilde{}roni/Teaching/CLT/LN/lecture14.pdf)}
\item{\textit{Machine Learning}, Tom Mitchell.}
\end{enumerate}
\end{document}