% -*- compile-command: "rubber -d --unsafe hw-06-divide-and-conquer.tex" -*-
\documentclass[11pt]{tufte-handout}
\usepackage[num=6,date=October\ 18,topic=Divide\ and\ conquer]{algorithms-hw}
\begin{document}
\coversheet
\begin{question} \label{q:alg-choice} Suppose you are choosing between
the following three algorithms:
\begin{enumerate}
\item Algorithm A solves problems by dividing them into five
subproblems of half the size, recursively solving each subproblem,
and then combining the solutions in linear time.
\item Algorithm B solves problems of size $n$ by recursively solving
two subproblems of size $n-1$ and then combining the solutions in
constant time.
\item Algorithm C solves problems of size $n$ by dividing them into
nine subproblems of size $n/3$, recursively solving each subproblem,
and then combining the solutions in $O(n^{2})$ time.
\end{enumerate}
What is the running time of each of these algorithms (in asymptotic
notation), and which algorithm would you choose?
\end{question}
\begin{question}[K\&T 5.1]
You are interested in analyzing some hard-to-obtain data from two
separate databases. Each database contains $n$ numerical values---so
there are $2n$ values total---and you may assume that no two values
are the same. You'd like to determine the median of this set of $2n$
values, which we define to be the $n$th smallest
value.\marginnote{Notice that according to this definition, the median
of $[1,3,4,7]$ is $3$, \emph{not} $3.5$.}
However, the only way you can access these values is through
\emph{queries} to the databases. In a single query, you can specify a
value $k$ to one of the two databases, and the chosen database will
return the $k$th smallest value that it contains. Since queries are
expensive, you would like to compute the median using as few queries
as possible.
Give an algorithm that finds the median value using $O(\log n)$
queries. If it makes things easier, you may assume that $n$ is a
power of two.
As always, make sure that you justify the correctness of your
algorithm, and analyze its time complexity.
\end{question}
\begin{question} \label{q:fib}
Recall that the \emph{Fibonacci numbers} $F_n$ are defined
recursively by
\begin{align*}
F_0 &= 0 \\
F_1 &= 1 \\
F_n &= F_{n-1} + F_{n-2},
\end{align*}
with the first few given by $0,1,1,2,3,5,8,13,21,34,55,\dots$
As you may know, directly turning the definition of Fibonacci
numbers into a recursive implementation is a terrible idea; the
resulting algorithm takes $\Theta(\varphi^n)$ time (where
$\varphi = (1 + \sqrt 5)/2 \approx 1.618\dots$).
The usual iterative algorithm to repeatedly calculate the next
Fibonacci number from the previous two seems like it would take
$\Theta(n)$ time to compute $F_n$: it just loops from $2$ to $n$ and
does one addition each loop, right? Well\dots yes, it does one
addition each loop, but we can't really assume that these additions
take constant time, because the Fibonacci numbers involved can get
quite large! Let's analyze the situation more carefully.
\begin{enumerate}[label=(\alph*)]
\item Recall from a previous homework that the size of the $n$th
Fibonacci number $F_n$ is $\Theta(\varphi^n)$. Given this fact,
approximately how many bits (in terms of $\Theta$) are needed to
represent $F_n$? Simplify your answer as much as possible.
\item Now suppose we implement the usual iterative algorithm as
follows: initialize an array $F$ of size $n+1$ (indexed from $0$ to
$n$), which can hold
arbitrary-size integers. Initialize $F[0] = 0$ and $F[1] = 1$.
Then loop $i$ from $2$ to $n$, and at each iteration, set
$F[i] \gets F[i-1] + F[i-2]$. Taking into account the time needed
to add integers of a given size, what is the running time of this
algorithm?
\item \label{q:fibrec} We can do better! In addition to their definition, Fibonacci
numbers satisfy the following recurrences (you can just take my
word for it):
\begin{align*}
F_{2n+1} & = F_n^2 + F_{n+1}^2 \\
F_{2n} &= 2F_n F_{n+1} - F_n^2
\end{align*}
For example, $F_7 = 13 = F_3^2 + F_4^2 = 2^2 + 3^2$, and $F_8 = 21
= 2F_4F_5 - F_4^2 = 2 \cdot 3 \cdot 5 - 3^2$.
Explain how to turn these recurrences into a
recursive algorithm for computing Fibonacci
numbers.\marginnote{\emph{\ref{q:fib}\ref{q:fibrec}}\quad\qrcode{Besides\
appropriate\ base\ cases,\ break\ it\ into\ two\ different\
cases:\ one\ when\ n\ is\ even\ and\ one\ when\ n\ is\ odd.}}
\item Analyze the running time of this algorithm. Be sure to
include the time needed to do any additions or multiplications.
Assume we will use Karatsuba's algorithm for multiplication.
\end{enumerate}
\end{question}
\begin{question} \label{q:maj} An array $A[1\ldots n]$ is said to have
a \emph{majority element} if \emph{more than half} of its entries are
the same. Given an array, the task is to design an efficient
algorithm to tell whether the array has a majority element, and, if
so, to find that element. The \emph{only} thing you may assume
about the elements of the array is that you can test whether two of
them are equal (in constant time). In particular, the elements of the
array are \emph{not} necessarily from some ordered domain like the
integers, and so there can be no comparisons of the form
$A[i] > A[j]$. You also may not assume that there is a hash
function for the elements, so they cannot be used as the keys of a
dictionary/hash table.
\begin{enumerate}[label=(\alph*)]
\item What is a brute-force algorithm for this problem? How long
does it take to run?
\item Show how to solve this problem in $O(n \log n)$
time. \marginnote[-0.5in]{\emph{\ref{q:maj}b}\quad\qrcode{Split\
the\ array\ into\ two\ equal-size\ subarrays.\ Would\ it\
help\ to\ know\ their\ majority\ elements?}} Make sure to
prove that your algorithm is correct (via induction) and give a
recurrence relation for the running time of your algorithm.
\item (\textbf{Extra credit}) Can you give a linear-time algorithm?
\marginnote{\emph{\ref{q:maj}c}\quad\qrcode{Pair\ up\ A's\
elements.\ \ For\ each\ pair,\ if\ the\ two\ elements\ are\
different,\ discard\ both\ of\ them;\ if\ they\ are\ the\
same,\ keep\ just\ one.\ Show\ that\ afterwards,\ there\
are\ at\ most\ n/2\ elements\ left,\ and\ that\ they\ have\
a\ majority\ element\ if\ A\ does.}}
\end{enumerate}
\end{question}
% \subsection*{Extra Credit}
% This question adapted from the Design and Analysis of Algorithms
% course at the University of Konstanz by Dr. Ulrik Brandes and
% Dr. Sabine Cornelsen.
% \begin{question}[Least Common Ancestor] \label{q:lca} Let $T =(V,E)$
% be an oriented tree with root $r \in V$ and let $P \subseteq \{
% \{u,v\} \,|\, u,v \in V \}$ be a set of unordered pairs of
% vertices. For each $v \in V$ let $\Pi_{v} = \{r, . . . , v\}
% \subseteq V$ denote the sequence of vertices along the path from $r$
% to $v$ and let $d(v) = |\Pi_{v}| - 1$ denote the depth of $v \in T$.
% The {\em least common ancestor} of pair $\{u,v\} \in P$ is defined
% as $\overline{w} \in V$ with $\overline{w} \in \Pi_{v} \cap \Pi_{u}$
% and $d(\overline{w}) > d(w)$ for all $w \in \Pi_{v} \cap \Pi_{u}$.
% Algorithm LCA($r$) traverses $T$ to determine the least common
% ancestors of all pairs $\{u, v\} \in P$ . At the beginning, all
% vertices are unmarked.
% \begin{algorithm}[H]
% \SetKwFunction{ms}{Makeset}
% \SetKwFunction{union}{Union}
% \SetKwFunction{find}{Find}
% \SetKwData{anc}{ancestor}
% \ms($u$) \\
% $\anc[\find(u)] \longleftarrow u$\\
% \ForEach{child $v$ of $u$ in $T$} {
% LCA($v$) \\
% $\union(\find(u), \find(v))$ \\
% $\anc[\find(u)] \longleftarrow u$}
% mark $u$ \\
% \ForEach{$v$ with $\{u,v\} \in P$}{
% \If{$v$ is marked}{
% print ``LCA('' + $u$ + ``,'' + $v$ + ``) is'' + $\anc[\find(v)]$
% }
% }
% \caption{LCA($u$)}
% \end{algorithm}
% \begin{enumerate}[label=(\alph*)]
% \item Show that, when Line 8 in LCA($u$) is executed, the set {\sc
% Find($u$)} contains all vertices of the subtree $T_{u} \subseteq
% T$ with root $u$.
% \marginnote{\emph{\ref{q:lca}a}\quad\qrcode{Do\ an\ induction\
% on\ the\ height\ h(u)\ of\ u.\ (h(u)\ =\ 0\ if\ u\ is\ a\
% leaf.)}}
% \item Show that the number of sets in the Union-Find data structure at
% the time of a call to LCA($v$) equals $d(v)$.
% \marginnote[1in]{\emph{\ref{q:lca}b}\quad\qrcode{The\ recursive\ calls\
% of\ LCA\ specify\ a\ traversal\ order\ of\ T\ which\ induces\
% an\ order\ on\ V.\ Do\ an\ induction\ on\ the\ position\ of\ v\
% in\ this\ order.}}
% \item Prove that LCA($r$) determines the least common ancestors of all
% $\{u, v\} \in P$ correctly.
% \marginnote[1in]{\emph{\ref{q:lca}c}\quad\qrcode{Differentiate\ two\
% cases:\ when\ one\ of\ u,\ v\ is\ in\ the\ other's\ subtree;\
% and\ when\ neither\ is\ in\ the\ other's\ subtree.}}
% \item[(d)] Analyze the running time of LCA($r$).
% \end{enumerate}
% \end{question}
% \section*{Convolutions and the FFT}
% You are expecting to receive a Very Important Message over a
% network, encoded as a sequence of bits. The message will only be
% sent once. Receiving the message is so important that you decide to
% redundantly have \emph{two} computers both listening for the
% message, just in case one of them doesn't work.
% This turns out to be an excellent idea; however, you only have the
% idea at the last minute. When the message starts to arrive,
% computer A is listening, but you have not quite finished setting up
% computer B! So computer B does not record some beginning portion of
% the message. You do finally get computer B set up and it starts
% recording the message somewhere in the middle. And it's a good
% thing you do, because a few seconds later, computer A crashes!
% Thankfully computer B continues to work, and records the rest of the
% message.
% Here, then, is the situation: both computers recorded only part of
% the message. Computer A recorded from the beginning of the message
% to somewhere in the middle, and computer B recorded from a different
% point in the middle to the end. The bits recorded by computer A and
% computer B overlap, but you have no way to know how many bits are in
% the overlapping portion, since you do not know how long the message
% is supposed to be, and network transmission speeds are variable
% enough that you have no way to know how many bits were transmitted
% between the time when B started recording and the time when A
% crashed.
% To make things worse, there can be occasional transmission errors,
% where individual bits are flipped. So even during the portion of
% the message that both computers were recording, there can be
% positions where computer A recorded a 0 but computer B recorded a 1
% (for example, A might have correctly recorded the intended bit, but
% a glitch caused computer B to record the incorrect bit). The
% message uses an error-correcting code, so you will be able to fix
% these incorrect bits---but not until you have reconstructed the
% whole message!
% You need to find the correct \emph{alignment}, defined as the index
% of the bit where computer B started recording. So, for example, if
% the first bit recorded by computer B was the 973rd bit in the
% message, the correct alignment would be $972$. It is also useful to
% be able to talk about the \emph{overlap}, defined as the number of
% bits the two recordings have in common. The alignment and overlap
% are related by the simple equation \[ \mathit{alignment} +
% \mathit{overlap} = |A|, \] where $|A|$ denotes the number of bits in
% the portion of the message recorded by computer $A$.
% With no way to deduce the correct alignment, you decide to simply
% try \emph{all possible} alignments and find the one that gives the
% best match between the overlapping portions of A's bits and B's
% bits. Because of the occasional errors, the overlap will probably
% never be perfect, but the hope is that many more bits will
% correspond with the correct alignment than with any incorrect
% alignment.\footnote{Of course it is easy to imagine scenarios where
% this does not work---for example, if the middle of the message
% contains a very long sequence like $101010101010\dots$ then many
% different alignments could all match, but given that no one would
% bother to send a message with so much redundancy, it is reasonable
% to assume that the correct alignment will correspond to the best
% match.}
% Formally, let $A = a_0 a_1 a_2 \dots a_{n-1}$ be the sequence of bits
% recorded by computer $A$, and let $B = b_0 b_1 b_2 \dots b_{n-1}$ be
% those recorded by computer $B$. (For simplicity we assume that $A$
% and $B$ have the same length, but it does not really matter.) An
% alignment of $i$ means that $a_i$ is matched with $b_0$, $a_{i+1}$
% with $b_1$, \dots and in general $a_{i+k}$ is matched with $b_k$.
% We define the \emph{fit} of a given alignment as the average number
% of mismatches per bit in the overlapping portion, that is,
% \[ \mathit{fit}(i) = \frac{1}{n-i} \sum_{k=0}^{n-1-i} |a_{i+k} -
% b_k|. \]
% For example, suppose $A$ and $B$ have 40 bits each, and are given by
% \begin{align*}
% A &= 1100101101100111100010011000001011111100 \\
% B &= 0001000100001101101110011101010001001111
% \end{align*}
% With an alignment of, say, $35$, $A$ and $B$ overlap by $5$ bits,
% namely, the $11100$ at the end of $A$ overlaps with the $00010$ at
% the beginning of $B$. Four out of five of these bits do not match,
% giving a fit score of $4/5 = 0.8$. Note that the fit score will
% always be between $0$ and $1$. A fit score of $0$ means that the
% sequences match perfectly; a fit score of $1$ means that the
% overlapping portions are exactly inverted from each other, with the
% first having a 0 whenever the second has a 1, and vice versa. On
% average, we would expect that two randomly chosen sequences of bits
% will have a fit score of $0.5$ relative to each other.
% The absolute best fit between $A$ and $B$ is at alignments 38 and
% 39: namely, those alignments give a fit of zero, since the two 0
% bits at the end of $A$ match perfectly with the two 0 bits at the
% beginning of $B$. However, there is a 50\% chance that the
% sequences will align perfectly with an overlap of one, and this is
% unlikely to actually be the correct alignment. So we ignore
% alignments near the end like this (specifically, let's say that we
% will ignore any alignment with $10$ or fewer bits of
% overlap). Making a graph with alignment on the $x$-axis and fit on
% the $y$-axis, an alignment of 17 clearly jumps out as giving the
% best (lowest) fit value:
% \begin{center}
% \begin{diagram}[width=200]
% import Transmission
% fits :: [Double]
% fits = map (\i -> fit i as bs) [0 .. length as - 1]
% factor = 15
% fitData :: Diagram B
% fitData = atPoints (zipWith (\x y -> fromIntegral x ^& (factor*y)) [0 :: Int ..] fits)
% ((replicate 17 dot ++ [dot # fc red] ++ repeat dot) # fc black)
% where
% dot :: Diagram B
% dot = circle 0.3 # lw none
% dia :: Diagram B
% dia = mconcat
% [ fitData
% , hrule (fromIntegral (length as)) # alignL
% , vcat' (with & catMethod .~ Distrib & sep .~ factor) [text "1", text "0"]
% # translateY factor
% # translateX (-1)
% , hcat' (with & catMethod .~ Distrib & sep .~ 10)
% (map (text . show) [0, 10 .. 40])
% # translateY (-1)
% , text "17" # translateX 17 # translateY (-1)
% , vrule factor # alignB
% ]
% # lw medium
% # fontSize 6
% \end{diagram}
% \end{center}
% Writing $A$ and $B$ underneath each other using this alignment, we
% can see that they do indeed appear to match very well, with only a
% few differences; the average number of disagreements per bit is very
% low:
% \[ \arraycolsep=0.2pt
% \begin{array}{*{57}{c}}
% 1&1&0&0&1&0&1&1&0&1&1&0&0&1&1&1&1&0&0&0&1&0&0&1&1&0&0&0&0&0&1&0&1&1&1&1&1&1&0&0
% \\
% &&&&&&&&&&&&&&&&&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&1&1&0&1&1&1&0&0&1&1&1&0&1&0&1&0&0&0&1&0&0&1&1&1&1
% \end{array}
% \]
% On the other hand, if we pick another alignment (say, 12):
% \[ \arraycolsep=0.2pt
% \begin{array}{*{52}{c}}
% 1&1&0&0&1&0&1&1&0&1&1&0&0&1&1&1&1&0&0&0&1&0&0&1&1&0&0&0&0&0&1&0&1&1&1&1&1&1&0&0
% \\
% &&&&&&&&&&&&0&0&0&1&0&0&0&1&0&0&0&0&1&1&0&1&1&0&1&1&1&0&0&1&1&1&0&1&0&1&0&0&0&1&0&0&1&1&1&1
% \end{array}
% \]
% we can see that $A$ and $B$ do not match very well; the average
% number of disagreements per bit is relatively high.
% \begin{question}
% Before even thinking about designing an algorithm, you decide to do
% some quick back-of-the-envelope calculations. You note the
% following facts:
% \begin{itemize}
% \item Each part of the message is about 1GB in length, that is,
% each part contains $n \approx 2^{33}$ bits.
% \item Your computer can perform about $1$ billion operations per
% second.
% \end{itemize}
% You then imagine using algorithms with various running times and
% calculate how long they would take to run.
% \begin{enumerate}[label=(\alph*)]
% \item If your algorithm required exactly $n^2$ operations,
% approximately how long will it take to run? Express your answer
% in appropriate, human-comprehensible units (\emph{e.g.} say ``10
% hours'', not ``36000 seconds'').
% \item If your algorithm required exactly $n \log_2 n$ operations,
% approximately how long will it take to run?
% \end{enumerate}
% \end{question}
% \begin{question} \label{q:fft}
% \marginnote{\emph{\ref{q:fft}a}\quad\qrcode{Remember\ that\ using\ the\
% FFT,\ we\ can\ multiply\ two\ degree-n\ polynomials\ in\ O(n\ log\ n).}}
% \marginnote[0.5in]{\emph{\ref{q:fft}b}\quad\qrcode{Try\ using\ the\
% polynomials\ A(x)\ =\ a_n{-}1\ +\ a_n-2\ x\ +\ ...\ and\ B(x)\ =\ b_0\ +\ b_1\ x\ +\ ...}}
% Design and analyze an efficient algorithm which, given two
% length-$n$ sequences of bits, finds the alignment with the best fit
% value. Note that ``design and analyze'' means to describe the
% algorithm, prove/justify why it is correct, and analyze its
% asymptotic running time.
% \end{question}
\end{document}