\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}
\usepackage{amsfonts}
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{framed}
\usepackage{comment}
\usepackage{color}
\usepackage{hyperref}
\hypersetup{hidelinks}
\usepackage{ifthen}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{needspace}
\usepackage{tabls}
\usepackage{graphicx}
\setcounter{MaxMatrixCols}{30}

\newcounter{exer}
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
% NOTE: the following environment is named "condition" but typesets with the
% heading "Convention"; the name is kept unchanged in case other parts of the
% document use it.
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
% NOTE: the following environment is named "algorithm" but typesets with the
% heading "Question"; kept unchanged for the same reason.
\newenvironment{algorithm}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
% NOTE: the following environment is named "conclusion" but typesets with the
% heading "Warning"; kept unchanged for the same reason.
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exam}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exam}[#1]\begin{leftbar}}
{\end{leftbar}\end{exam}}
\newtheorem{exmp}[exer]{Exercise}
\newenvironment{exercise}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\newenvironment{statement}{\begin{quote}}{\end{quote}}

\let\sumnonlimits\sum
\let\prodnonlimits\prod
\let\cupnonlimits\bigcup
\let\capnonlimits\bigcap
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\renewcommand{\bigcup}{\cupnonlimits\limits}
\renewcommand{\bigcap}{\capnonlimits\limits}
\setlength\tablinesep{3pt}
\setlength\arraylinesep{3pt}
\setlength\extrarulesep{3pt}
\voffset=0cm
\hoffset=-0.7cm
\setlength\textheight{22.5cm}
\setlength\textwidth{15.5cm}
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\id}{\operatorname{id}}
\newcommand{\rev}{\operatorname{rev}}
\newcommand{\conncomp}{\operatorname{conncomp}}
\newcommand{\conn}{\operatorname{conn}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\powset}[2][]{\ifthenelse{\equal{#2}{}}{\mathcal{P}\left(#1\right)}{\mathcal{P}_{#1}\left(#2\right)}}
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\tup}[1]{\left( #1 \right)}
\newcommand{\ive}[1]{\left[ #1 \right]}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\verts}[1]{\operatorname{V}\left( #1 \right)}
\newcommand{\edges}[1]{\operatorname{E}\left( #1 \right)}
\newcommand{\arcs}[1]{\operatorname{A}\left( #1 \right)}
\newcommand{\underbrack}[2]{\underbrace{#1}_{\substack{#2}}}
\newcommand{\are}{\ar@{-}}
\newcommand{\arebi}[1][]{\ar@{<-}@/_/[#1] \ar@/^/[#1]}
\ihead{Math 4990 Fall 2017 (Darij Grinberg): homework set 9 solutions}
\ohead{page \thepage}
\cfoot{}

\begin{document}

\begin{center}
\textbf{Math 4990 Fall 2017 (Darij Grinberg): homework set 9 solutions}
\end{center}

\begin{center}
\textbf{The solutions below were written by GPT-5.5 and edited by
myself (DG).}
They are less detailed than what I usually write.
\end{center}

\subsection*{One last binomial sum}

\begin{exercise}
\label{exe.sum-2choosek.sol}Let $n\in\NN$. Prove that
\[
\sum_{k=0}^{n}\dbinom{-2}{k}=\left(  -1\right)  ^{n}\left\lfloor \dfrac
{n+2}{2}\right\rfloor .
\]
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.sum-2choosek.sol}.]
For every $k\in\NN$, we have
\[
\dbinom{-2}{k}=\frac{\left( -2\right) \left( -3\right) \cdots\left( -k-1\right) }{k!}
=\left( -1\right) ^k\frac{2\cdot3\cdots\left( k+1\right) }{k!}
=\left( -1\right) ^k\left( k+1\right)
\]
(the last equality holds since
$2\cdot3\cdots\left( k+1\right) =\left( k+1\right) !=\left( k+1\right) \cdot k!$;
for $k=0$, the products
$\left( -2\right) \left( -3\right) \cdots\left( -k-1\right) $ and
$2\cdot3\cdots\left( k+1\right) $ are empty products and thus equal $1$,
so the equalities hold in this case as well).
Hence,
\begin{equation}
\sum_{k=0}^{n}\dbinom{-2}{k}=\sum_{k=0}^{n}\left( -1\right) ^k\left( k+1\right) .
\label{eq.sol.sum-2choosek.rewrite}
\end{equation}

Now, we shall prove that
\begin{equation}
\sum_{k=0}^{n}\left( -1\right) ^k\left( k+1\right)
= \begin{cases}
m+1, & \text{ if } n = 2m \text{ for some } m \in \NN; \\
-\tup{m+1}, & \text{ if } n = 2m+1 \text{ for some } m \in \NN.
\end{cases}
\label{eq.sol.sum-2choosek.4}
\end{equation}

\begin{proof}[Proof of \eqref{eq.sol.sum-2choosek.4}.]
A detailed proof of \eqref{eq.sol.sum-2choosek.4} can be found in
\cite[Exercise 2.9]{detnotes}, so we will only give a sketch.

We distinguish two cases.

\textit{Case 1:} We have $n=2m$ for some $m\in\NN$.

In this case,
\begin{align*}
\sum_{k=0}^{n}\left( -1\right) ^k\left( k+1\right)
&=\sum_{k=0}^{2m}\left( -1\right) ^k\left( k+1\right) \\
&=\sum_{i=0}^{m-1}\left( \left( 2i+1\right) -\left( 2i+2\right) \right) +\left( 2m+1\right) \\
&=\sum_{i=0}^{m-1}\left( -1\right) +\left( 2m+1\right)
=-m+2m+1=m+1.
\end{align*}
This proves \eqref{eq.sol.sum-2choosek.4} in Case 1.

\textit{Case 2:} We have $n=2m+1$ for some $m\in\NN$.

In this case,
\begin{align*}
\sum_{k=0}^{n}\left( -1\right) ^k\left( k+1\right)
&=\sum_{k=0}^{2m+1}\left( -1\right) ^k\left( k+1\right) \\
&=\sum_{i=0}^{m}\left( \left( 2i+1\right) -\left( 2i+2\right) \right)
=\sum_{i=0}^{m}\left( -1\right) \\
&= -\left( m+1\right) .
\end{align*}
This proves \eqref{eq.sol.sum-2choosek.4} in Case 2.

So \eqref{eq.sol.sum-2choosek.4} is proved in both cases.
\end{proof}

Now, \eqref{eq.sol.sum-2choosek.rewrite} becomes
\begin{align*}
\sum_{k=0}^{n}\dbinom{-2}{k}
&=\sum_{k=0}^{n}\left( -1\right) ^k\left( k+1\right)
\\
&= \begin{cases}
m+1, & \text{ if } n = 2m \text{ for some } m \in \NN; \\
-\tup{m+1}, & \text{ if } n = 2m+1 \text{ for some } m \in \NN
\end{cases}
\qquad \quad \tup{\text{by \eqref{eq.sol.sum-2choosek.4}}} \\
&= \left(  -1\right)  ^{n}\left\lfloor \dfrac
{n+2}{2}\right\rfloor
\end{align*}
(the last equality sign can be verified by straightforward
distinction of cases: if $n = 2m$ for some $m \in \NN$,
then $\tup{-1}^n = 1$ and $\floor{\dfrac{n+2}{2}} = m+1$;
on the other hand, if $n = 2m+1$ for some $m \in \NN$,
then $\tup{-1}^n = -1$ and $\floor{\dfrac{n+2}{2}} = m+1$).

We have thus proved Exercise \ref{exe.sum-2choosek.sol}.
\end{proof}

\subsection*{The Cartesian product of two permutations}

We recall the following definition.

\begin{definition}
\label{def.permX.sign.sol}Let $X$ be a finite set. Choose a bijection
$\phi : X \to \ive{n}$ for some $n\in\NN$. For every permutation $\sigma$
of $X$, define
\[
\left( -1\right) _\phi^\sigma=\left( -1\right) ^{\phi\circ\sigma\circ\phi^{-1}}.
\]
It is known that this number depends only on $\sigma$, and not on the choice
of $\phi$. This number is called the \textit{sign} of $\sigma$, and is
denoted by $\left( -1\right) ^\sigma$.
\end{definition}

\begin{exercise}
\label{exeadd.perm.sigmacrosstau.sol}Let $U$ and $V$ be two finite sets. Let
$\sigma$ be a permutation of $U$. Let $\tau$ be a permutation of $V$. We
define a map $\sigma\times\tau : U\times V \to U\times V$ by
\[
\left( \sigma\times\tau\right) \left( a,b\right) =\left( \sigma\left( a\right) ,\tau\left( b\right) \right)
\qquad\text{for every }\left( a,b\right) \in U\times V.
\]

\textbf{(a)} Prove that $\sigma\times\tau$ is a well-defined permutation.

\textbf{(b)} Prove that
\[
\sigma\times\tau=\left( \sigma\times\id\right) \circ\left( \id\times\tau\right) .
\]

\textbf{(c)} Prove that
\[
\left( -1\right) ^{\sigma\times\tau}=\left( \left( -1\right) ^\sigma\right) ^{\abs{V}}
\left( \left( -1\right) ^\tau\right) ^{\abs{U}}.
\]
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exeadd.perm.sigmacrosstau.sol}.]
A detailed solution can be found in \cite[Exercise 5.27]{detnotes}.
What follows is an outline.

\textbf{(a)} The map $\sigma\times\tau$ is well-defined, because for every
$\left( a,b\right) \in U\times V$, we have $\sigma\left( a\right) \in U$
and $\tau\left( b\right) \in V$, and thus
$\left( \sigma\left( a\right) ,\tau\left( b\right) \right) \in U\times V$.

Moreover, the inverse map of $\sigma\times\tau$ is
$\sigma^{-1}\times\tau^{-1}$. Indeed, for every $\left( a,b\right) \in U\times V$,
we have
\begin{align*}
\left( \left( \sigma^{-1}\times\tau^{-1}\right) \circ\left( \sigma\times\tau\right) \right) \left( a,b\right)
&=\left( \sigma^{-1}\times\tau^{-1}\right) \left( \sigma\left( a\right) ,\tau\left( b\right) \right) \\
&=\left( \sigma^{-1}\left( \sigma\left( a\right) \right) ,\tau^{-1}\left( \tau\left( b\right) \right) \right)
=\left( a,b\right)
\end{align*}
and
\begin{align*}
\left( \left( \sigma\times\tau\right) \circ\left( \sigma^{-1}\times\tau^{-1}\right) \right) \left( a,b\right)
&=\left( \sigma\times\tau\right) \left( \sigma^{-1}\left( a\right) ,\tau^{-1}\left( b\right) \right) \\
&=\left( \sigma\left( \sigma^{-1}\left( a\right) \right) ,\tau\left( \tau^{-1}\left( b\right) \right) \right)
=\left( a,b\right) .
\end{align*}
Thus,
\[
\left( \sigma^{-1}\times\tau^{-1}\right) \circ\left( \sigma\times\tau\right) =\id
\qquad\text{and}\qquad
\left( \sigma\times\tau\right) \circ\left( \sigma^{-1}\times\tau^{-1}\right) =\id .
\]
Hence $\sigma\times\tau$ is a permutation.

\textbf{(b)} For every $\left( a,b\right) \in U\times V$, we have
\begin{align*}
\left( \left( \sigma\times\id\right) \circ\left( \id\times\tau\right) \right) \left( a,b\right)
&=\left( \sigma\times\id\right) \left( a,\tau\left( b\right) \right) \\
&=\left( \sigma\left( a\right) ,\tau\left( b\right) \right)
=\left( \sigma\times\tau\right) \left( a,b\right) .
\end{align*}
Thus,
$\left( \sigma\times\id\right) \circ\left( \id\times\tau\right) =\sigma\times\tau$.
This proves part \textbf{(b)}.

\textbf{(c)} Set $m=\abs{U}$ and $n=\abs{V}$.
Choose bijections $\alpha : U\to\ive{m}$ and
$\beta : V\to\ive{n}$.

Define a map $\chi : U\times V \to \ive{mn}$ by
\[
\chi\left( u,v\right) =\alpha\left( u\right) +m\left( \beta\left( v\right) -1\right) .
\]
This map is a bijection: indeed, each integer $t\in\ive{mn}$ can be written in
one and only one way in the form
\[
t=i+m\left( j-1\right)
\qquad\text{with }i\in\ive{m}\text{ and }j\in\ive{n},
\]
namely with $j=\left\lceil t/m\right\rceil$ and
$i=t-m\left( j-1\right)$.
(These values indeed lie in $\ive{n}$ and $\ive{m}$, respectively: the
inequalities $1\leq t\leq mn$ yield $1\leq j\leq n$, while the definition of
$j=\left\lceil t/m\right\rceil$ yields $m\left( j-1\right) <t\leq mj$ and thus
$1\leq i\leq m$. Conversely, any $i\in\ive{m}$ and $j\in\ive{n}$ satisfying
$t=i+m\left( j-1\right) $ must satisfy $m\left( j-1\right) <t\leq mj$ and
therefore $j=\left\lceil t/m\right\rceil$, which in turn determines $i$.)
Thus $t=\chi\left( \alpha^{-1}\left( i\right) ,\beta^{-1}\left( j\right) \right)$.
Hence $\chi$ is indeed a bijection. Also, for each fixed $v\in V$, the subset
$U\times\set{v}$ is sent by $\chi$ onto one consecutive block of $m$ integers%
\footnote{namely, of the $m$ integers
$1+m\tup{\beta\tup{v}-1},\ 2+m\tup{\beta\tup{v}-1},
\ \ldots,\ m\beta\tup{v}$}.

Now consider the permutation $\sigma\times\id$ of $U\times V$. For each fixed
$v\in V$, this permutation leaves the subset $U\times\set{v}$ stable, since
\[
\left( \sigma\times\id\right) \left( u,v\right) =\left( \sigma\left( u\right) ,v\right)
\qquad\text{for every }\left( u,v\right) \in U\times\set{v}.
\]
Moreover, under the identification\footnote{``Identifying'' things
in mathematics generally means finding a bijection between two
sets and pretending -- at least notationally -- that each element
of its domain is the same as its image under this bijection.
Of course, this is always risky, since pretending that different
things are the same is a quick way to contradiction.
Make sure you know what you are doing and be ready to explain
yourself without such shortcuts.
\par
In our specific case, we are considering the bijection
\[
\gamma_v : U\times\set{v}\longrightarrow U,\qquad \left( u,v\right) \mapsto u
\]
for a fixed $v \in V$.
Then, the restriction of the permutation $\sigma\times\id$ to $U\times\set{v}$
is the map $\gamma_v^{-1} \circ \sigma \circ \gamma_v$ (as you
can easily check by comparing how both maps act on a given
element $\tup{u,v}$). Hence, the sign of the former restriction
equals the sign of $\sigma$ (because of the general fact that if
$X$ and $Y$ are two finite sets and $\gamma : X \to Y$ is a
bijection, then, for any permutation $\pi$ of $Y$, the sign of
$\gamma^{-1} \circ \pi \circ \gamma$ equals the sign of $\pi$).
}
\[
U\times\set{v}\longrightarrow U,\qquad \left( u,v\right) \mapsto u,
\]
the restriction of $\sigma\times\id$ to $U\times\set{v}$ becomes exactly the
permutation $\sigma$ of $U$. In this sense, the restriction of $\sigma\times\id$
to $U\times\set{v}$ acts just like $\sigma$.

Therefore, the permutation
\[
\chi\circ\left( \sigma\times\id\right) \circ\chi^{-1}
\]
of $\ive{mn}$ is a product of $n$ permutations with pairwise disjoint
supports\footnote{The \emph{support} of a permutation $\pi$ of a set $X$
is the set of all elements $x \in X$ that are not fixed by $\pi$.
A permutation of $X$ can always be restricted to a permutation of its
support (since it sends elements of its support to other elements of
its support), and moreover, this restriction has the same sign as
$\pi$.},
one on each of the above consecutive blocks of size $m$. Each of these $n$
permutations has sign $\left( -1\right) ^\sigma$. Therefore,
\begin{equation}
\left( -1\right) ^{\sigma\times\id}=\left( \left( -1\right) ^\sigma\right) ^n.
\label{eq.sol.sigmacrosstau.sigmaxid}
\end{equation}

Likewise, define a map $\chi' : U\times V \to \ive{mn}$ by
\[
\chi'\left( u,v\right) =\beta\left( v\right) +n\left( \alpha\left( u\right) -1\right) .
\]
By the same argument as for $\chi$, this map $\chi'$ is a bijection. For each
fixed $u\in U$, the subset $\set{u}\times V$ is sent by $\chi'$ onto one
consecutive block of $n$ integers. Also, the restriction of $\id\times\tau$ to
$\set{u}\times V$ corresponds, under the identification
\[
\set{u}\times V\longrightarrow V,\qquad \left( u,v\right) \mapsto v,
\]
to the permutation $\tau$ of $V$. Hence, by the same argument as above,
\begin{equation}
\left( -1\right) ^{\id\times\tau}=\left( \left( -1\right) ^\tau\right) ^m.
\label{eq.sol.sigmacrosstau.idxtau}
\end{equation}

But part \textbf{(b)} yields
\[
\sigma\times\tau=\left( \sigma\times\id\right) \circ\left( \id\times\tau\right) .
\]
Since sign is multiplicative, we thus obtain
\begin{align*}
\left( -1\right) ^{\sigma\times\tau}
&=\left( -1\right) ^{\sigma\times\id}\cdot\left( -1\right) ^{\id\times\tau} \\
&=\left( \left( -1\right) ^\sigma\right) ^n\left( \left( -1\right) ^\tau\right) ^m
\qquad\left( \text{by \eqref{eq.sol.sigmacrosstau.sigmaxid} and \eqref{eq.sol.sigmacrosstau.idxtau}}\right) \\
&=\left( \left( -1\right) ^\sigma\right) ^{\abs{V}}\left( \left( -1\right) ^\tau\right) ^{\abs{U}}
\qquad  \tup{\text{since $m=\abs{U}$ and $n=\abs{V}$}}.
\end{align*}
This proves part \textbf{(c)}.
\end{proof}

\subsection*{Non-cut vertices I and II}

We recall a few definitions.

\begin{definition}
A \textit{multigraph} means a triple $\left( V,E,\varphi\right) $, where
$V$ and $E$ are finite sets and $\varphi : E \to \powset[2]{V}$.
\end{definition}

\begin{definition}
The \emph{vertex set} of a multigraph $G = \tup{V, E, \varphi}$
means the set $V$. It shall be denoted by $\verts{G}$.
\end{definition}

\begin{definition}
Let $G=\left( V,E,\varphi\right) $ and
$G' =\left( V',E',\varphi'\right) $ be two multigraphs. Then, $G'$ is said
to be a \textit{subgraph} of $G$ if and only if $V'\subseteq V$, $E'\subseteq E$,
and $\varphi'\left( e\right) =\varphi\left( e\right) $ for each $e\in E'$.
\end{definition}

\begin{definition}
If $G=\left( V,E,\varphi\right) $ is a multigraph and $v\in V$, then
$G\setminus v$ denotes the subgraph obtained from $G$ by removing the vertex
$v$ and all edges containing $v$.
\end{definition}

\begin{definition}
Let $G$ be a multigraph. A vertex $v$ of $G$ is called \textit{non-cut} if and
only if $G\setminus v$ is connected or has no vertices.
\end{definition}

\begin{definition}
If $G$ is a connected multigraph and if $v$ and $w$ are two vertices of $G$,
then $d\left( v,w\right) $ denotes the smallest length of a path from $v$ to
$w$.
\end{definition}

\begin{exercise}
\label{exe.graph.non-cut.1.sol}Let $G=\left( V,E,\varphi\right) $ be a connected
multigraph. Let $v\in V$ be any vertex.

\textbf{(a)} Pick any $w\in V$ such that $d\left( v,w\right) $ is maximum
among all $w\in V$. Prove that $w$ is a non-cut vertex of $G$.

\textbf{(b)} Let $n=\abs{V}$. Prove that
\[
\sum_{u\in V} d\left( v,u\right) \leq \dbinom{n}{2} .
\]
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.graph.non-cut.1.sol}.]
\textbf{(a)} We must prove that $G\setminus w$ is connected or has no vertices.
If $\verts{G}=\set{w}$, then $G\setminus w$ has no vertices, and we are done.
Thus, for the rest of this proof, we assume that
$\verts{G}\neq\set{w}$.
Thus, $G$ has more than one vertex; therefore, $w\neq v$
(indeed, any vertex $u\neq v$ satisfies
$d\left( v,u\right) \geq1>0=d\left( v,v\right) $,
so the maximality of $d\left( v,w\right) $ yields
$d\left( v,w\right) \geq1$ and thus $w\neq v$).

Let $x\in \verts{G}\setminus\set{w}$. Since $G$ is connected, there exists a
shortest path from $v$ to $x$. Fix such a path $P$.

We claim that $P$ does not pass through $w$. Indeed, assume the contrary. Then,
since $x\neq w$ and $v\neq w$, the vertex $w$ is a proper internal vertex of the
path $P$. Therefore, the initial segment of $P$ from $v$ to $w$ is a path from
$v$ to $w$ that is strictly shorter than $P$. Since the length of $P$ is
$d\left( v,x\right) $ (because $P$ is a shortest path from $v$ to $x$), we thus
obtain
\[
d\left( v,w\right) \leq\left( \text{length of this initial segment}\right)
<d\left( v,x\right) .
\]
This contradicts the maximality of $d\left( v,w\right) $. Hence our claim is proved.

Thus, the path $P$ avoids $w$; therefore, $P$ is also a path from $v$ to $x$ in
$G\setminus w$. We have proved that every vertex
$x\in \verts{G}\setminus\set{w}$ is path-connected with $v$ inside $G\setminus w$.
Hence any two vertices of $G\setminus w$ are path-connected to each other inside
$G\setminus w$ (namely, through $v$). Therefore, $G\setminus w$ is connected.
So $w$ is a non-cut vertex.

\textbf{(b)} We shall prove part \textbf{(b)} by induction on $n$.

If $n=1$, then $\verts{G}=\set{v}$, so that
\[
\sum_{u\in \verts{G}} d\left( v,u\right) =d\left( v,v\right) =0=\dbinom{1}{2} .
\]
Thus part \textbf{(b)} is proved in this case.

Now assume that $n>1$, and let us prove part \textbf{(b)}. Choose a vertex
$w\in \verts{G}$ such that $d\left( v,w\right) $ is maximum among all
$w\in \verts{G}$. Part \textbf{(a)} shows that $w$ is non-cut. Hence the
multigraph $H=G\setminus w$ is connected. Clearly, $\verts{H}$ has $n-1$
elements.

Now let $u\in \verts{G}\setminus\set{w}$. We claim that every shortest path
from $v$ to $u$ in $G$ avoids $w$.
Indeed, if a shortest path from $v$ to $u$ in $G$ passed
through $w$, then $u\neq w$ would force $w$ to be a proper internal vertex of
this path, and we would obtain $d\left( v,w\right) < d\left( v,u\right) $,
contradicting the maximality of $d\left( v,w\right) $.

Thus, a shortest path from $v$ to $u$ in $G$ is already a path in $H$; hence,
$d_H\left( v,u\right) \leq d_G\left( v,u\right) $. Conversely, each path in $H$
is also a path in $G$ (since $H$ is a subgraph of $G$); hence,
$d_G\left( v,u\right) \leq d_H\left( v,u\right) $. Combining these two
inequalities, we obtain
\begin{equation}
d_H\left( v,u\right) =d_G\left( v,u\right)
\qquad\text{for every }u\in \verts{G}\setminus\set{w}.
\label{eq.sol.noncut.dist-preserved}
\end{equation}
Here, of course, the subscript indicates in which multigraph the distance is
 taken.

Applying the induction hypothesis to the connected multigraph $H$
(which has $n-1$ vertices and vertex set $\verts{G}\setminus\set{w}$),
we obtain
\[
\sum_{u\in \verts{G}\setminus\set{w}} d_H\left( v,u\right)
\leq \dbinom{n-1}{2}.
\]
Using \eqref{eq.sol.noncut.dist-preserved}, we can rewrite this as
\begin{align}
\sum_{u\in \verts{G}\setminus\set{w}} d_G\left( v,u\right)
\leq \dbinom{n-1}{2}.
\label{eq.sol.noncut.dist-n-1-sum}
\end{align}
Also, since a path cannot visit more
than $n$ vertices, we have $d_G\left( v,w\right) \leq n-1$. Therefore,
\begin{align*}
\sum_{u\in \verts{G}} d_G\left( v,u\right)
&=\sum_{u\in \verts{G}\setminus\set{w}} d_G\left( v,u\right)
+ d_G\left( v,w\right) \\
&\leq \dbinom{n-1}{2}+\left( n-1\right)
\qquad \tup{\text{by $d_G\left( v,w\right) \leq n-1$
and \eqref{eq.sol.noncut.dist-n-1-sum}}} \\
&=\dbinom{n}{2} .
\end{align*}
This proves part \textbf{(b)} and thus completes the solution.
\end{proof}

\begin{exercise}
\label{exe.graph.non-cut.2.sol}Let $G$ be a connected multigraph. Let $H$ be a
connected subgraph of $G$. Prove that the number of non-cut vertices of $H$ is
$\leq$ the number of non-cut vertices of $G$.
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.graph.non-cut.2.sol}.]
For any connected multigraph $K$, let $c\left( K\right) $ denote the number of
non-cut vertices of $K$.

We shall use three simple lemmas:

\begin{lemma}
\label{lem.sol.noncut.adj-vert}
Let $K$ be a connected subgraph of the connected multigraph $G$. Assume
that $\verts{K} \neq \verts{G}$. Then there exists a vertex
$z\in \verts{G}\setminus \verts{K}$ that is adjacent in $G$ to a vertex of $K$.
\end{lemma}

\begin{proof}
Choose any vertex $x\in \verts{G}\setminus \verts{K}$
(this exists since $\verts{K} \neq \verts{G}$). Since $G$
is connected and $\verts{K}\neq\varnothing$ (because $K$ is connected), there
exists a path from $x$ to some vertex of $K$. Among all such paths, choose one
of shortest length:
\[
\mathbf{x}=\tup{x_0,x_1,\ldots,x_{\ell}}.
\]
Then $x_{\ell}\in \verts{K}$ and $x_0=x\notin \verts{K}$. Hence not all vertices
of this path belong to $\verts{K}$. Let $j$ be the largest index such that
$x_j\notin \verts{K}$. Then $j<\ell$ (since $x_\ell \in \verts{K}$),
so that $x_{j+1}\in \verts{K}$ (since $j$ is the largest).
Thus the vertex $z:=x_j$ belongs to $\verts{G}\setminus \verts{K}$ and is
adjacent in $G$ to the vertex $x_{j+1}\in \verts{K}$
(since $x_{j+1}$ follows $x_j$ on the path $\mathbf{x}$).
This proves the lemma.
\end{proof}

\begin{lemma}
\label{lem.sol.noncut.add-edge}Let $K$ be a connected multigraph. Let $K'$ be a
multigraph obtained from $K$ by adding some extra edges but no new vertices.
Then $c\left( K'\right) \geq c\left( K\right) $.
\end{lemma}

\begin{proof}
Let $x$ be a non-cut vertex of $K$. Then $K\setminus x$ is connected or has no
vertices. But $K'\setminus x$ is obtained from $K\setminus x$ merely by adding
some edges. Hence $K'\setminus x$ is again connected or has no vertices.
Therefore $x$ is a non-cut vertex of $K'$. Thus every non-cut vertex of $K$ is
also a non-cut vertex of $K'$, and so
$c\left( K'\right) \geq c\left( K\right) $.
\end{proof}

\begin{lemma}
\label{lem.sol.noncut.add-vertex}Let $K$ be a connected multigraph. Let $K'$ be
a multigraph obtained from $K$ by adjoining one new vertex $z$ together with
a new edge that connects this vertex $z$ with a vertex $w$ of $K$.
Then $c\left( K'\right) \geq c\left( K\right)$.
\end{lemma}

\begin{proof}
If $K$ has only one vertex, then the claim is clear (since $c\tup{K} = 1$
and $c\tup{K'} = 2$).
Thus, from now on, we assume that $K$ has at least two vertices.

The new vertex $z$ is itself non-cut in $K'$, because removing $z$
leaves the connected multigraph $K$.

Now let $x$ be a non-cut vertex of $K$ such that $x \neq w$.
Then, the graph $K \setminus x$ is connected or has no vertices.
Since it cannot have no vertices (because $K$ has at least two
vertices), we thus see that it is connected.

The graph $K' \setminus x$ is obtained from this connected
graph $K \setminus x$ by adding the new vertex $z$ and a new edge
that connects this vertex $z$ with the existing vertex $w$
(here we use $x \neq w$, which ensures that $w$ is really a vertex
of $K \setminus x$).
This shows that $K' \setminus x$ is again connected
(since a connected graph remains connected if we add a new
vertex and join it to an existing vertex).
Hence the vertex $x$ remains non-cut in $K'$.

Forget that we fixed $x$.
We thus have shown that any non-cut vertex $x$ of $K$
satisfying $x \neq w$ remains non-cut in $K'$.
Thus, we cannot lose more than one non-cut vertex when
we go from $K$ to $K'$.
However, we always gain the new non-cut vertex $z$.
Thus,
$c\left( K'\right) \geq c\left( K\right) $.
\end{proof}

Now we solve the exercise.
We must prove that $c\tup{H} \leq c\tup{G}$,
that is, $c\tup{G} \geq c\tup{H}$.

If $\verts{G}=\verts{H}$, then $G$ is obtained from $H$ merely by adding
edges. In this case, Lemma~\ref{lem.sol.noncut.add-edge}
immediately yields
$c\left( G\right) \geq c\left( H\right) $, and we are done.

Thus we may assume that $\verts{G}\neq \verts{H}$.
We shall now recursively construct a sequence
$\tup{K_0, K_1, \ldots, K_m}$ of connected subgraphs
of $G$ such that $K_0 = H$ and $\verts{K_m} = \verts{G}$,
and such that
each $K_i$ (for $i>0$) is obtained from $K_{i-1}$ by
adjoining a new vertex and a new edge that joins this
new vertex with an existing vertex of $K_{i-1}$.

Set $K_0=H$. If $K_i$ has already been constructed and satisfies
$\verts{K_i}\neq\verts{G}$, then
Lemma~\ref{lem.sol.noncut.adj-vert} (applied to $K=K_i$)
shows that there exists a vertex
$z\in \verts{G}\setminus \verts{K_i}$ that is adjacent in
$G$ to a vertex of $K_i$.
Let $K_{i+1}$ be the multigraph obtained from $K_i$ by adjoining the new vertex
$z$ together with an edge of $G$ joining $z$ to a vertex of $K_i$.
Then, $K_{i+1}$ is connected. Thus Lemma~\ref{lem.sol.noncut.add-vertex}
yields
\[
c\left( K_{i+1}\right) \geq c\left( K_i\right) .
\]

Repeating this process, we eventually obtain a connected spanning subgraph $K_m$
of $G$ having the same vertex set as $G$, and satisfying
\begin{equation}
c\tup{K_m} \geq c\tup{K_{m-1}} \geq c\tup{K_{m-2}}
\geq \cdots \geq c\tup{K_0} = c\tup{H}.
\label{eq.sol.noncut.2.middle}
\end{equation}

Finally, the multigraph $G$ is obtained from $K_m$ by adding further edges and no
new vertices (since $K_m$ is a spanning subgraph of $G$).
Hence Lemma~\ref{lem.sol.noncut.add-edge} yields
\begin{equation}
c\left( G\right) \geq c\left( K_m\right) .
\label{eq.sol.noncut.2.finalstep}
\end{equation}
Combining \eqref{eq.sol.noncut.2.middle} with
\eqref{eq.sol.noncut.2.finalstep}, we find
\[
c\left( G\right) \geq c\left( H\right) .
\]
In other words, the number of non-cut vertices of $H$ is at most the number of
non-cut vertices of $G$. This proves Exercise \ref{exe.graph.non-cut.2.sol}.
\end{proof}

\subsection*{When do transpositions generate all permutations?}

\begin{exercise}
\label{exe.graph.transpositions-gen.sol}Let $G=\left( V,E,\varphi\right) $ be a
connected multigraph.

For each $e=\set{u,v}\in\powset[2]{V}$, let $t_e$ be the permutation of $V$
that switches $u$ with $v$ and leaves all other elements unchanged.

An $E$-\textit{transposition} means a permutation of the form $t_e$ for some
$e\in\varphi\left( E\right) $.

Prove that every permutation of $V$ can be written as a composition of some
$E$-transpositions.
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.graph.transpositions-gen.sol}.]
Given two distinct elements $u,v \in V$,
the permutation $t_{\set{u,v}}$
(that is, the transposition of $V$ that swaps $u$ with $v$)
shall be denoted $t_{u,v}$.
We first prove two lemmas.

\begin{lemma}
\label{lem.sol.transpositions.3}Let $u,v,w$ be three distinct
elements of $V$. Then,
\[
t_{u,v} \circ t_{v,w} \circ t_{u,v} = t_{u,w}.
\]
\end{lemma}

\begin{proof}
This can be shown by directly verifying that both sides
send $u,v,w$ to $w,v,u$, respectively, while leaving all other
elements of $V$ fixed (since each of the transpositions
$t_{u,v}, \ t_{v,w}, \ t_{u,w}$ leaves these other elements
fixed).
\end{proof}

\begin{lemma}
\label{lem.sol.transpositions.path}Let $k$ be a positive integer, and let
$v_0,v_1,\ldots,v_k$
be $k+1$ distinct elements of the set $V$.
For each $i\in\set{1,2,\ldots,k}$, set
$e_i = \set{v_{i-1},v_i}$. Then
\begin{equation}
t_{v_0,v_k}
= t_{e_1}\circ t_{e_2}\circ\cdots\circ t_{e_{k-1}}\circ t_{e_k}
\circ t_{e_{k-1}}\circ\cdots\circ t_{e_2}\circ t_{e_1}
\label{eq.sol.transpositions.path}
\end{equation}
(where the subscripts on the right hand side are $e_1, e_2, \ldots, e_k$
followed by $e_{k-1}, e_{k-2}, \ldots, e_1$).
\end{lemma}

\begin{proof}
We shall prove Lemma~\ref{lem.sol.transpositions.path} by induction on $k$.

The base case is $k=1$. In this case, \eqref{eq.sol.transpositions.path}
reduces to $t_{v_0,v_1} = t_{e_1}$, which is true because
$e_1 = \set{v_0,v_1}$.

Now let $k>1$, and assume that Lemma~\ref{lem.sol.transpositions.path} has
already been proved for $k-1$ in place of $k$.
Applying this induction hypothesis to the $k$ distinct elements
$v_1,v_2,\ldots,v_k$, we obtain
\begin{align}
t_{v_1, v_k} = t_{e_2} \circ t_{e_3} \circ \cdots \circ
t_{e_{k-1}}\circ t_{e_k} \circ t_{e_{k-1}} \circ \cdots \circ
t_{e_3} \circ t_{e_2}.
\label{eq.sol.transpositions-gen.4}
\end{align}
Now, $e_1 = \set{v_0, v_1}$, so that
$t_{e_1} = t_{v_0, v_1}$. Furthermore,
\begin{align*}
&t_{e_1}\circ t_{e_2}\circ\cdots\circ t_{e_{k-1}}\circ t_{e_k}
\circ t_{e_{k-1}}\circ\cdots\circ t_{e_2}\circ t_{e_1} \\
&= \underbrace{t_{e_1}}_{=t_{v_0, v_1}} \circ
\underbrace{\tup{t_{e_2} \circ t_{e_3} \circ \cdots \circ
t_{e_{k-1}}\circ t_{e_k} \circ t_{e_{k-1}} \circ \cdots \circ
t_{e_3} \circ t_{e_2}}}_{\substack{= t_{v_1, v_k} \\
\text{(by \eqref{eq.sol.transpositions-gen.4})}}}
\circ \underbrace{t_{e_1}}_{=t_{v_0, v_1}} \\
&= t_{v_0, v_1} \circ t_{v_1, v_k} \circ t_{v_0, v_1}
= t_{v_0, v_k}
\end{align*}
(by Lemma~\ref{lem.sol.transpositions.3}, applied to $u=v_0$,
$v=v_1$ and $w=v_k$).
Thus \eqref{eq.sol.transpositions.path} is proved for the present $k$. This
completes the induction.
\end{proof}

We next show that every transposition $t_{a,b}$ with
$a,b\in V$ can be written as a composition of $E$-transpositions.

Indeed, let $a,b\in V$ be two distinct vertices of $G$.
Since $G$ is connected, there exists a path
$\tup{a=v_0,v_1,\ldots,v_k=b}$
in $G$ from $a$ to $b$. Since this is a path, the vertices
$v_0,v_1,\ldots,v_k$ are distinct. For every $i\in\set{1,2,\ldots,k}$, set
$e_i=\set{v_{i-1},v_i}$. Then each $e_i$ belongs to $\varphi\left( E\right) $
(since $v_{i-1}$ and $v_i$ are consecutive vertices of our path
and thus adjacent in $G$),
so that $t_{e_i}$ is an $E$-transposition.
Thus, Lemma~\ref{lem.sol.transpositions.path}
shows that $t_{v_0,v_k}$ is a composition of
$E$-transpositions.
In other words, $t_{a,b}$ is a composition of
$E$-transpositions (since $a=v_0$ and $b=v_k$).

Thus we have shown that every transposition of the set
$V$ is a composition of $E$-transpositions.

But every permutation of a finite set can be written as
a composition of transpositions\footnote{See, e.g.,
\cite[Exercise 5.15 \textbf{(b)}]{detnotes} for the proof
of this fact}.
Hence, every permutation of $V$ can be written
as a composition of transpositions of $V$,
while each of the latter transpositions,
in turn, is a composition of $E$-transpositions
(by the preceding paragraph).
Thus, every permutation of $V$ can be written
as a composition of $E$-transpositions.
This is exactly what had to be proved.
\end{proof}

\subsection*{Watersheds in digraphs}

The following exercise is a result known as the finite case of
\emph{Newman's lemma} or \emph{diamond lemma}:

\begin{exercise}
\label{exe.confluence.diamond.sol}Let $D$ be a finite multidigraph having no
cycles. A vertex $v$ of $D$ is called a \textit{sink} if there is no arc of $D$
with source $v$.

If $u$ and $v$ are vertices of $D$, then:
\begin{itemize}
\item we write $u\longrightarrow v$ if and only if $D$ has an arc with source
$u$ and target $v$;
\item we write $u\overset{\ast}{\longrightarrow}v$ if and only if $D$ has a
path from $u$ to $v$.
\end{itemize}

Assume that the following no-watershed condition holds:

\begin{quote}
For any three vertices $u$, $v$ and $w$ of $D$ satisfying $u\longrightarrow v$
and $u\longrightarrow w$, there exists a vertex $t$ of $D$ such that
$v\overset{\ast}{\longrightarrow}t$ and $w\overset{\ast}{\longrightarrow}t$.
\end{quote}

Prove that for each vertex $p$ of $D$, there exists a \textbf{unique} sink $q$
of $D$ such that $p\overset{\ast}{\longrightarrow}q$.
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.confluence.diamond.sol}.]
For each vertex $x$ of $D$, let $h\left( x\right) $ be the length of a longest
path starting at $x$. This number is well-defined,
since any path of $D$ has length at most $\abs{\verts{D}}-1$
(and of course, at least one path starting at $x$ exists:
the trivial path $\tup{x}$).
We note that $D$ has no cycles, so each walk of $D$ is a path
(since otherwise, it would contain a cycle).
Thus, the paths of $D$ are the same as the walks of $D$.
% indeed, since $D$ has no
% directed cycles, no directed path can visit the same vertex twice; thus every
% directed path starting at $x$ has length at most $\abs{\verts{D}}-1$. Since $D$
% has only finitely many vertices, there are only finitely many possible lengths
% of such paths, so a largest one exists.

The relation $\overset{\ast}{\longrightarrow}$ on the vertex
set of $D$ is called \emph{reachability}:
We say that a vertex $q$ is \emph{reachable} from a vertex
$p$ if $p \overset{\ast}{\longrightarrow} q$.

We note that if $u \longrightarrow v$ is an arc of $D$, then
\begin{align}
h\tup{u} > h\tup{v}.
\label{eq.confluence.diamond.sol.huhv}
\end{align}
(Indeed, each path starting at $v$ can be extended to a path
starting at $u$ by adding this arc $u \longrightarrow v$
at its front (since any walk of $D$ is a path).
Of course, the latter path is longer than the
former. Thus, whatever path exists starting at $v$, there
will exist a longer path starting at $u$.
Thus, $h\tup{u} > h\tup{v}$. This proves
\eqref{eq.confluence.diamond.sol.huhv}.)

Thus, we can see that if $u, v$ are two vertices of $D$
such that $u \overset{\ast}{\longrightarrow} v$, then
\begin{align}
h\tup{u} \geq h\tup{v}.
\label{eq.confluence.diamond.sol.huhv2}
\end{align}
(Indeed, $u \overset{\ast}{\longrightarrow} v$ means that
there is a path from $u$ to $v$ in $D$, and then
\eqref{eq.confluence.diamond.sol.huhv} shows that the
vertices $p_0, p_1, \ldots, p_k$ of this path satisfy
$h\tup{u} = h\tup{p_0} > h\tup{p_1} > h\tup{p_2}
> \cdots > h\tup{p_k} = h\tup{v}$. This yields
\eqref{eq.confluence.diamond.sol.huhv2}.)

We must prove the following statement:

\begin{quote}
\textit{Statement A:}
Let $p$ be a vertex of $D$.
Then, there exists a unique sink $q$ such that
$p\overset{\ast}{\longrightarrow}q$.
\end{quote}

\begin{proof}[Proof of Statement A]
We shall prove Statement A by strong
induction on $h\tup{p} $:

\textit{Base case:} Assume that $h\tup{p} =0$.
Then there is no path of positive length starting at $p$,
hence no walk of positive length starting at $p$ either
(since paths are the same as walks in $D$).
Thus, there is no arc with source $p$. Hence $p$ itself
is a sink. Clearly, $p\overset{\ast}{\longrightarrow}p$,
and $p$ is the unique sink reachable from $p$
(for lack of arcs). So Statement A is proved in this case.

\textit{Induction step:} Let $N>0$, and assume that Statement A is already
proved for every vertex $p$ with $h\tup{p} <N$. Now let $p$ be a vertex
with $h\tup{p} =N$. We must prove Statement A for $p$.

Since $N>0$, the vertex $p$ is not a sink.
Hence there exists at least one arc $p\longrightarrow r$.

Consider this arc and its target $r$.
Thus, \eqref{eq.confluence.diamond.sol.huhv} shows that
$h\tup{p} > h\tup{r}$.
Therefore, $h\tup{r} <h\tup{p} =N$,
so that we can apply the induction hypothesis to $r$
instead of $p$, and conclude
that there exists a unique sink reachable from $r$.
Let us denote this sink by $q_r$.

We now claim that the sink $q_r$ does not depend on
the chosen outgoing arc $p\longrightarrow r$.
Indeed, let $p\longrightarrow r$ and $p\longrightarrow s$ be two outgoing arcs
from $p$. By the no-watershed condition, there exists a vertex $t$ such that
$r\overset{\ast}{\longrightarrow}t$ and $s\overset{\ast}{\longrightarrow}t$.
Then, \eqref{eq.confluence.diamond.sol.huhv2}
shows $h\tup{r} \geq h\tup{t}$,
so that $h\tup{t} \leq h\tup{r} < N$,
and therefore the induction hypothesis yields
that there is a unique sink reachable from $t$;
call it $q_t$.

Now $r\overset{\ast}{\longrightarrow}t\overset{\ast}{\longrightarrow}q_t$,
so $q_t$ is a sink reachable from $r$. But recall that
there is only one sink reachable from $r$, namely $q_r$. Hence $q_r=q_t$.
Likewise, $q_s=q_t$, where $q_s$ denotes the unique sink reachable from $s$
(this exists for the same reason as $q_r$). Therefore $q_r=q_s$.
This proves our claim that $q_r$ does not depend on $r$.

Thus all outgoing neighbors of $p$ lead to the same sink. Let $q$ denote this
common sink.
Then $p\overset{\ast}{\longrightarrow}q$, because $p$ has an outgoing arc to
some vertex $r$, and then $r\overset{\ast}{\longrightarrow}q$.

It remains to prove uniqueness (i.e., that $q$ is the only sink reachable
from $p$). Let $q'$ be any sink satisfying
$p\overset{\ast}{\longrightarrow}q'$. Then $q' \neq p$, since $p$ is not a sink.
Fix a path from $p$ to $q'$; this path has positive length (since $q' \neq p$),
so it begins with some arc $p\longrightarrow r$. Then $q'$ is a sink reachable
from $r$. But by the induction hypothesis, the unique sink reachable from $r$ is
$q_r=q$. Thus $q'=q$.

So $q$ is the unique sink reachable from $p$. This completes the induction step,
and therefore the proof of Statement A.
\end{proof}

Thus, the exercise is solved.
\end{proof}

\subsection*{Acyclic orientations and source pushing}

We next recall the relevant definitions.

\begin{definition}
Let $G=\left( V,E,\psi\right) $ be a multigraph.

\textbf{(a)} An \textit{orientation} of $G$ is a map $\phi : E \to V\times V$
such that, whenever $\phi\left( e\right) =\left( u,v\right) $, we have
$\psi\left( e\right) =\set{u,v}$.
Given such an orientation $\phi$ and an edge $e \in E$, we say
that $e$ is \emph{oriented from $u$ toward $v$} in $\phi$,
where $\tup{u,v} = \phi\tup{e}$.
Each orientation $\phi$ gives rise to
a multidigraph $\tup{V, E, \phi}$, which will often be called $\phi$
again (so, e.g., a ``cycle in $\phi$'' means a cycle in this multidigraph).

\textbf{(b)} An orientation $\phi$ is called \textit{acyclic} if the
multidigraph $\left( V,E,\phi\right) $ has no directed cycles.
\end{definition}

\begin{definition}
Let $G=\left( V,E,\psi\right) $ be a multigraph, and let $\phi$ be an orientation
of $G$.

A vertex $v\in V$ is called a \textit{source} of $\phi$ if no arc of the
multidigraph $\left( V,E,\phi\right) $ has target $v$.

If $v$ is a source of $\phi$, then the orientation obtained from $\phi$ by
\textit{pushing the source $v$} is the orientation $\phi'$ defined by the following rules:
\begin{itemize}
\item if $e\in E$ satisfies $v\in\psi\left( e\right) $ and
$\phi\left( e\right) =\left( v,u\right) $, then $\phi'\left( e\right) =\left( u,v\right) $;
\item all other edges keep their orientations.
\end{itemize}
In other words, we reverse all edges containing $v$.
\end{definition}

\begin{exercise}
\label{exe.aco.source-push.finite.sol}Let $G=\left( V,E,\psi\right) $ be a
connected multigraph. Set $n=\abs{V}$.

Let $\left( \phi_0,\phi_1,\ldots,\phi_k\right) $ be a sequence of orientations
of $G$, and let $\left( v_1,v_2,\ldots,v_k\right) $ be a sequence of vertices of
$G$ such that, for each $i\in\set{1,2,\ldots,k}$, the orientation $\phi_i$ is
obtained from $\phi_{i-1}$ by pushing the source $v_i$.

\textbf{(a)} Prove that if $u$ and $w$ are two mutually adjacent vertices of
$G$, then between any two consecutive appearances of $u$ in the sequence
$\left( v_1,v_2,\ldots,v_k\right) $, the vertex $w$ must appear at least once.

Now assume that $k>\dbinom{n}{2}$.

\textbf{(b)} Prove that each vertex of $G$ appears at least once in the
sequence $\left( v_1,v_2,\ldots,v_k\right) $.

\textbf{(c)} Prove that the orientations $\phi_0,\phi_1,\ldots,\phi_k$ are acyclic.
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.aco.source-push.finite.sol}.]
\textbf{(a)} Let $u$ and $w$ be two mutually adjacent vertices of $G$.
Assume that $u$ appears in the sequence $\left( v_1,v_2,\ldots,v_k\right) $ at
two positions $a<b$ (that is, $v_a = v_b = u$),
and that these two appearances are consecutive,
meaning that $u$ does not appear anywhere between these two positions
(though $u$ might appear at other positions in this sequence).
We must prove that $w$ appears somewhere among
$v_{a+1},v_{a+2},\ldots,v_{b-1}$.

Assume the contrary. Then $w$ does not appear among
$v_{a+1},v_{a+2},\ldots,v_{b-1}$.

When we pass from $\phi_{a-1}$ to $\phi_a$, we push the source $u=v_a$.
Therefore every edge joining $u$ to $w$ is oriented toward $u$ in $\phi_a$.
Now, as long as neither $u$ nor $w$ is pushed, the orientations of these edges
cannot change. Since neither $u$ nor $w$ appears among
$v_{a+1},v_{a+2},\ldots,v_{b-1}$, it follows that in $\phi_{b-1}$ every edge
joining $u$ to $w$ is still oriented toward $u$.

But $u=v_b$ is a source of $\phi_{b-1}$, so no edge of $\phi_{b-1}$ can be
oriented toward $u$. This contradiction proves that $w$ must indeed appear
between the two consecutive appearances of $u$.
This proves part \textbf{(a)}.

\textbf{(b)} Assume that some vertex $v\in V$ does not appear in the sequence
$\left( v_1,v_2,\ldots,v_k\right) $. For each $u\in V$, let $N_u$ denote the
number of times that $u$ appears in this sequence.
Thus, $N_v = 0$ by our assumption. Also,
$k=\sum\limits_{u\in V} N_u$.

We shall prove, by induction on $d\left( v,u\right) $, that
\begin{equation}
N_u\leq d\left( v,u\right)
\qquad\text{for every }u\in V.
\label{eq.sol.sourcepush.countbound}
\end{equation}

\textit{Base case:}
If $d\left( v,u\right) =0$, then $u=v$, so $N_u=N_v=0=d\left( v,u\right) $.
Thus \eqref{eq.sol.sourcepush.countbound} holds in this case.

\textit{Induction step:}
Now let $u\in V$ satisfy $d\left( v,u\right) >0$. Choose a shortest path%
\footnote{We write all paths as lists of vertices, since the edges are
not relevant to our proof.}
\[
\tup{v=x_0,x_1,\ldots,x_{\ell}=u}
\]
from $v$ to $u$. Then $\ell=d\left( v,u\right) $, and the penultimate vertex
$x_{\ell-1}$ is adjacent to $u$. Thus, setting $w=x_{\ell-1}$, we obtain
\[
d\left( v,w\right) =\ell-1
\]
(indeed, $\tup{v=x_0,x_1,\ldots,x_{\ell-1}=w}$ must be a shortest path from
$v$ to $w$, because if there were a shorter path, then we could extend it
by an edge joining $x_{\ell-1}$ with $x_{\ell}$ to obtain a walk from $v$ to
$u$ of length smaller than $\ell=d\left( v,u\right) $; but every walk from
$v$ to $u$ has length at least $d\left( v,u\right) $, which is absurd).

Part \textbf{(a)} shows that between any two consecutive appearances of $u$
in $\left( v_1,v_2,\ldots,v_k\right) $,
there must be an appearance of $w$. Therefore,
\[
N_u\leq N_w+1
\]
(since the $N_u$ appearances of $u$ create $N_u-1$ gaps,
and each of these gaps must contain an appearance of $w$
by the preceding sentence).
But $d\left( v,w\right) = \ell-1 = d\left( v,u\right) -1$
(since $\ell = d\tup{v,u}$), so we can apply the
induction hypothesis to $w$ instead of $u$. We obtain
$N_w \leq d\tup{v,w}$. Hence,
\[
N_u\leq N_w + 1
\leq d\tup{v,w} + 1 = d\tup{v,u}
\]
(since $d\left( v,w\right) = d\left( v,u\right) -1$).
This completes the induction step.
Thus, \eqref{eq.sol.sourcepush.countbound} is proved.

Now summing \eqref{eq.sol.sourcepush.countbound} over all $u\in V$, we obtain
\[
k=\sum_{u\in V} N_u\leq \sum_{u\in V} d\left( v,u\right) .
\]
Exercise~\ref{exe.graph.non-cut.1.sol} \textbf{(b)} yields
$\sum_{u\in V} d\left( v,u\right) \leq \dbinom{n}{2}$.
Therefore $k\leq \dbinom{n}{2}$, contradicting the assumption
$k>\dbinom{n}{2}$.

This contradiction shows that no vertex can be missing from the
sequence $\left( v_1,v_2,\ldots,v_k\right) $. Hence
each vertex of $G$ appears at least once.
This proves part \textbf{(b)}.

\textbf{(c)} We first show that, for each $i\in\set{1,2,\ldots,k}$, the
orientations $\phi_{i-1}$ and $\phi_i$ either both have a directed cycle or
both have none.

Indeed, fix such an $i$. Since $\phi_i$ is obtained from $\phi_{i-1}$ by
pushing the source $v_i$, the only edges whose directions change are the edges
that contain $v_i$.

Now, because $v_i$ is a source of $\phi_{i-1}$, no directed cycle in
$\phi_{i-1}$ can pass through $v_i$ or even use an edge containing $v_i$.
Hence every directed cycle in $\phi_{i-1}$ is still a directed cycle in $\phi_i$.

Likewise, after the push, the vertex $v_i$ becomes a sink of $\phi_i$.
Therefore no directed cycle in $\phi_i$ can pass through $v_i$ or use an edge
containing $v_i$. Hence every directed cycle in $\phi_i$ is already a
directed cycle in $\phi_{i-1}$.

Thus, the directed cycles of $\phi_{i-1}$ are precisely the directed cycles
of $\phi_i$.
Repeating this for all $i$, we conclude that the orientations
$\phi_0,\phi_1,\ldots,\phi_k$ all have the same set of directed cycles.
Therefore, either all orientations
$\phi_0,\phi_1,\ldots,\phi_k$ are acyclic, or all of them have a directed
cycle in common.

Assume for the sake of contradiction that they all have a directed cycle
in common.
Choose one such cycle, and let $x$ be a vertex on this cycle.
Then $x$ has an incoming arc in each $\phi_i$ (namely, the arc
of our cycle that enters $x$), so $x$ is not a source of $\phi_i$.
Consequently, $x$ can never be one of the pushed vertices
$v_1,v_2,\ldots,v_k$.

This contradicts part \textbf{(b)}, which says that every vertex of $G$ appears
at least once in the sequence $\left( v_1,v_2,\ldots,v_k\right) $.
Hence our assumption was false. Therefore all orientations
$\phi_0,\phi_1,\ldots,\phi_k$ are acyclic.
\end{proof}

\begin{exercise}
\label{exe.aco.source-push.confluent.sol}Let $G=\left( V,E,\psi\right) $ be a
connected multigraph, and fix a vertex $v\in V$.

If $\phi$ and $\phi'$ are two orientations of $G$, then we write
$\phi\overset{v}{\longrightarrow}\phi'$ if and only if $\phi'$ is obtained from
$\phi$ by repeatedly pushing sources without ever pushing the source $v$.

An orientation $\phi$ is called $v$-\textit{fleeing} if $\phi$ has no source
other than $v$.

Prove that for any orientation $\phi$ of $G$, there is a \textbf{unique}
$v$-fleeing orientation $\phi'$ such that
$\phi\overset{v}{\longrightarrow}\phi'$.
\end{exercise}

\begin{proof}[Solution to Exercise \ref{exe.aco.source-push.confluent.sol}.]
We define a multidigraph $O_v$ as follows:

\begin{itemize}
\item The vertices of $O_v$ are all orientations of $G$.
\item There is an arc $\phi\longrightarrow\psi$ in $O_v$ whenever $\psi$ is
obtained from $\phi$ by pushing a source of $\phi$ different from $v$.
\end{itemize}

Since $G$ has only finitely many edges, it has only finitely many orientations.
Therefore $O_v$ is a finite multidigraph.

We shall prove two facts about $O_v$.

\begin{statement}
\textit{First fact:} The multidigraph $O_v$ has no directed cycles.
\end{statement}

\begin{proof}
Assume the contrary. Then $O_v$ has a directed cycle. Traversing this cycle
repeatedly, we can obtain, for any positive integer $N$, a sequence of more
than $N$ pushes of sources, none of which is equal to $v$, starting from some
orientation of $G$.

Choose $N=\dbinom{\abs{V}}{2}$. Then Exercise
\ref{exe.aco.source-push.finite.sol} \textbf{(b)} shows that in such a long
sequence, every vertex of $G$ must appear at least once among the pushed
sources. In particular, the vertex $v$ must appear. But by construction of
$O_v$, the vertex $v$ is never pushed. This contradiction proves that $O_v$ has
no directed cycles.
\end{proof}

\begin{statement}
\textit{Second fact:} The multidigraph $O_v$ satisfies the no-watershed
condition (see Exercise~\ref{exe.confluence.diamond.sol}).
\end{statement}

\begin{proof}
Indeed, let $\phi$, $\psi$ and $\psi'$ be three vertices of $O_v$ such that
$\phi\longrightarrow\psi$ and $\phi\longrightarrow\psi'$. Then there exist two
sources $x$ and $y$ of $\phi$, both distinct from $v$, such that $\psi$ is
obtained from $\phi$ by pushing $x$, whereas $\psi'$ is obtained from $\phi$ by
pushing $y$.

If $x=y$, then $\psi=\psi'$. In this case, to verify the no-watershed
condition for $O_v$, we can simply take $t=\psi$ itself, since then both
$\psi\overset{\ast}{\longrightarrow}t$ and $\psi'\overset{\ast}{\longrightarrow}t$
hold trivially. Thus we may assume that $x\neq y$.

We claim that $x$ and $y$ are not adjacent in the orientation $\phi$. In fact,
if there were an edge between $x$ and $y$, then this edge would have to be
oriented either from $x$ towards $y$ or from $y$ towards $x$. In the first case, $y$
would not be a source; in the second case, $x$ would not be a source. Both are
impossible. Thus there is no edge between $x$ and $y$.

Hence pushing $x$ does not affect any edge containing $y$, and so $y$
remains a source after $x$ has been pushed. Likewise, $x$ remains a source
after $y$ has been pushed.

Now let $\theta$ be the orientation obtained from $\phi$ by reversing all edges
containing $x$ and all edges containing $y$. This makes sense because no
edge contains both $x$ and $y$ (as $x$ and $y$ are not adjacent).
Pushing $x$ first (in $\phi$) and then pushing $y$ produces exactly this orientation $\theta$; and
pushing $y$ first (in $\phi$) and then $x$ also produces exactly this same orientation
$\theta$. In other words, these two pushes commute. Therefore, if we push $y$
in $\psi$, and if we push $x$ in $\psi'$, we arrive at the same orientation
$\theta$. Thus
\[
\psi\longrightarrow\theta
\qquad\text{and}\qquad
\psi'\longrightarrow\theta
\]
in $O_v$. Therefore
\[
\psi\overset{\ast}{\longrightarrow}\theta
\qquad\text{and}\qquad
\psi'\overset{\ast}{\longrightarrow}\theta
\]
in $O_v$ as well.
Thus, the no-watershed condition holds for $O_v$.
\end{proof}

Now Exercise~\ref{exe.confluence.diamond.sol} applies to the finite acyclic
multidigraph $O_v$. It shows that for any vertex $\phi$ of $O_v$, there is a
unique sink $\phi'$ of $O_v$ such that $\phi\overset{\ast}{\longrightarrow}\phi'$ in $O_v$.
By the definition of the arcs of $O_v$, the relation
$\phi\overset{\ast}{\longrightarrow}\phi'$ in $O_v$ is precisely the relation
$\phi\overset{v}{\longrightarrow}\phi'$ from the statement of the exercise.

Finally, an orientation $\phi'$ is a sink of $O_v$ if and only if no source of
$\phi'$ distinct from $v$ can be pushed. This is equivalent to saying that
$\phi'$ has no source other than $v$, i.e., that $\phi'$ is $v$-fleeing.

Thus, for every orientation $\phi$ of $G$, there is a unique $v$-fleeing
orientation $\phi'$ such that $\phi\overset{v}{\longrightarrow}\phi'$. This
solves the exercise.
\end{proof}

\begin{thebibliography}{999999999}                                                                                        %
\bibitem[Grinbe15]{detnotes}\href{https://arxiv.org/abs/2008.09862v3}{Darij
Grinberg, \textit{Notes on the combinatorial fundamentals of algebra}, 15
September 2022, arXiv:2008.09862v3.}

\end{thebibliography}

\end{document}
