\documentclass[12pt,final,notitlepage,onecolumn]{article}%
\usepackage[all,cmtip]{xy}
\usepackage{lscape}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{hyperref}
\usepackage{amsmath}%
\setcounter{MaxMatrixCols}{30}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{CSTFile=LaTeX article (bright).cst}
%TCIDATA{Created=Sat Mar 27 17:33:36 2004}
%TCIDATA{LastRevised=Wednesday, July 17, 2013 17:22:15}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\newcommand{\id}{\operatorname*{id}}
\def\wt{\operatorname{wt}}
\def\Mat{\operatorname{Mat}}
\def\Id{\operatorname{Id}}
\def\sgn{\operatorname{sgn}}
\def\tail{\operatorname{tail}}
\def\P{\mathcal{P}}
\def\PP{\mathbf{P}}
\def\C{\mathcal{C}}
\def\CC{\mathbf{C}}
\def\F{\mathcal{F}}
\def\FF{\mathbf{F}}
\voffset=-1.5cm
\hoffset=-1cm
\setlength\textheight{22cm}
\setlength\textwidth{14cm}
\begin{document}
\begin{center}
\textbf{Determinants of weighted path matrices}
\textit{Kelli Talaska}
version of 14 February 2012 (\href{https://arxiv.org/abs/1202.3128v1}{\texttt{arXiv:1202.3128v1}})
(archived as \texttt{1202.3128v1.pdf})
\textbf{Errata and comments (Darij Grinberg, 17 July 2013)}
\textbf{[detailed version]}
\bigskip
\end{center}
\subsection{Errata}
\begin{itemize}
\item \textbf{Page 2, before Definition 2.1:} Your notion of a ``path'' in a
graph does \textbf{not} require the path to have no self-intersections (it may
even traverse the same edge twice). Since this is not the way graph
theorists usually understand the word ``path'', I'd explicitly define it if I were you.
(As far as I understand, your definition of a path is something like this: If
$v$ and $w$ are two vertices of a graph $G$, then a \textit{path} starting at
$v$ and ending at $w$ means a sequence $\left( e_{1},e_{2},...,e_{m}\right)
$ of edges of $G$ satisfying the following two properties:
-- Every $i\in\left\{ 1,2,...,m-1\right\} $ satisfies $\operatorname*{head}%
\left( e_{i}\right) =\operatorname*{tail}\left( e_{i+1}\right) $.
-- If $m\geq1$, then $\operatorname*{tail}\left( e_{1}\right) =v$ and
$\operatorname*{head}\left( e_{m}\right) =w$.
Note that the path is allowed to be the empty sequence; this is why the
``$m\geq1$'' condition in the second property cannot be left out.
I believe that what graph theorists call a ``path'' is what you call a
``self-avoiding path''.)
\item \textbf{Page 3, Definition 2.3:} You write: ``path collections
$\mathbf{P}=\left( P_{1},...,P_{k}\right) $''. At this point it would be very
good to point out that ``collection'' is being used as a synonym for ``set'' here,
and the notation ``$\left( P_{1},...,P_{k}\right) $'' is being used as a
synonym for the notation ``$\left\{ P_{1},P_{2},...,P_{k}\right\} $''.
(Otherwise, the notation ``$\left( P_{1},...,P_{k}\right) $'' looks like an
ordered $k$-tuple, and since the word ``collection'' can mean both a set and a
family, the reader is misled into thinking that path collections are ordered tuples.)
\item \textbf{Page 4, proof of Theorem 2.5:} At the very beginning of this
proof, it would help to add some sentences like the following: ``We write the
set $A$ as $A=\left\{ a_{1},a_{2},...,a_{k}\right\} $ with $a_{1}$, $a_{2}$,
$...$, $a_{k}$ being pairwise distinct. We write the set $B$ as $B=\left\{
b_{1},b_{2},...,b_{k}\right\} $ with $b_{1}$, $b_{2}$, $...$, $b_{k}$ being
pairwise distinct. We identify the set of vertices of $G$ with $\left\{
1,2,...,\left\vert V\right\vert \right\} $ in such a way that $i=v_{i}$ for
every $i\in\left\{ 1,2,...,\left\vert V\right\vert \right\} $''.
\item \textbf{Page 4, proof of Theorem 2.5:} Replace ``$\prod\limits_{1\leq
i\leq k}M_{i,\pi\left( i\right) }$'' by ``$\prod\limits_{1\leq i\leq
k}m_{a_{i},b_{\pi\left( i\right) }}$''.
\item \textbf{Page 5, proof of Theorem 2.5:} You write: ``Choose the smallest
$q$ such that the vertex $\operatorname{tail}(e_{q})$ lies in $\mathbf{C}$ or
in some $P_{i^{\prime}}$ with $i^{\prime}>i$, or $\operatorname{tail}%
(e_{q})=\operatorname{tail}(e_{r})$ for some $r>q$.'' In order for this
definition to make sense, the following convention should be made once and for
all: If $a$ and $b$ are two vertices of the graph $G$, and if $\left(
u_{1},u_{2},...,u_{\ell}\right) $ is a path in $G$ starting at $a$ and ending
at $b$, then $\operatorname{tail}\left( u_{\ell+1}\right) $ will be
understood to mean the vertex $b$ (despite the edge $u_{\ell+1}$ not being
defined). This convention has the
consequence that, if $\left( u_{1},u_{2},...,u_{\ell}\right) $ is a path in
$G$, the vertices lying along the path $\left( u_{1},u_{2},...,u_{\ell
}\right) $ are precisely the vertices of the form $\operatorname{tail}\left(
u_{k}\right) $ for some $k\in\left\{ 1,2,...,\ell+1\right\} $.
\item \textbf{Page 5, proof of Theorem 2.5:} Replace ``$P_{i}^{\ast}%
=(e_{1},\ldots,e_{q-1}h_{q^{\prime}}h_{q^{\prime}+1},\ldots,h_{m^{\prime}})$''
by ``$P_{i}^{\ast}=(e_{1},\ldots,e_{q-1},h_{q^{\prime}},h_{q^{\prime}+1}%
,\ldots,h_{m^{\prime}})$'' (there were commas missing).
\item \textbf{Page 6, proof of Theorem 2.5:} You write: ``It is easy to see
that, with this definition, the image $(\mathbf{P}^{\ast},\mathbf{C}^{\ast})$
is again a pair of the required kind, i.e. $\mathbf{P}^{\ast}\in
\mathcal{P}_{A,B}(G)$, $\mathbf{C}^{\ast}\in\mathcal{C}(G)$, and
$(\mathbf{P}^{\ast},\mathbf{C}^{\ast})\notin\mathcal{F}_{A,B}(G)$.'' Here,
``$\mathcal{P}_{A,B}(G)$'' should be replaced by ``$\widetilde{\mathcal{P}}%
_{A,B}(G)$''.
\item \textbf{Page 6, proof of Theorem 2.5:} You prove that $\varphi$ is an
involution in the second case. Your proof of this fact tacitly relies on the
observation that, in the second case, the path $P_{i}^{\ast}$ begins with
$e_{1},e_{2},...,e_{q}$ (so, none of the first $q$ edges of the path $P_{i}$
get removed when passing to $P_{i}^{\ast}$). This is a direct consequence of
the following (easily proven) observation: In the second case, the integer $q$
is smaller than each of $t$ and $u$, and if $t\neq\infty$, it satisfies $q\leq
s$. In my opinion, both of these (arguably very obvious) observations deserve
to be explicitly stated.
\item \textbf{Page 7, proof of Theorem 2.5:} Replace ``its first cycles'' by
``its first cycle''.
\end{itemize}
\subsection{Comment on relation to [Tal08]}
On page 1, you write: ``The involution is adapted from the author's work on
total positivity in Grassmannians [Tal08], in which planar graphs with
directed cycles play a key role, though the analogues of weighted path
matrices are rather different in the Grassmannian setting.''.
In case you aren't already aware of this: I believe Theorem 3.2 of [Tal08]
\textbf{can} be deduced from Theorem 2.5; that is, I have an argument that is
complete up to some combinatorial-geometric considerations about points on a
circle which, knowing that both theorems are valid, should be correct and not
too difficult to prove. Here is a sketch.
Label $I$ as $\left\{ i_{1},i_{2},...,i_{k}\right\} $, and $J$ as $\left\{
j_{1},j_{2},...,j_{k}\right\} $.
I don't think you ever say in [Tal08] what ring you are working over, but
since you are proving identities between formal power series, we can WLOG
assume that the ground ring is $\mathbb{Z}$. Hence, we can WLOG assume that
the ground ring is $\mathbb{C}$. Assume the latter.
An edge will be called a \textit{boundary edge} if it is incident to a
boundary vertex. By performing an isotopy, we can WLOG assume that every
boundary edge is normal to the circle. Assume this.
Since our graph $G$ is perfectly oriented, we can WLOG assume that every walk
is a smooth curve (because at every point, either there is only one outgoing
edge and we need only to adjust the ends of all incoming edges, or there is
only one incoming edge and we need only to adjust the ends of all outgoing
edges). Assume this. (By the way, don't you use such an assumption when you
define the winding index? Or, if a walk can fail to be smooth, how do you tell
apart a rotation by $50^{\circ}$ from a rotation by $-310^{\circ}$?)
For every walk $w$, let $\Delta\measuredangle\left( w\right) $ denote the
angular increment of the direction of the tangent vector to $w$ at a point on
$w$, as the point traverses $w$ from its beginning to its end. This is a
well-defined angle in $\mathbb{R}$, not just in $\mathbb{R}\diagup\left(
2\pi\mathbb{Z}\right) $, due to the smoothness of $w$. Consider edges of $G$
as $1$-edge walks. Then, of course, $\Delta\measuredangle\left( e_{1}%
,e_{2},...,e_{m}\right) =\Delta\measuredangle\left( e_{1}\right)
+\Delta\measuredangle\left( e_{2}\right) +...+\Delta\measuredangle\left(
e_{m}\right) $ whenever $\left( e_{1},e_{2},...,e_{m}\right) $ is a walk.
If a walk $w$ is a cycle, then $\Delta\measuredangle\left( w\right) =2\pi$.
For any path $p$, denote the beginning and the end of $p$ by
$\operatorname*{tail}p$ and $\operatorname*{head}p$, respectively.
Assume the boundary circle to be the unit circle in $\mathbb{C}$. WLOG assume
that $i_{1}=1$.
For any two points $u$ and $v$ on the unit circle, let $\overrightarrow{u,v}$
be the length of the counterclockwise arc from $u$ to $v$ on the circle; this
length is an element of the interval $\left[ 0,2\pi\right) $. Let
$\operatorname*{arc}\left( u,v\right) $ be this arc itself (including the
endpoints). For every point $t$ on the unit circle, let $R\left( t\right) $
be the complex number%
\[
\exp\left( \dfrac{1}{2}i\left( \overrightarrow{1,t}-\pi\right) \right)
=-i\exp\left( \dfrac{1}{2}i\overrightarrow{1,t}\right) =-i\sqrt{t}%
\]
(where we are taking the square root that has argument $<\pi$). Then, any two
points $u$ and $v$ on the unit circle satisfy%
\begin{equation}
\exp\left( \dfrac{1}{2}i\left( \overrightarrow{u,v}-\pi\right) \right)
=-i\dfrac{R\left( v\right) }{R\left( u\right) }\cdot\left( -1\right)
^{\left[ 1\in\operatorname*{arc}\left( u,v\right) \right] }, \tag{A0}%
\end{equation}
where $\left[ \mathcal{A}\right] $ denotes the truth value of an assertion
$\mathcal{A}$ (that is, $\left\{
\begin{array}
[c]{c}%
1\text{, if }\mathcal{A}\text{ is true;}\\
0\text{, if }\mathcal{A}\text{ is false}%
\end{array}
\right. $).
Now, apply Theorem 2.5 of ``Determinants of weighted path matrices'' to $A=I$
and $B=J$. This yields%
\begin{equation}
\Delta_{I,J}\left( \widetilde{M}\right) =\dfrac{\sum\limits_{\mathbf{F}%
\in\mathcal{F}_{I,J}\left( G\right) }\operatorname*{sgn}\mathbf{F}%
\cdot\operatorname*{wt}\mathbf{F}}{\sum\limits_{\mathbf{C}\in\mathcal{C}%
\left( G\right) }\operatorname*{sgn}\mathbf{C}\cdot\operatorname*{wt}%
\mathbf{C}}, \tag{A1}%
\end{equation}
where $\widetilde{M}$ is the same matrix as $A\left( N\right) $ except with
no $\left( -1\right) ^{s\left( i_{t},j\right) }$ factors and no $\left(
-1\right) ^{\operatorname*{wind}\left( P\right) }$ factors in its entries.
Now, in this formula, for every edge $e$ of $G$, substitute $\exp\left(
\dfrac{i}{2}\Delta\measuredangle\left( e\right) \right) x_{e}$ for $x_{e}$.
As a consequence, the weight of any path, any self-avoiding flow and any cycle
collection gets multiplied by a scalar factor. Let us see what the factors
are. For a cycle collection $\mathbf{C}$, the factor by which
$\operatorname*{wt}\mathbf{C}$ changes is precisely $\left( -1\right)
^{\left\vert \mathbf{C}\right\vert }=\operatorname*{sgn}\mathbf{C}$, whence
the $\operatorname*{sgn}\mathbf{C}$ in the denominator of the right hand side
of (A1) disappears. So the denominator on the right hand side becomes
precisely the one in the formula in Theorem 3.2.
Let us see what happens to the left hand side of (A1). For a walk $w$ from one
boundary vertex to another, the scalar factor by which $\operatorname*{wt}w$
changes is (unless I have made a mistake)%
\begin{align}
& \left( -1\right) ^{\operatorname*{wind}w}\cdot\exp\left( \dfrac{1}%
{2}i\left( \overrightarrow{\operatorname*{tail}w,\operatorname*{head}w}%
-\pi\right) \right) \tag{A2}\\
& =\left( -1\right) ^{\operatorname*{wind}w}\cdot\left( -i\dfrac{R\left(
\operatorname*{head}w\right) }{R\left( \operatorname*{tail}w\right) }%
\cdot\left( -1\right) ^{\left[ 1\in\operatorname*{arc}\left(
\operatorname*{tail}w,\operatorname*{head}w\right) \right] }\right)
\ \ \ \ \ \ \ \ \ \ \left( \text{by (A0)}\right) .\nonumber
\end{align}
Now, before the substitution, $\Delta_{I,J}\left( \widetilde{M}\right) $ was
the determinant of a matrix whose $\left( \alpha,\beta\right) $-th entry is
the weighted sum of all walks from $i_{\alpha}$ to $j_{\beta}$. So, after our
substitution, this $\left( \alpha,\beta\right) $-th entry becomes
\[
-i\dfrac{R\left( j_{\beta}\right) }{R\left( i_{\alpha}\right) }%
\cdot\left( -1\right) ^{\left[ 1\in\operatorname*{arc}\left( i_{\alpha
},j_{\beta}\right) \right] }%
\]
times the weighted \textbf{signed} sum of all walks $w$ from $i_{\alpha}$ to
$j_{\beta}$, with the sign being $\left( -1\right) ^{\operatorname*{wind}w}%
$. The sum being \textbf{signed} is a good sign, because the same sign
$\left( -1\right) ^{\operatorname*{wind}w}$ appears in the $\left(
\alpha,\beta\right) $-th entry of $A\left( N\right) $. But we still have to
get rid of the $-i\dfrac{R\left( j_{\beta}\right) }{R\left( i_{\alpha
}\right) }\cdot\left( -1\right) ^{\left[ 1\in\operatorname*{arc}\left(
i_{\alpha},j_{\beta}\right) \right] }$ factor, and get the $\left(
-1\right) ^{s\left( i_{t},j\right) }$ factor of $A\left( N\right) $.
Let us forget about the $-i\dfrac{R\left( j_{\beta}\right) }{R\left(
i_{\alpha}\right) }$ part of the $-i\dfrac{R\left( j_{\beta}\right)
}{R\left( i_{\alpha}\right) }\cdot\left( -1\right) ^{\left[ 1\in
\operatorname*{arc}\left( i_{\alpha},j_{\beta}\right) \right] }$ factor,
because this part has the form ``something of $\beta$ divided by something of
$\alpha$'' and thus just contributes scalar factors to the determinant. There
remains the $\left( -1\right) ^{\left[ 1\in\operatorname*{arc}\left(
i_{\alpha},j_{\beta}\right) \right] }$ factor, which is not of this form.
For every point $t$ on the unit circle, let $s\left( t\right) $ be the
number of elements of $I$ on $\operatorname*{arc}\left( 1,t\right) $. Then,
we have something like $\left( -1\right) ^{s\left( i_{\alpha},j_{\beta
}\right) }=\left( -1\right) ^{s\left( j_{\beta}\right) -s\left(
i_{\alpha}\right) +\left[ 1\in\operatorname*{arc}\left( i_{\alpha}%
,j_{\beta}\right) \right] }$, with perhaps a correction term in the case
when $j_{\beta}\in I$ (but this case is trivial anyway, since the
corresponding column of $\widetilde{M}$ has only one $1$ and lots of $0$'s).
This, of course, is $\left( -1\right) ^{-s\left( i_{\alpha}\right) }%
\cdot\left( -1\right) ^{s\left( j_{\beta}\right) }\cdot\left( -1\right)
^{\left[ 1\in\operatorname*{arc}\left( i_{\alpha},j_{\beta}\right) \right]
}$, which has the form ``something of $\alpha$ multiplied by something of
$\beta$ multiplied by $\left( -1\right) ^{\left[ 1\in\operatorname*{arc}%
\left( i_{\alpha},j_{\beta}\right) \right] }$ ''. So the $\left( -1\right)
^{\left[ 1\in\operatorname*{arc}\left( i_{\alpha},j_{\beta}\right) \right]
}$ sign we couldn't get rid of in the $\left( \alpha,\beta\right) $-th entry
of $\Delta_{I,J}\left( \widetilde{M}\right) $ is precisely the $\left(
-1\right) ^{s\left( i_{\alpha},j_{\beta}\right) }$ sign in the $\left(
i_{\alpha},j_{\beta}\right) $-th entry of $A\left( N\right) $, up to a
scalar factor that can be pulled out of the determinant. As a consequence,
$\Delta_{I,J}\left( \widetilde{M}\right) $ and $A\left( N\right) $ differ
at most by a rather manageable scalar factor. This means that the left hand
side of (A1) is more or less what we want.
Finally, we need to see what happens to the numerator of the right hand side
of (A1). For any self-avoiding path $p$ from one boundary vertex to another,
the scalar factor by which $\operatorname*{wt}p$ changes under our
substitution is
\begin{align*}
& \underbrace{\left( -1\right) ^{\operatorname*{wind}p}}%
_{\substack{=1\\\text{(since }p\text{ is self-avoiding}\\\text{and thus has
winding number }0\text{)}}}\cdot\exp\left( \dfrac{1}{2}i\left(
\overrightarrow{\operatorname*{tail}p,\operatorname*{head}p}-\pi\right)
\right) \ \ \ \ \ \ \ \ \ \ \left( \text{by (A2)}\right) \\
& =\exp\left( \dfrac{1}{2}i\left( \overrightarrow{\operatorname*{tail}%
p,\operatorname*{head}p}-\pi\right) \right) .
\end{align*}
Thus, for a self-avoiding flow $\mathbf{F}$, the scalar factor by which
$\operatorname*{wt}\mathbf{F}$ changes is
\[
\underbrace{\left( -1\right) ^{\left\vert \mathbf{C}\right\vert }%
}_{=\operatorname*{sgn}\mathbf{C}}\cdot\prod\limits_{\ell=1}^{k}\exp\left(
\dfrac{1}{2}i\left( \overrightarrow{i_{\ell},j_{\sigma\left( \ell\right) }%
}-\pi\right) \right) =\operatorname*{sgn}\mathbf{C}\cdot\prod\limits_{\ell
=1}^{k}\exp\left( \dfrac{1}{2}i\left( \overrightarrow{i_{\ell}%
,j_{\sigma\left( \ell\right) }}-\pi\right) \right) ,
\]
where we write the self-avoiding flow $\mathbf{F}$ as $\left( \mathbf{P}%
,\mathbf{C}\right) $ and where $\sigma$ is the permutation of $\left\{
1,2,...,k\right\} $ corresponding to $\mathbf{P}$ (it would be a bad idea to
call it $\pi$ here...). So our substitution changed the $\operatorname*{sgn}%
\mathbf{F}$ on the right hand side of (A1) into%
\begin{align*}
& \underbrace{\operatorname*{sgn}\mathbf{F}}_{=\operatorname*{sgn}%
\mathbf{P}\cdot\operatorname*{sgn}\mathbf{C}}\cdot\operatorname*{sgn}%
\mathbf{C}\cdot\prod\limits_{\ell=1}^{k}\exp\left( \dfrac{1}{2}i\left(
\overrightarrow{i_{\ell},j_{\sigma\left( \ell\right) }}-\pi\right) \right)
\\
& =\underbrace{\operatorname*{sgn}\mathbf{P}}_{=\operatorname*{sgn}\sigma
}\cdot\underbrace{\operatorname*{sgn}\mathbf{C}\cdot\operatorname*{sgn}%
\mathbf{C}}_{=1}\cdot\prod\limits_{\ell=1}^{k}\exp\left( \dfrac{1}{2}i\left(
\overrightarrow{i_{\ell},j_{\sigma\left( \ell\right) }}-\pi\right) \right)
\\
& =\operatorname*{sgn}\sigma\cdot\prod\limits_{\ell=1}^{k}\exp\left(
\dfrac{1}{2}i\left( \overrightarrow{i_{\ell},j_{\sigma\left( \ell\right) }%
}-\pi\right) \right) .
\end{align*}
Now, with the hindsight of knowing that Theorem 3.2 is true, there is no other
option than this complex number being exactly the scalar factor we got on the
left hand side. Of course, the details of this are going to be ugly -- but
they \textbf{can}, with sufficient will, be filled in; as far as I am
concerned, I am happy knowing that yet another theorem about planar graphs
factors into a theorem about arbitrary graphs and a (messy, admittedly)
argument about planarity.
\end{document}