\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{framed}
\usepackage{amsmath}
\usepackage{comment}
\usepackage{needspace}
\usepackage{xcolor}
\usepackage[breaklinks=true]{hyperref}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{amsthm}
\usepackage{ytableau}
\usepackage{tabu}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Tuesday, June 19, 2018 23:05:50}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
% ---------------------------------------------------------------------------
% Theorem-like environments.  Each \newtheorem declares a numbered inner
% environment (all sharing the `theo` counter, which resets per section);
% the matching \newenvironment wraps it in a `leftbar` (framed package) so
% that statements are set off by a vertical bar in the left margin.  The
% optional argument becomes the theorem's bracketed title.
% ---------------------------------------------------------------------------
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
% NOTE(review): the wrapper below is named `condition` although it prints
% "Convention" -- looks like a template quirk; confirm before renaming,
% since renaming would break any \begin{condition} uses in the body.
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
% NOTE(review): the `todo` wrapper prints "TODO" via the `quest` theorem.
\newtheorem{quest}[theo]{TODO}
\newenvironment{todo}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
% NOTE(review): the wrapper below is named `conclusion` although it prints
% "Warning" -- presumably another template quirk; confirm before changing.
\newtheorem{warn}[theo]{Warning}
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{questn}[theo]{Question}
\newenvironment{question}[1][]
{\begin{questn}[#1]\begin{leftbar}}
{\end{leftbar}\end{questn}}
\newtheorem{exmp}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
% Plain quoted-statement environment (unnumbered, no bar).
\newenvironment{statement}{\begin{quote}}{\end{quote}}
% Alternative `proof` environment -- deliberately disabled (\iffalse...\fi);
% the amsthm `proof` environment is used instead.
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
% Version-toggle environments, controlled by the `comment` package:
% material inside `vershort` is typeset, while `verlong`, `noncompile`
% and `obsolete` are stripped at compile time.  (The no-op
% \newenvironment declarations are placeholders; the subsequent
% \includecomment/\excludecomment calls redefine them.)
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\newenvironment{obsolete}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\excludecomment{obsolete}
% --- Notational shorthands --------------------------------------------------
% \kk and \bk both denote the base ring (bold k); \NN the natural numbers,
% \Nplus the positive integers.
\newcommand{\kk}{\mathbf{k}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\ev}{\operatorname{ev}}
\newcommand{\Comp}{\operatorname{Comp}}
\newcommand{\Sym}{\operatorname{Sym}}
\newcommand{\Mat}{\operatorname{M}}
\newcommand{\bk}{\mathbf{k}}
\newcommand{\Nplus}{\mathbb{N}_{+}}
\newcommand{\NN}{\mathbb{N}}
% Calligraphic letters.
\newcommand{\calX}{\mathcal{X}}
\newcommand{\calJ}{\mathcal{J}}
\newcommand{\calS}{\mathcal{S}}
\newcommand{\calQ}{\mathcal{Q}}
\newcommand{\calT}{\mathcal{T}}
% \arxiv{id} typesets a hyperlinked arXiv identifier.
\newcommand\arxiv[1]{\href{http://www.arxiv.org/abs/#1}{\texttt{arXiv:#1}}}
% xy-pic arrow shorthands: curved arrow, injection, surjection, "maps to".
\newcommand\arcstr{\ar@/^1pc/}
\newcommand{\arinj}{\ar@{_{(}->}}
\newcommand{\arsurj}{\ar@{->>}}
\newcommand{\arelem}{\ar@{|->}}
% Force \sum and \prod to place their limits above/below even in inline math.
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
% Page dimensions and running headers (scrlayer-scrpage: inner/outer head).
\setlength\textheight{22.5cm}
\setlength\textwidth{15cm}
\ihead{Errata to ``Notes for Math 740''}
\ohead{\today}
\begin{document}
\begin{center}
\textbf{Notes for Math 740 (Symmetric Functions)}

\textit{Steven V. Sam}

version of 27 April 2018

\url{https://www.math.wisc.edu/~svs/740/notes.pdf}

\textbf{Errata and addenda by Darij Grinberg}
\bigskip
\end{center}
This is a (slightly haphazard) list of corrections and comments I have to the
\textquotedblleft Notes for Math 740 (Symmetric Functions)\textquotedblright.
I will refer to the results appearing in these notes by the numbers under
which they appear in the notes (specifically, in their version linked above).
(I have read most of the notes, minus the geometric parts of \S 5, a part of
\S 8 that got too confusing for me, and \S 9.)
\setcounter{section}{12}
\section{Errata}
\begin{itemize}
\item \textbf{page 1, \S 1.1:} On the last line of page 1, you use the
notation $\Lambda$, but you don't define it until later. It might be better to
first define the homomorphism $\pi_{n}:R\rightarrow\mathbf{Z}\left[
x_{1},x_{2},\ldots,x_{n}\right] $, and only later restrict this $\pi_{n}$ to
$\Lambda$ (once $\Lambda$ is defined).
\item \textbf{page 2, Remark 1.1.1:} \textquotedblleft inverse limit of the
$\Lambda\left( n\right) $\textquotedblright\ $\rightarrow$ \textquotedblleft
inverse limit of the rings $\Lambda\left( n\right) $\textquotedblright.
In fact, $\Lambda$ is the inverse limit of the \textbf{graded} rings
$\Lambda\left( n\right) $ (that is, the inverse limit of the $\Lambda\left(
n\right) $ in the category whose objects are the graded rings and whose
morphisms are the degree-preserving homomorphisms of graded rings). This is
actually important, as you later use it to define the Hall-Littlewood
symmetric function $P_{\lambda}\left( x;t\right) $ on page 53.
\item \textbf{page 3, Example 1.2.3, second bullet point:} \textquotedblleft%
$h_{n}$\textquotedblright\ $\rightarrow$ \textquotedblleft$h_{d}%
$\textquotedblright.
\item \textbf{page 3, Example 1.2.3, third bullet point:} \textquotedblleft%
$e_{n}$\textquotedblright\ $\rightarrow$ \textquotedblleft$e_{d}%
$\textquotedblright.
\item \textbf{page 4, footnote }$^{1}$\textbf{:} After \textquotedblleft In
the \textbf{English convention}\textquotedblright, add \textquotedblleft(which
is the one we will use)\textquotedblright.
\item \textbf{page 5, \S 1:} Before defining the three partial orders, you
need to explain that if $\lambda=\left( \lambda_{1},\lambda_{2}%
,\ldots,\lambda_{k}\right) $ is a partition, then $\lambda_{i}$ is understood
to mean $0$ for each $i>k$. (This is sort-of forced by the second sentence of
\S 1.3, but this might not be explicit enough to clear up reader-side confusion.)
\item \textbf{page 6, \S 2.1:} In the (displayed) formula for $m_{3,2,1}$,
replace \textquotedblleft$x_{i}x_{j}x_{k}$\textquotedblright\ by
\textquotedblleft$x_{i}^{3}x_{j}^{2}x_{k}$\textquotedblright.
\item \textbf{page 7, proof of Theorem 2.2.3:} \textquotedblleft Then there is
$\left( 0,1\right) $-matrix\textquotedblright\ $\rightarrow$
\textquotedblleft Then there is a $\left( 0,1\right) $-matrix $A$%
\textquotedblright.
\item \textbf{page 7, proof of Theorem 2.2.3:} \textquotedblleft as least as
many\textquotedblright\ $\rightarrow$ \textquotedblleft at least as
many\textquotedblright.
\item \textbf{page 7, proof of Theorem 2.3.1:} Directly after (2.3.2), replace
\textquotedblleft$\prod_{n\geq1}$\textquotedblright\ by \textquotedblleft%
$\prod_{i\geq1}$\textquotedblright. And do the same on the second-to-last-line
of page 7.
\item \textbf{page 8, \S 2.5:} After \textquotedblleft For a partition
$\lambda=\left( \lambda_{1},\ldots,\lambda_{k}\right) $\textquotedblright,
add \textquotedblleft(with all of $\lambda_{1},\ldots,\lambda_{k}$
positive)\textquotedblright. The definition of $p_{\lambda}$ doesn't tolerate
trailing zeroes (unless you set $p_{0}=1$, which is somewhat artificial).
\item \textbf{page 9, proof of Theorem 2.5.1:} \textquotedblleft be
reordering\textquotedblright\ $\rightarrow$ \textquotedblleft by
reordering\textquotedblright.
\item \textbf{page 9, proof of Theorem 2.5.1:} I know how this is proven, but
I must say I don't understand your argument (beginning with \textquotedblleft
For each $\lambda_{j}$ with $j\leq i$\textquotedblright).
\item \textbf{page 11, \S 2.6:} You write: \textquotedblleft we need to work
in two sets of variables $x$ and $y$ and in the ring $\Lambda\otimes\Lambda$
where the $x$'s and $y$'s are separately symmetric\textquotedblright.
I think you want to work in the ring $\mathbf{Z}\left[ \left[ x_{1}%
,x_{2},\ldots,y_{1},y_{2},\ldots\right] \right] $ instead. The ring
$\Lambda\otimes\Lambda$ does not contain \textbf{infinite} sums such as
$\sum_{\lambda}u_{\lambda}\left( x\right) v_{\lambda}\left( y\right) $;
they only exist in its completion, which is a whole new can of worms to open.
Doing it justice would require showing that the $s_{\lambda}\left( x\right)
\otimes s_{\mu}\left( y\right) $ for distinct pairs $\left( \lambda
,\mu\right) $ of partitions are linearly independent in this completion, in
an appropriate sense (i.e., even infinite linear combinations don't vanish).
Meanwhile, in $\mathbf{Z}\left[ \left[ x_{1},x_{2},\ldots,y_{1},y_{2}%
,\ldots\right] \right] $, everything is fairly simple.
\item \textbf{page 11, Lemma 2.6.1:} Be careful with this -- convergence isn't
guaranteed! For example, $\left( 1+s_{\lambda}\right) _{\lambda\text{ is a
partition}}$ is certainly a basis of $\Lambda_{\mathbf{Q}}$, but if you take
both $\left( u_{\lambda}\right) $ and $\left( v_{\lambda}\right) $ to be
this basis, then the sum $\sum_{\lambda}u_{\lambda}\left( x\right)
v_{\lambda}\left( y\right) =\sum_{\lambda}\left( 1+s_{\lambda}\left(
x\right) \right) \left( 1+s_{\lambda}\left( y\right) \right) $ is not
well-defined (it has infinitely many $1$ addends once you expand the
parentheses). The safest way to dispel this problem is to require the bases
$\left( u_{\lambda}\right) $ and $\left( v_{\lambda}\right) $ to be
\textbf{graded} (i.e., for each partition $\lambda$, both $u_{\lambda}$ and
$v_{\lambda}$ should be homogeneous symmetric functions of degree $\left\vert
\lambda\right\vert $). Then, the sum $\sum_{\lambda}u_{\lambda}\left(
x\right) v_{\lambda}\left( y\right) $ converges (in the formal sense).
\item \textbf{page 11, proof of Lemma 2.6.1:} On the first line of this proof,
you write \textquotedblleft$u_{\lambda}=\sum_{\alpha}a_{\lambda,\rho}%
m_{\alpha}$\textquotedblright. Replace \textquotedblleft$a_{\lambda,\rho}%
$\textquotedblright\ by \textquotedblleft$a_{\lambda,\alpha}$%
\textquotedblright\ here.
\item \textbf{page 11, proof of Lemma 2.6.1:} The matrices $A$ and $B^{T}$ are
infinite; thus, it is not immediately clear why $AB^{T}=I$ is equivalent to
$B^{T}A=I$ (and why $AB^{T}$ and $B^{T}A$ are well-defined to begin with). If
you have required the bases $\left( u_{\lambda}\right) $ and $\left(
v_{\lambda}\right) $ to be graded in the lemma, then this is easy to resolve
(just notice that $A$ and $B$ are block-diagonal matrices, with each block
being a finite square matrix corresponding to a certain graded component of
$\Lambda$ or $\Lambda_{\mathbf{Q}}$). This also shows why all sums appearing
in this proof converge.
\item \textbf{page 12, proof of Proposition 2.6.4:} On the third line of the
computation, \textquotedblleft$\exp\left( \dfrac{p_{n}\left( x\right)
p_{n}\left( y\right) }{n}\right) $\textquotedblright\ should be
\textquotedblleft$\exp\left( \sum_{n\geq1}\dfrac{p_{n}\left( x\right)
p_{n}\left( y\right) }{n}\right) $\textquotedblright.
\item \textbf{page 12, proof of Proposition 2.6.4:} On the fourth line of the
computation, \textquotedblleft$\sum_{d\geq0}\dfrac{p_{n}\left( x\right)
^{d}p_{n}\left( y\right) ^{d}}{d!n^{d}}$\textquotedblright\ should be
\textquotedblleft$\prod_{n\geq1}\sum_{d\geq0}\dfrac{p_{n}\left( x\right)
^{d}p_{n}\left( y\right) ^{d}}{d!n^{d}}$\textquotedblright.
\item \textbf{page 12, proof of Corollary 2.6.5:} At the end of the displayed
equation, \textquotedblleft$=\varepsilon_{\lambda}\varepsilon_{\mu}%
\delta_{\lambda,\mu}$\textquotedblright\ should be \textquotedblleft%
$=z_{\lambda}\varepsilon_{\lambda}\varepsilon_{\mu}\delta_{\lambda,\mu}%
$\textquotedblright. Also, remove the period at the end of this equation,
since the sentence goes on after it.
\item \textbf{page 12, proof of Corollary 2.6.5:} \textquotedblleft is the
same as $\delta_{\lambda,\mu}$\textquotedblright\ $\rightarrow$
\textquotedblleft is the same as $z_{\lambda}\delta_{\lambda,\mu}%
$\textquotedblright.
\item \textbf{page 13, \S 2.7:} Add a period at the end of the second
displayed equation of \S 2.7.
\item \textbf{page 14, \S 3.1:} \textquotedblleft A \textbf{semistandard Young
tableaux}\textquotedblright\ $\rightarrow$ \textquotedblleft A
\textbf{semistandard Young tableau}\textquotedblright. Note that
\textquotedblleft tableaux\textquotedblright\ is the plural form.
\item \textbf{page 14, \S 3.1:} It is best to explain what a \textquotedblleft
natural number\textquotedblright\ is. I suspect you don't count $0$ as a
natural number.
\item \textbf{page 14, \S 3.1:} \textquotedblleft The type of a
SSYT\textquotedblright\ $\rightarrow$ \textquotedblleft The \textbf{type} of a
SSYT\textquotedblright\ (this is a definition).
\item \textbf{page 14, \S 3.1:} \textquotedblleft natural numbers of this
Young diagram\textquotedblright\ $\rightarrow$ \textquotedblleft natural
numbers to the boxes of this Young diagram\textquotedblright.
\item \textbf{page 15, proof of Theorem 3.1.4:} \textquotedblleft Let $T$ be a
SSYT of shape $\alpha$\textquotedblright\ $\rightarrow$ \textquotedblleft Let
$T$ be a SSYT of shape $\lambda/\mu$ and type $\alpha$\textquotedblright.
\item \textbf{page 17, proof of Proposition 3.2.2:} \textquotedblleft If not,
then $T_{i+1,j}\leq T_{i,j^{\prime}}$\textquotedblright%
\ should be \textquotedblleft$b=T_{i,j}\leq T_{i,j^{\prime}}$%
\textquotedblright.
\item \textbf{page 17:} Somewhere you should say that your matrix $A$ is
infinite, with both rows and columns indexed by positive integers; but in the
examples, you are only showing a northwestern corner of it that contains all
the nonzero entries.
\item \textbf{page 17:} \textquotedblleft This value gets added to some new
box\textquotedblright\ $\rightarrow$ \textquotedblleft The tableau $P\left(
t+1\right) $ has a new box that $P\left( t\right) $ does not
have\textquotedblright. (Otherwise, what is \textquotedblleft this
value\textquotedblright? It isn't $\left( w_{A}\right) _{2,t+1}$.)
\item \textbf{page 18, proof of Lemma 3.2.5:} You write: \textquotedblleft
i.e., that $\left( w_{A}\right) _{1,k}=\left( w_{A}\right) _{1,k+1}%
$\textquotedblright. But it doesn't suffice to consider only two consecutive
insertion steps here; perhaps a column of $Q$ has two equal values coming from
$\left( w_{A}\right) _{1,k}$ and $\left( w_{A}\right) _{1,k+3}$ ? It is
probably best to argue not by contradiction, but instead say something like
\textquotedblleft We shall prove that if $\left( w_{A}\right) _{1,k}=\left(
w_{A}\right) _{1,k+1}=\cdots=\left( w_{A}\right) _{1,m}$, then the boxes
that get added to $P\left( k-1\right) $ to obtain $P\left( k\right)
,P\left( k+1\right) ,\ldots,P\left( m\right) $ (in this order) move
further and further right (i.e., if $k\leq i<j\leq m$, then the box added in
passing from $P\left( i-1\right) $ to $P\left( i\right) $ lies strictly to
the left of the box added in passing from $P\left( j-1\right) $ to $P\left(
j\right) $)\textquotedblright.
\item \textbf{page 35, \S 5.2:} The word \textquotedblleft
clear\textquotedblright\ in \textquotedblleft it is clear that this is a
unique way\textquotedblright\ is an exaggeration. This claim is equivalent to
the uniqueness of the row reduced echelon form for a matrix (at least for a
surjective matrix, but I don't think this case is any easier than the general
case), and is one of the harder results in a standard linear algebra course.
\item \textbf{page 35, Example 5.2.1:} \textquotedblleft shorthand for an
arbitrary complex number\textquotedblright\ $\rightarrow$ \textquotedblleft
shorthand for arbitrary complex numbers\textquotedblright.
\item \textbf{page 36, proof of Proposition 5.2.4:} You are only proving
$\bigcup_{\mu\supseteq\lambda}X_{\mu}^{\circ}\subseteq\overline{X_{\lambda
}^{\circ}}$ here. The reverse inclusion also needs to be proven.
\item \textbf{page 36, proof of Proposition 5.2.4:} After \textquotedblleft is
in the closure\textquotedblright, add \textquotedblleft of $X_{\lambda}%
^{\circ}$\textquotedblright.
\item \textbf{page 36:} In the definition of a \textquotedblleft complete
flag\textquotedblright, replace \textquotedblleft$F_{1}\subset F_{2}%
\subset\cdots\subset F_{n-1}\subset V$\textquotedblright\ by \textquotedblleft%
$0=F_{0}\subset F_{1}\subset F_{2}\subset\cdots\subset F_{n-1}\subset F_{n}%
=V$\textquotedblright. You do need $F_{0}$ and $F_{n}$, as you refer to them
several times below.
\item \textbf{page 37, proof of Lemma 5.2.6:} \textquotedblleft where
$\mu_{r-j+1}\geq\lambda_{r-j+1}$\textquotedblright\ $\rightarrow$
\textquotedblleft where $\mu_{j}\geq\lambda_{j}$\textquotedblright\ (since
$i_{j}$ has nothing to do with $\lambda_{r-j+1}$).
\item \textbf{page 37, proof of Lemma 5.2.7:} \textquotedblleft From what
we've shown\textquotedblright\ $\rightarrow$ \textquotedblleft From the
definition\textquotedblright.
\item \textbf{page 37, proof of Lemma 5.2.7:} Replace \textquotedblleft%
$E,F\subset\mathbf{C}^{n}$\textquotedblright\ by \textquotedblleft%
$E,F\subseteq\mathbf{C}^{n}$\textquotedblright\ (since the $\subset$ sign
suggests proper containedness).
\item \textbf{page 38, proof of Theorem 5.3.2:} At the beginning of the proof
of Claim 1, I'd add the following sentence: \textquotedblleft Both $C$ and
$\bigcap_{i=0}^{r}\left( A_{i}+B_{r-i}\right) $, as well as each of the
spaces $A_{i}+B_{r-i}$, are spans of some of the standard basis vectors
$e_{1},e_{2},\ldots,e_{n}$.\textquotedblright\ (This justifies focussing on
the basis vectors $e_{p}$ contained in these spaces when proving their mutual inclusions.)
\item \textbf{page 38, proof of Theorem 5.3.2:} In the proof of Claim 1,
\textquotedblleft Pick $e_{p}\in C_{j}$\textquotedblright\ should be
\textquotedblleft Pick $j\in\left\{ 1,2,\ldots,r\right\} $ and $e_{p}\in
C_{j}$\textquotedblright.
\item \textbf{page 38, proof of Theorem 5.3.2:} In the proof of Claim 1, in
the second displayed equation, \textquotedblleft$n-r+j-\lambda_{j}\leq
p$\textquotedblright\ should be \textquotedblleft$n-r+j-\lambda_{j}\geq
p$\textquotedblright.
\item \textbf{page 38, proof of Theorem 5.3.2:} In the proof of Claim 1, you
write: \textquotedblleft Pick $j$ minimal so that $p\leq n-r+j-\lambda_{j}%
$\textquotedblright. First of all, I'd replace \textquotedblleft Pick
$j$\textquotedblright\ by \textquotedblleft Pick $j\in\left\{ 1,2,\ldots
,r\right\} $\textquotedblright\ here. Also, maybe you should say a few words
about why such a $j$ exists (namely, we have $e_{p}\in\bigcap_{i=0}^{r}\left(
A_{i}+B_{r-i}\right) \subseteq A_{r}+B_{0}=A_{r}$, so that $p\leq
n-r+r-\lambda_{r}$).
\item \textbf{page 39, proof of Theorem 5.3.2:} In the proof of Claim 2,
replace the \textquotedblleft$\subset$\textquotedblright\ sign in
\textquotedblleft$W\subset A_{i}+B_{r-i}$\textquotedblright\ by
\textquotedblleft$\subseteq$\textquotedblright\ (otherwise, it sounds like
\textquotedblleft proper subset\textquotedblright, which you probably don't mean).
\item \textbf{page 39, proof of Theorem 5.3.2:} In the proof of Claim 2,
replace \textquotedblleft$W\subseteq A_{i}+B_{i+1}$ again\textquotedblright%
\ by \textquotedblleft$W\subseteq A_{i}+B_{r-i}$ again\textquotedblright.
\item \textbf{page 39, proof of Theorem 5.3.2:} In Claim 3, it is better to
remove the words \textquotedblleft$\lambda\subseteq\mu$ and\textquotedblright.
After all, you've already assumed that $\lambda_{i}\leq\mu_{i}$ for all $i$
(which means that $\lambda\subseteq\mu$). Better to say \textquotedblleft
Recall that $\lambda\subseteq\mu$ by our above assumption.\textquotedblright.
(If you hadn't assumed $\lambda\subseteq\mu$, then Claim 3 would be false --
for example, $C=C_{1}+\cdots+C_{r}$ is also a direct sum if $\mu_{i}%
<\lambda_{i}$ for all $i$, because in this case all of the $C_{i}$ are $0$.)
\item \textbf{page 39, proof of Theorem 5.3.2:} In the proof of Claim 3, I
suggest replacing \textquotedblleft if and only if $C_{i}\cap C_{j}=0$ for all
$i,j$\textquotedblright\ by \textquotedblleft if and only if no two of the
spaces $C_{1},C_{2},\ldots,C_{r}$ have a basis vector in
common\textquotedblright.
\item \textbf{page 39, proof of Theorem 5.3.2:} The comma before
\textquotedblleft let $c_{i}$ be any nonzero vector\textquotedblright\ should
be a semicolon.
\item \textbf{page 39, proof of Theorem 5.3.2:} After \textquotedblleft If
$W\in X\left( L_{\bullet}\right) _{k}$ as well\textquotedblright, add
\textquotedblleft(where $L_{\bullet}$ is any flag such that $L_{n-k-r+1}%
=L$)\textquotedblright.
\item \textbf{page 39, proof of Theorem 5.3.2:} I am finding the last
paragraph of this proof rather confusing. Why, for example, must the
projection of $c$ to $C_{i}$ be a multiple of $c_{i}$ ? (Why does it lie in
$W$ in the first place?)
\item \textbf{page 40, \S 5.4:} Here, you suddenly start denoting
$\mathbf{Gr}_{r}\left( \mathbf{C}^{n}\right) $ by $\mathbf{Gr}\left(
r,\mathbf{C}^{n}\right) $.
\item \textbf{page 42, proof of Theorem 6.1.1:} After \textquotedblleft Recall
from \S 3.4\textquotedblright, add \textquotedblleft(where our $k$ is taking
the role of the $n$ from \S 3.4)\textquotedblright.
\item \textbf{page 42, proof of Theorem 6.1.1:} In
\[
\sum_{\sigma\in\Sigma_{k}}\operatorname*{sgn}\left( \sigma\right) \dfrac
{n!}{\left( \ell_{1}-k+\sigma\left( 1\right) \right) !\cdots\left(
\lambda_{k}-k+\sigma\left( k\right) \right) !},
\]
replace \textquotedblleft$\left( \lambda_{k}-k+\sigma\left( k\right)
\right) !$\textquotedblright\ by \textquotedblleft$\left( \ell_{k}%
-k+\sigma\left( k\right) \right) !$\textquotedblright.
\item \textbf{page 42, proof of Theorem 6.1.1:} \textquotedblleft the binomial
coefficients make sense\textquotedblright\ $\rightarrow$ \textquotedblleft the
multinomial coefficient makes sense\textquotedblright.
\item \textbf{page 42, proof of Theorem 6.1.1:} After the matrix in the last
displayed equation, add a period.
\item \textbf{page 42, proof of Theorem 6.1.1:} \textquotedblleft and reduce
it to the matrix $a_{\rho}\left( \ell_{1},\ldots,\ell_{k}\right)
$\textquotedblright\ $\rightarrow$ \textquotedblleft and reduce it to the
Vandermonde determinant $a_{\rho}\left( \ell_{1},\ldots,\ell_{k}\right)
$\textquotedblright.
\item \textbf{page 42, proof of Theorem 6.1.1:} \textquotedblleft its hook is
the set\textquotedblright\ $\rightarrow$ \textquotedblleft its \textbf{hook}
is the set\textquotedblright.
\item \textbf{page 42, proof of Theorem 6.1.1:} \textquotedblleft in the
book\textquotedblright\ $\rightarrow$ \textquotedblleft in the
hook\textquotedblright.
\item \textbf{page 43, Example 6.1.3:} After \textquotedblleft in Theorem
6.1.1\textquotedblright, add \textquotedblleft(where we take $k=3$%
)\textquotedblright.
\item \textbf{page 43, proof of Theorem 6.1.2:} \textquotedblleft in the other
boxes\textquotedblright\ $\rightarrow$ \textquotedblleft of the other
boxes\textquotedblright.
\item \textbf{page 44, Theorem 6.2.1:} At the beginning of this theorem, add
\textquotedblleft Let $k\geq\ell\left( \lambda\right) $.
Then\textquotedblright.
\item \textbf{page 44, proof of Theorem 6.2.1:} \textquotedblleft let's us
get\textquotedblright\ $\rightarrow$ \textquotedblleft lets us
get\textquotedblright.
\item \textbf{page 44, proof of Theorem 6.2.1:} Replace \textquotedblleft%
$\dfrac{\det\left( q^{i\left( \lambda_{j}+k-j\right) }\right) _{i,j=1}%
^{k}}{\det\left( q^{i\left( k-j\right) }\right) _{i,j=1}^{k}}%
$\textquotedblright\ by \newline\textquotedblleft$\dfrac{\det\left(
q^{\left( i-1\right) \left( \lambda_{j}+k-j\right) }\right) _{i,j=1}^{k}%
}{\det\left( q^{\left( i-1\right) \left( k-j\right) }\right)
_{i,j=1}^{k}}$\textquotedblright.
\item \textbf{page 44, Corollary 6.2.2:} Have you defined $n\lambda$ ?
\item \textbf{page 44, Theorem 6.2.4:} At the beginning of this theorem, add
\textquotedblleft Let $k\in\mathbf{N}$. Then\textquotedblright.
\item \textbf{page 45, proof of Theorem 6.2.4:} Your argument here works only
when $k\geq\ell\left( \lambda\right) $. The case $k<\ell\left(
\lambda\right) $ requires a different (but simpler) argument: In this case,
$s_{\lambda}\left( 1,\ldots,1\right) =0$ (since there are no semistandard
tableaux of shape $\lambda$ with entries $1,2,\ldots,k$) and also
$\prod_{\left( i,j\right) \in Y\left( \lambda\right) }\dfrac{k+c\left(
i,j\right) }{h\left( i,j\right) }=0$ (since the cell $\left( k+1,1\right)
$ in the $\left( k+1\right) $-st row of $Y\left( \lambda\right) $ yields a
factor of $\dfrac{k+c\left( k+1,1\right) }{h\left( k+1,1\right) }=0$ in
the product).
\item \textbf{page 45, \S 6.3:} It is worth pointing out that the
\textquotedblleft$w_{1}w_{2}\cdots w_{n}$\textquotedblright\ in
\textquotedblleft Let $w=w_{1}w_{2}\cdots w_{n}$\textquotedblright\ is
shorthand for $\left( w_{1},w_{2},\ldots,w_{n}\right) $.
\item \textbf{page 45, \S 6.3:} \textquotedblleft Littlewood--Richardson
tableau\textquotedblright\ should be boldfaced the first time it appears.
\item \textbf{page 46, Remark 6.3.3:} After \textquotedblleft if $\nu/\mu$ is
a horizontal strip\textquotedblright, add \textquotedblleft of size
$d$\textquotedblright.
Same after \textquotedblleft if $\nu/\mu$ is a vertical
strip\textquotedblright.
\item \textbf{page 47, Lemma 7.1.1:} The comma before \textquotedblleft let
$\mu_{i}$\textquotedblright\ should be a semicolon.
\item \textbf{page 47, proof of Lemma 7.1.1:} \textquotedblleft$\#\left\{
\lambda_{j}\mid\lambda_{j}\geq i\right\} $\textquotedblright\ should be
\textquotedblleft$\#\left\{ j\mid\lambda_{j}\geq i\right\} $%
\textquotedblright\ (if you only count the distinct $\lambda_{j}$, then you
often undercount).
\item \textbf{page 47:} \textquotedblleft number of
submodules\textquotedblright\ $\rightarrow$ \textquotedblleft number of
$\mathbf{Z}$-submodules\textquotedblright\ (or of abelian subgroups).
\item \textbf{page 47, Theorem 7.1.2:} After \textquotedblleft such
that\textquotedblright, add \textquotedblleft every prime $p$
satisfies\textquotedblright.
\item \textbf{page 48, proof of Proposition 7.1.6:} At the beginning of this
proof, add the sentence \textquotedblleft Let $r$ be the length of $\lambda
$\textquotedblright. (This $r$ is used on the third line of the proof.)
\item \textbf{page 48, proof of Proposition 7.1.6:} \textquotedblleft abelian
group of type $\lambda$\textquotedblright\ $\rightarrow$ \textquotedblleft
abelian $p$-group of type $\lambda$\textquotedblright.
\item \textbf{page 48, proof of Proposition 7.1.6:} On the first line of the
proof, \textquotedblleft$N\subset M$\textquotedblright\ $\rightarrow$
\textquotedblleft$N\subseteq M$\textquotedblright.
\item \textbf{page 48, proof of Proposition 7.1.6:} \textquotedblleft Now we
count\textquotedblright\ $\rightarrow$ \textquotedblleft Now we assume that
$\lambda/\mu$ is a vertical strip of size $k$, and we count\textquotedblright.
\item \textbf{page 48, proof of Proposition 7.1.6:} \textquotedblleft
submodules\textquotedblright\ $\rightarrow$ \textquotedblleft
subgroups\textquotedblright\ (twice).
\item \textbf{page 48, proof of Proposition 7.1.6:} The claim
\textquotedblleft$N/N_{i}\cong\left( N+p^{i}M\right) /p^{i}M$%
\textquotedblright\ took me a while to justify. Here is how I prove it: We
have%
\[
N_{i}=N\cap\underbrace{S_{i}}_{=S\cap p^{i}M}=\underbrace{N\cap S}%
_{\substack{=N\\\text{(since }N\subseteq S\text{)}}}\cap p^{i}M=N\cap p^{i}M
\]
and thus%
\[
N/\underbrace{N_{i}}_{=N\cap p^{i}M}=N/\left( N\cap p^{i}M\right) =\left(
N+p^{i}M\right) /p^{i}M
\]
(by the second isomorphism theorem).
\item \textbf{page 48--49, proof of Proposition 7.1.6:} The second display on
page 49 says%
\[
\ell\left( W_{i-1}\right) \ell\left( S_{i-1}/N_{i-1}\right) =\left(
\lambda_{i}^{\dag}-\mu_{i}^{\dag}\right) \left( \sum_{j\geq i}\mu_{j}^{\dag
}-\sum_{j\geq i+1}\lambda_{j}^{\dag}\right) .
\]
It should instead say%
\[
\ell\left( W_{i-1}\right) \ell\left( S_{i}/N_{i}\right) =\left(
\lambda_{i}^{\dag}-\mu_{i}^{\dag}\right) \left( \sum_{j\geq i+1}\mu
_{j}^{\dag}-\sum_{j\geq i+2}\lambda_{j}^{\dag}\right) .
\]
More importantly: The construction of $N$ needs more details. First of all,
you should say that each $i\geq\lambda_{1}$ satisfies $S_{i}=0$, and therefore
$N_{i}$ must necessarily be $0$. It thus remains to construct $N_{i}$ for all
$i\in\left\{ 0,1,\ldots,\lambda_{1}-1\right\} $ (because then, $N$ is
determined by $N=N_{0}$). You intend to do this by recursion in the order of
decreasing $i$. You want to construct them in such a way that for each $i$, we
have $N_{i}\subseteq N_{i-1}$ and $N_{i-1}\cap S_{i}=N_{i}$, and the image of
the canonical map $N_{i-1}/N_{i}\rightarrow S_{i-1}/S_{i}$ (which, by the way,
is injective because of $N_{i-1}\cap S_{i}=N_{i}$) is $W_{i-1}$ (which then
automatically entails $\ell\left( N_{i-1}/N_{i}\right) =\ell\left(
W_{i-1}\right) =\lambda_{i}^{\dag}-\mu_{i}^{\dag}$).
So you fix some positive integer $i$, and assume that $N_{i}$ (and $W_{i-1}$)
are already chosen; now you need to construct $N_{i-1}$. You say that
\textquotedblleft we take any preimages of a basis for $W_{i-1}$ under the map
$S_{i-1}/S_{i}$ and take its span with $N_{i}$\textquotedblright. I am not
convinced that I understand this; different bases might lead to identical
spaces. Instead, I argue as follows:
First, we observe that if $V$ is a finite abelian $p$-group, and if $U$ is a
subgroup of $V$ such that $p\left( V/U\right) =0$, then%
\begin{equation}
\left( \text{the number of complements to }U\text{ in }V\right)
=p^{\ell\left( U\right) \cdot\ell\left( V/U\right) }.
\label{p48.num-comps}%
\end{equation}
\footnote{\textit{Proof of (\ref{p48.num-comps}):} Fix a basis $\left(
b_{1},b_{2},\ldots,b_{k}\right) $ of the $\mathbf{Z}/p$-vector space $V/U$;
then, any complement to $U$ in $V$ has a unique basis $\left( \beta_{1}%
,\beta_{2},\ldots,\beta_{k}\right) $ with the property that the projection of
each $\beta_{i}$ onto $V/U$ is $b_{i}$. Conversely, every $k$-tuple $\left(
\beta_{1},\beta_{2},\ldots,\beta_{k}\right) $ of vectors in $V$ with this
property is a basis of a unique complement to $U$ in $V$. Thus, the number of
complements to $U$ in $V$ equals the number of ways to pick $k$ elements
$\beta_{1},\beta_{2},\ldots,\beta_{k}$ of $V$ such that the projection of each
$\beta_{i}$ onto $V/U$ is $b_{i}$. But the latter number is $\left\vert
U\right\vert ^{k}=p^{\ell\left( U\right) \cdot\ell\left( V/U\right) }$
(since $\left\vert U\right\vert =p^{\ell\left( U\right) }$ and
$k=\ell\left( V/U\right) $).} Using this fact, it is easy to see that if $V$
is a finite abelian $p$-group, and if $U$ and $W$ are two subgroups of $V$
satisfying $W\subseteq U$ and $p\left( V/U\right) =0$, then%
\begin{align}
& \left( \text{the number of subgroups }U^{\prime}\text{ of }V\text{
satisfying }U^{\prime}\cap U=W\text{ and }U^{\prime}+U=V\right) \nonumber\\
& =p^{\ell\left( U/W\right) \cdot\ell\left( V/U\right) }.
\label{p48.num-compsW}%
\end{align}
(Indeed, such subgroups $U^{\prime}$ are in bijection with the complements to
the subgroup $U/W$ in the $p$-group $V/W$; therefore, (\ref{p48.num-compsW})
follows from (\ref{p48.num-comps}).)
Now, we want to choose a subgroup $N_{i-1}$ of $S_{i-1}$ such that
$N_{i}\subseteq N_{i-1}$ and $N_{i-1}\cap S_{i}=N_{i}$ and the image of the
canonical map $N_{i-1}/N_{i}\rightarrow S_{i-1}/S_{i}$ is $W_{i-1}$. Such a
subgroup $N_{i-1}$ will be called a \textit{helpful} subgroup. Note that the
requirement $N_{i}\subseteq N_{i-1}$ in the definition of a helpful subgroup
is redundant, since it follows from $N_{i-1}\cap S_{i}=N_{i}$. Thus, a
subgroup $N_{i-1}$ of $S_{i-1}$ is helpful if and only if it satisfies
$N_{i-1}\cap S_{i}=N_{i}$ and the image of the canonical map $N_{i-1}%
/N_{i}\rightarrow S_{i-1}/S_{i}$ is $W_{i-1}$.
In order to count all helpful subgroups, we first let $\widehat{W}_{i-1}$
denote the preimage of $W_{i-1}$ under the canonical projection $S_{i-1}%
\rightarrow S_{i-1}/S_{i}$. Note that $S_{i}\subseteq\widehat{W}%
_{i-1}\subseteq S_{i-1}$; thus, every subgroup of $\widehat{W}_{i-1}$ is a
subgroup of $S_{i-1}$. Also, from $\widehat{W}_{i-1}\subseteq S_{i-1}$, we
obtain $\widehat{W}_{i-1}/S_{i}\subseteq S_{i-1}/S_{i}$, so that $p\left(
\widehat{W}_{i-1}/S_{i}\right) \subseteq p\left( S_{i-1}/S_{i}\right) =0$
and thus $p\left( \widehat{W}_{i-1}/S_{i}\right) =0$.
But a subgroup $N_{i-1}$ of $S_{i-1}$ satisfies $N_{i-1}+S_{i}=\widehat{W}%
_{i-1}$ if and only if the image of the canonical map $N_{i-1}/N_{i}%
\rightarrow S_{i-1}/S_{i}$ is $W_{i-1}$. Hence, a subgroup $N_{i-1}$ of
$S_{i-1}$ is helpful if and only if it satisfies $N_{i-1}\cap S_{i}=N_{i}$ and
$N_{i-1}+S_{i}=\widehat{W}_{i-1}$. Thus, the helpful subgroups $N_{i-1}$ of
$S_{i-1}$ are precisely those subgroups of $\widehat{W}_{i-1}$ that satisfy
$N_{i-1}\cap S_{i}=N_{i}$ and $N_{i-1}+S_{i}=\widehat{W}_{i-1}$ (indeed, every
helpful subgroup $N_{i-1}$ of $S_{i-1}$ must be a subgroup of $\widehat{W}%
_{i-1}$, since it satisfies $N_{i-1}\subseteq N_{i-1}+S_{i}=\widehat{W}_{i-1}%
$; conversely, every subgroup of $\widehat{W}_{i-1}$ is a subgroup of
$S_{i-1}$). But (\ref{p48.num-compsW}) (applied to $V=\widehat{W}_{i-1}$ and
$U=S_{i}$ and $W=N_{i}$) yields that the number of the latter subgroups is
$p^{\ell\left( S_{i}/N_{i}\right) \cdot\ell\left( \widehat{W}_{i-1}%
/S_{i}\right) }$. Hence, the number of helpful subgroups of $S_{i-1}$ is%
\begin{align*}
p^{\ell\left( S_{i}/N_{i}\right) \cdot\ell\left( \widehat{W}_{i-1}%
/S_{i}\right) } & =p^{\ell\left( S_{i}/N_{i}\right) \cdot\ell\left(
W_{i-1}\right) }\ \ \ \ \ \ \ \ \ \ \left( \text{since }\widehat{W}%
_{i-1}/S_{i}\cong W_{i-1}\right) \\
& =p^{\ell\left( W_{i-1}\right) \ell\left( S_{i}/N_{i}\right) }.
\end{align*}
In other words, the number of ways to choose $N_{i-1}$ is $p^{\ell\left(
W_{i-1}\right) \ell\left( S_{i}/N_{i}\right) }$.
I don't see a way to make this shorter (and it took me 2 hours to figure out)...
\item \textbf{page 49, proof of Proposition 7.1.6:} \textquotedblleft The
binomial coefficient\textquotedblright\ $\rightarrow$ \textquotedblleft The
$p$-binomial coefficient\textquotedblright.
\item \textbf{page 49, proof of Proposition 7.1.6:} Every \textquotedblleft%
$m$\textquotedblright\ in the last paragraph of this proof should be a
\textquotedblleft$k$\textquotedblright.
\item \textbf{page 50, Proposition 7.1.8:} After \textquotedblleft there exist
unique polynomials $g_{\mu,\nu}^{\lambda}\left( t\right) $\textquotedblright%
, add \textquotedblleft$\in\mathbf{Z}\left[ t\right] $ independent of
$p$\textquotedblright.
\item \textbf{page 50, proof of Proposition 7.1.8:} \textquotedblleft such
that $M$ is an abelian $p$-group of type $\mu$\textquotedblright%
\ $\rightarrow$ \textquotedblleft such that $M$ is a fixed abelian $p$-group
of type $\mu$ (chosen once and for all)\textquotedblright.
\item \textbf{page 50, proof of Proposition 7.1.8:} In \textquotedblleft the
change of basis matrix between $u_{\lambda}$ and $v_{\lambda}$ is
lower-unitriangular\textquotedblright, replace \textquotedblleft$v_{\lambda}%
$\textquotedblright\ by \textquotedblleft$v_{\lambda^{\dag}}$%
\textquotedblright.
\item \textbf{page 50, proof of Proposition 7.1.8:} After \textquotedblleft
there exists a polynomial $a_{\lambda,\mu}\left( t\right) $%
\textquotedblright, add \textquotedblleft$\in\mathbf{Z}\left[ t\right] $
independent of $p$\textquotedblright.
\item \textbf{page 50, proof of Proposition 7.1.8:} Replace \textquotedblleft
such that $A_{\lambda,\mu}\left( p\right) =a_{\lambda,\mu}\left( p\right)
$ and $a_{\lambda,\lambda^{\dag}}\left( t\right) =1$\textquotedblright\ by
\textquotedblleft such that $A_{\lambda,\mu}\left( p\right) =a_{\lambda,\mu
}\left( p\right) $ for all primes $p$. Since there are infinitely many
primes, we conclude that $a_{\lambda,\lambda^{\dag}}\left( t\right) =1$
(because $A_{\lambda,\lambda^{\dag}}\left( p\right) =1$ for all primes $p$),
and similarly $a_{\lambda,\mu}\left( t\right) =0$ whenever we don't have
$\mu^{\dag}\geq\lambda$\textquotedblright.
\item \textbf{page 50:} In the definition of the universal Hall algebra,
replace \textquotedblleft$\mathbf{H}=H\otimes\mathbf{Z}\left[ t\right] $
where\textquotedblright\ by \textquotedblleft$\mathbf{H}$, which is
$H\otimes\mathbf{Z}\left[ t\right] $ as a $\mathbf{Z}\left[ t\right]
$-module, but where\textquotedblright. Or, better: \textquotedblleft%
$\mathbf{H}$; this is the $\mathbf{Z}\left[ t\right] $-algebra defined as
the free $\mathbf{Z}\left[ t\right] $-module with basis $u_{\lambda}$ (with
$\lambda$ ranging over all partitions) endowed with the same multiplication
law as $H$ except that\textquotedblright.
\item \textbf{page 51, proof of Theorem 7.1.10:} On the first two lines of
this proof, replace \textquotedblleft by Lemma 7.1.4 and Lemma
7.1.1\textquotedblright\ by \textquotedblleft by Lemma 7.1.4
(b)\textquotedblright. (I don't see where you are using Lemma 7.1.1 here.)
\item \textbf{page 51, proof of Theorem 7.1.10:} On the third line of this
proof, replace \textquotedblleft$\lambda^{\left( i\right) ^{\dag}}%
$\textquotedblright\ by \textquotedblleft$\lambda^{\left( i\right) \dag}%
$\textquotedblright\ (there should not be any nested superscripts here).
\item \textbf{page 51, \S 7.2:} \textquotedblleft This is a polynomial in $t$
which does not\textquotedblright\ $\rightarrow$ \textquotedblleft These are
polynomials in $t$ which do not\textquotedblright.
\item \textbf{page 52, \S 7.2:} In the first display on page 52, add a
\textquotedblleft$\operatorname*{sgn}\left( \sigma\right) $%
\textquotedblright\ factor immediately after the summation sign.
\item \textbf{page 52, \S 7.2:} After \textquotedblleft and of the same
degree\textquotedblright, add \textquotedblleft in the variables $x_{1}%
,\ldots,x_{n}$\textquotedblright.
\item \textbf{page 52, proof of Lemma 7.2.1:} You should say that you are
treating $t$ as a constant here, so that \textquotedblleft coefficient of
$x_{1}^{n-1}x_{2}^{n-2}\cdots x_{n-1}$\textquotedblright\ does not mean that
powers of $t$ get discarded.
\item \textbf{page 52, proof of Lemma 7.2.1:} \textquotedblleft from a
permutation in $\tau\in\Sigma_{n-1}$\textquotedblright\ has an
\textquotedblleft in\textquotedblright\ too much.
\item \textbf{page 52, proof of Lemma 7.2.1:} \textquotedblleft insert
if\textquotedblright\ $\rightarrow$ \textquotedblleft insert
it\textquotedblright.
\item \textbf{page 52, Lemma 7.2.2:} I'd add \textquotedblleft For every
partition $\lambda=\left( \lambda_{1},\lambda_{2},\ldots,\lambda_{n}\right)
$\textquotedblright\ at the beginning of this lemma.
\item \textbf{page 52, proof of Lemma 7.2.2:} In the last displayed equation
on page 52, insert \textquotedblleft$\beta_{\lambda_{1}}\beta_{\lambda_{1}%
-1}\cdots\beta_{0}$\textquotedblright\ immediately after the \textquotedblleft%
$\sum_{\left( \beta_{\lambda_{1}},\ldots,\beta_{0}\right) }$%
\textquotedblright\ sign.
Also, I'd insert an extra step between the middle-hand side and the right-hand
side of this computation (for the sake of clarity), namely%
\[
\left( \prod_{u=0}^{\lambda_{1}}\sum_{\beta\in\Sigma_{m_{u}\left(
\lambda\right) }}\beta_{u}\right) \left( x_{1}^{\lambda_{1}}x_{2}%
^{\lambda_{2}}\cdots x_{n}^{\lambda_{n}}\prod_{\substack{1\leq i<j\leq
n;\\\lambda_{i}>\lambda_{j}}}\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) .
% NOTE(review): this display was garbled in extraction (an ``i<j'' condition
% and closing delimiters were lost); reconstructed -- please check against
% the original.
\]
\item \textbf{page 53, Proposition 7.2.3:} It is worth adding a fifth claim,
\textbf{(e)}, saying that $P_{\lambda}$ is homogeneous of degree $\left\vert
\lambda\right\vert $ in the variables $x_{1},x_{2},\ldots,x_{n}$. (This is
easy to check from either expression in Lemma 7.2.2; but it's crucial to the
construction of $P_{\lambda}\left( x;t\right) \in\Lambda\left[ t\right] $
later. If the degree of $P_{\lambda}\left( x_{1},\ldots,x_{n};t\right) $
could grow with $n$, then there wouldn't be a $P_{\lambda}\left( x;t\right)
\in\Lambda\left[ t\right] $ that projects down to all of these $P_{\lambda
}\left( x_{1},\ldots,x_{n};t\right) $.)
\item \textbf{page 53, proof of Proposition 7.2.3 (b):} Lemma 7.2.1 yields a
simpler reason why $v_{n}\left( 0\right) =1$.
\item \textbf{page 53, proof of Proposition 7.2.3 (d):} Why is this
\textquotedblleft clear\textquotedblright?
\item \textbf{page 53:} The definition of the Hall-Littlewood symmetric
function $P_{\lambda}\left( x;t\right) \in\Lambda\left[ t\right] $ has the
consequence that a Hall-Littlewood polynomial $P_{\lambda}\left( x_{1}%
,x_{2},\ldots,x_{n};t\right) $ also becomes defined when $n<\ell\left(
\lambda\right) $ (although Lemma 7.2.2 does not hold in this case). It is
worth mentioning that these polynomials aren't very interesting: Namely, for
every partition $\lambda$ and any $n<\ell\left( \lambda\right) $, we have%
\begin{equation}
P_{\lambda}\left( x_{1},x_{2},\ldots,x_{n};t\right) =0.
\label{p53.degenerate-P}%
\end{equation}
(This is easy to prove: Just notice that $P_{\lambda}\left( x_{1}%
,x_{2},\ldots,x_{\ell\left( \lambda\right) };t\right) $ is a multiple of
$x_{1}x_{2}\cdots x_{\ell\left( \lambda\right) }$ (this follows from either
of the two expressions in Lemma 7.2.2), and thus becomes $0$ when
$x_{\ell\left( \lambda\right) }$ is set to $0$.)
\item \textbf{page 54, proof of Lemma 7.2.7:} At the beginning of this proof,
add \textquotedblleft We WLOG assume that $q$ is a prime
power.\textquotedblright. (Otherwise there is no $\mathbf{F}_{q}$.)
\item \textbf{page 54, Proposition 7.2.8:} \textquotedblleft
If\textquotedblright\ $\rightarrow$ \textquotedblleft If $\lambda/\mu$ is a
vertical strip and\textquotedblright\ at the beginning of this proposition.
Also, add \textquotedblleft Otherwise, $f_{\mu,1^{m}}^{\lambda}\left(
t\right) =0$.\textquotedblright\ at the end.
\item \textbf{page 54, proof of Proposition 7.2.8:} \textquotedblleft in
finitely many variables $n$\textquotedblright\ $\rightarrow$ \textquotedblleft
in finitely many variables $x_{1},x_{2},\ldots,x_{n}$\textquotedblright.
\item \textbf{page 54, proof of Proposition 7.2.8:} \textquotedblleft with
$n\geq\ell\left( \mu\right) +m$\textquotedblright\ $\rightarrow$
\textquotedblleft with $n\geq\left\vert \mu\right\vert +m$\textquotedblright%
\ (at least this is safer; maybe $n\geq\ell\left( \mu\right) +m$ is
sufficient too).
\item \textbf{page 54, proof of Proposition 7.2.8:} Remove the period at the
end of the first displayed equation in this proof.
\item \textbf{page 54, proof of Proposition 7.2.8:} \textquotedblleft If
$X_{i}=\left\{ y_{1},\ldots,y_{r_{i}}\right\} $\textquotedblright\ should be
\textquotedblleft If $X_{i}=\left\{ y_{1},\ldots,y_{m_{i}\left( \mu\right)
}\right\} $\textquotedblright.
\item \textbf{page 54, proof of Proposition 7.2.8:} In the second-to-last
display on page 54, replace \textquotedblleft$\operatorname*{Aut}\left(
\left\{ 1,\ldots,r_{i}\right\} \right) $\textquotedblright\ by
\textquotedblleft$\operatorname*{Aut}\left( \left\{ 1,\ldots,m_{i}\left(
\mu\right) \right\} \right) $\textquotedblright\ (though I'm also wondering
why you don't just say \textquotedblleft$\Sigma_{m_{i}\left( \mu\right) }%
$\textquotedblright).
\item \textbf{pages 54--55, proof of Proposition 7.2.8:} In the last display
on page 54, replace \textquotedblleft$v_{m_{i}\left( \mu\right) }\left(
t\right) $\textquotedblright\ by \textquotedblleft$v_{m_{i}\left(
\mu\right) -r_{i}}\left( t\right) $\textquotedblright. The same typo also
appears 5 times on page 55.
\item \textbf{page 55, proof of Proposition 7.2.8:} The equality%
\begin{align*}
& P_{\mu}\left( x;t\right) e_{r_{0}}\left( X_{0}\right) \cdots e_{r_{k}%
}\left( X_{k}\right) \\
& =\left( \prod_{i=0}^{k}v_{r_{i}}\left( t\right) v_{m_{i}\left(
\mu\right) }\left( t\right) \right) ^{-1}\sum_{\sigma\in\Sigma_{n}}%
\sigma\left( x_{1}^{\lambda\left( \mathbf{r}\right) _{1}}\cdots
x_{n}^{\lambda\left( \mathbf{r}\right) _{n}}\prod_{\substack{i<j;\\\mu
_{i}>\mu_{j}}}\dfrac
{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) \underbrace{e_{m}\left( x_{1}%
,\ldots,x_{n}\right) }_{=\sigma\left( e_{m}\left( x_{1},\ldots
,x_{n}\right) \right) }\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{by the second expression for }P_{\mu
}\left( x;t\right) \text{ in Lemma 7.2.2}\right) \\
& =\sum_{\sigma\in\Sigma_{n}/\Sigma_{n}^{\mu}}\sigma\left( x_{1}^{\mu_{1}%
}\cdots x_{n}^{\mu_{n}}\prod_{\substack{i,j;\\\mu_{i}>\mu_{j}}}\dfrac
{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) \sigma\left( \underbrace{e_{m}\left(
x_{1},\ldots,x_{n}\right) }_{=\sum_{\mathbf{r}=\left( r_{0},\ldots
,r_{k}\right) }e_{r_{0}}\left( X_{0}\right) \cdots e_{r_{k}}\left(
X_{k}\right) }\right) \\
& =\sum_{\mathbf{r}=\left( r_{0},\ldots,r_{k}\right) }\sum_{\sigma\in
\Sigma_{n}/\Sigma_{n}^{\mu}}\sigma\left( x_{1}^{\mu_{1}}\cdots x_{n}^{\mu
_{n}}\prod_{\substack{i,j;\\\mu_{i}>\mu_{j}}}\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}%
}\right) \\
& \ \ \ \ \ \ \ \ \ \ \sigma\left( \underbrace{e_{r_{0}}\left(
X_{0}\right) \cdots e_{r_{k}}\left( X_{k}\right) }_{\substack{=\left(
\prod_{i=0}^{k}v_{r_{i}}\left( t\right) v_{m_{i}\left( \mu\right) -r_{i}%
}\left( t\right) \right) ^{-1}\sum_{\tau\in\Sigma_{n}^{\mu}}\tau\left(
x_{1}^{\lambda\left( \mathbf{r}\right) _{1}-\mu_{1}}\cdots x_{n}%
^{\lambda\left( \mathbf{r}\right) _{n}-\mu_{n}}\prod_{\substack{i<j;\\\mu_{i}>\mu_{j}}}\dfrac{x_{i}-tx_{j}%
}{x_{i}-x_{j}}}_{=\tau\left( x_{1}^{\mu_{1}}\cdots x_{n}^{\mu_{n}}%
\prod_{\substack{i,j;\\\mu_{i}>\mu_{j}}}\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}%
}\right) }\right) \\
& \ \ \ \ \ \ \ \ \ \ \sigma\left( \tau\left( x_{1}^{\lambda\left(
\mathbf{r}\right) _{1}-\mu_{1}}\cdots x_{n}^{\lambda\left( \mathbf{r}%
\right) _{n}-\mu_{n}}\prod_{\substack{i<j;\\\mu_{i}>\mu_{j}}}\dfrac
{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) \right) .
\end{align*}
% NOTE(review): a passage of the original text was lost here in extraction
% (the end of this computation and the beginning of the next erratum); it
% should be restored from the original errata list.
\item \textbf{page 57, equation (8.1):} This equation only holds for $r>0$.
When $r=0$, the
right hand side is $1-t^{2}$, whereas the left hand side is $1$.
\item \textbf{page 58, Lemma 8.1.2:} This only holds for $\lambda
\neq\varnothing$.
\item \textbf{page 58, proof of Lemma 8.1.2:} At the beginning of this proof,
add \textquotedblleft We WLOG assume that $n\geq\ell\left( \lambda\right) $,
since otherwise both sides equal $0$ (as can be easily shown using
(\ref{p53.degenerate-P})).\textquotedblright.
\item \textbf{page 58, proof of Lemma 8.1.2:} \textquotedblleft the leading
coefficient\textquotedblright\ $\rightarrow$ \textquotedblleft the fraction
before the summation sign\textquotedblright.
\item \textbf{page 58, proof of Lemma 8.1.2:} I don't understand where the
last equality sign on page 58 comes from. Here is an argument I would suggest
instead (starting with the first line of the last display on page 58, slightly
modified to make it more obvious):%
\begin{align}
& Q_{\lambda}\left( x_{1},\ldots,x_{n};t\right) \nonumber\\
& =\left( 1-t\right) ^{k-1}\sum_{\sigma\in\Sigma_{n}/\Sigma_{n-k}}%
\sigma_{1}\left( x_{1}^{\lambda_{1}}g_{1}\sigma_{2}\left( x_{2}^{\lambda
_{2}}\cdots x_{k}^{\lambda_{k}}\prod_{i=2}^{k}\prod_{j>i}\dfrac{x_{i}-tx_{j}%
}{x_{i}-x_{j}}\right) \right) \nonumber\\
& =\left( 1-t\right) ^{k-1}\sum_{\sigma_{1}\in\Sigma_{n}/\Sigma_{n-1}%
}\nonumber\\
& \ \ \ \ \ \ \ \ \ \ \sigma_{1}\left( x_{1}^{\lambda_{1}}g_{1}\sum
_{\sigma_{2}\in\Sigma_{n-1}/\Sigma_{n-k}}\sigma_{2}\left( x_{2}^{\lambda_{2}%
}\cdots x_{k}^{\lambda_{k}}\prod_{i=2}^{k}\prod_{j>i}\dfrac{x_{i}-tx_{j}%
}{x_{i}-x_{j}}\right) \right) . \label{p58.lastline.1}%
\end{align}
Let us now agree to treat $\mu$ as the $\left( n-1\right) $-tuple $\left(
\mu_{1},\mu_{2},\ldots,\mu_{n-1}\right) =\left( \lambda_{2},\lambda
_{3},\ldots,\lambda_{n}\right) $. Then, $m_{0}\left( \mu\right) =n-k$. Now,%
\begin{align}
b_{\mu}\left( t\right) & =\prod_{i\geq1}\underbrace{\varphi_{m_{i}\left(
\mu\right) }\left( t\right) }_{=\left( 1-t\right) ^{m_{i}\left(
\mu\right) }\cdot\left[ m_{i}\left( \mu\right) \right] _{t}!}%
=\prod_{i\geq1}\left( \left( 1-t\right) ^{m_{i}\left( \mu\right) }%
\cdot\left[ m_{i}\left( \mu\right) \right] _{t}!\right) \nonumber\\
& =\underbrace{\left( 1-t\right) ^{\sum_{i\geq1}m_{i}\left( \mu\right) }%
}_{=\left( 1-t\right) ^{k-1}}\cdot\underbrace{\prod_{i\geq1}\left[
m_{i}\left( \mu\right) \right] _{t}!}_{=\left( \prod_{i\geq0}\left[
m_{i}\left( \mu\right) \right] _{t}!\right) /\left[ m_{0}\left(
\mu\right) \right] _{t}!}\nonumber\\
& =\left( 1-t\right) ^{k-1}\cdot\left( \prod_{i\geq0}\underbrace{\left[
m_{i}\left( \mu\right) \right] _{t}!}_{=v_{m_{i}\left( \mu\right)
}\left( t\right) }\right) /\underbrace{\left[ m_{0}\left( \mu\right)
\right] _{t}!}_{\substack{=v_{m_{0}\left( \mu\right) }\left( t\right)
=v_{n-k}\left( t\right) \\\text{(since }m_{0}\left( \mu\right)
=n-k\text{)}}}\nonumber\\
& =\left( 1-t\right) ^{k-1}\cdot\underbrace{\left( \prod_{i\geq0}%
v_{m_{i}\left( \mu\right) }\left( t\right) \right) }_{=v_{\mu}\left(
t\right) }/v_{n-k}\left( t\right) =\left( 1-t\right) ^{k-1}\dfrac{v_{\mu
}\left( t\right) }{v_{n-k}\left( t\right) }. \label{p58.lastline.2}%
\end{align}
But every $\sigma_{1}\in\Sigma_{n}/\Sigma_{n-1}$ satisfies%
\begin{align*}
& \sum_{\sigma_{2}\in\Sigma_{n-1}/\Sigma_{n-k}}\sigma_{2}\left(
x_{2}^{\lambda_{2}}\cdots x_{k}^{\lambda_{k}}\prod_{i=2}^{k}\prod_{j>i}%
\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) \\
& =\sum_{\sigma_{2}\in\Sigma_{n-1}/\Sigma_{n-k}}\sigma_{2}\left(
x_{2}^{\lambda_{2}}\cdots x_{k}^{\lambda_{k}}\prod_{i=2}^{k}\prod_{j>i}%
\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}}\cdot\dfrac{1}{v_{n-k}\left( t\right) }%
\sum_{\tau\in\Sigma_{n-k}}\tau\left( \prod_{k<i<j}\dfrac{x_{i}-tx_{j}%
}{x_{i}-x_{j}}\right) \right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\sum_{\tau\in\Sigma_{n-k}}%
\tau\left( \prod_{k<i<j}\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}}\right)
=v_{n-k}\left( t\right) \text{, so the inserted factor is }1\right) \\
% NOTE(review): this row was garbled in extraction (the subscripts of the
% products over ``k<i<j'' and the justification were damaged); reconstructed
% -- please check against the original.
& =\dfrac{1}{v_{n-k}\left( t\right) }\sum_{\sigma_{2}\in\Sigma_{n-1}%
/\Sigma_{n-k}}\sum_{\tau\in\Sigma_{n-k}}\\
& \ \ \ \ \ \ \ \ \ \ \left( \sigma_{2}\tau\right) \left( \underbrace{x_{2}%
^{\lambda_{2}}\cdots x_{k}^{\lambda_{k}}\left( \prod_{i=2}^{k}\prod
_{j>i}\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) \left( \prod_{\substack{i\leq
k<j}}\dfrac{x_{i}-tx_{j}}%
{x_{i}-x_{j}}\right) \left( \prod_{k<i<j}%
\dfrac{x_{i}-tx_{j}}{x_{i}-x_{j}}\right) }_{=\dfrac{v_{\mu}\left( t\right)
}{v_{n-k}\left( t\right) }P_{\mu}\left( x_{2},\ldots,x_{n};t\right)
}\right) \\
% NOTE(review): the subscripts of the last two products and the opening
% \underbrace were garbled in extraction; reconstructed -- please check
% against the original.
& =\underbrace{\left( 1-t\right) ^{k-1}\dfrac{v_{\mu}\left( t\right)
}{v_{n-k}\left( t\right) }}_{\substack{=b_{\mu}\left( t\right) \\\text{(by
(\ref{p58.lastline.2}))}}}\underbrace{\sum_{\sigma_{1}\in\Sigma_{n}%
/\Sigma_{n-1}}\sigma_{1}\left( x_{1}^{\lambda_{1}}g_{1}P_{\mu}\left(
x_{2},\ldots,x_{n};t\right) \right) }_{=\sum_{i=1}^{n}x_{i}^{\lambda_{1}%
}g_{i}P_{\mu}\left( x_{1},\ldots,x_{i-1},x_{i+1},\ldots,x_{n};t\right) }\\
& =b_{\mu^{\prime}}\left( t\right) \sum_{i=1}^{n}x_{i}^{\lambda_{1}}%
g_{i}\underbrace{P_{\mu}\left( x_{1},\ldots,x_{i-1},x_{i+1},\ldots
,x_{n};t\right) }_{\substack{=P_{\mu}^{\left( i\right) }\left(
x_{1},\ldots,x_{n};t\right) \\\text{(by the symmetry of }P_{\mu}\text{ and
by}\\\text{Proposition 7.2.3 (d))}}}\\
& =b_{\mu^{\prime}}\left( t\right) \sum_{i=1}^{n}x_{i}^{\lambda_{1}}%
g_{i}P_{\mu}^{\left( i\right) }\left( x_{1},\ldots,x_{n};t\right)
=\sum_{i=1}^{n}x_{i}^{\lambda_{1}}g_{i}\underbrace{b_{\mu^{\prime}}\left(
t\right) P_{\mu}^{\left( i\right) }\left( x_{1},\ldots,x_{n};t\right)
}_{\substack{=Q_{\mu}^{\left( i\right) }\left( x_{1},\ldots,x_{n};t\right)
\\\text{(since }Q_{\mu}\left( x_{1},\ldots,x_{n};t\right) =b_{\mu}\left(
t\right) P_{\mu}\left( x_{1},\ldots,x_{n};t\right) \text{)}}}\\
& =\sum_{i=1}^{n}x_{i}^{\lambda_{1}}g_{i}Q_{\mu}^{\left( i\right) }\left(
x_{1},\ldots,x_{n};t\right) .
\end{align*}
This completes the proof of Lemma 8.1.2.
\item \textbf{page 59, Proposition 8.1.3:} It should be said here
that you are working in the ring%
\[
\Lambda\left[ t\right] \left[ \left[ u_{1},u_{1}^{-1}u_{2},u_{2}^{-1}%
u_{3},u_{3}^{-1}u_{4},\ldots\right] \right]
\]
(so the power series $\underline{Q}\left( u_{i}\right) $ and $F\left(
u_{i}^{-1}u_{j}\right) $ are actually well-defined), and that elements of
$\Lambda\left[ t\right] $ are treated as scalars (so that a coefficient can
include $t$'s and $x_{i}$'s).
\item \textbf{page 59, proof of Proposition 8.1.3:} Strictly speaking, you
need an extra induction base case $\ell\left( \lambda\right) =0$ here.
\item \textbf{page 59, proof of Proposition 8.1.3:} \textquotedblleft and so
this follows from Lemma 8.1.1\textquotedblright\ $\rightarrow$
\textquotedblleft and so this follows from the definition of $\underline{Q}%
\left( u\right) $\textquotedblright.
\item \textbf{page 59, proof of Proposition 8.1.3:} Before \textquotedblleft
By our induction hypothesis\textquotedblright, add the sentence
\textquotedblleft We work with finitely many variables $x_{1},\ldots,x_{n}$
from now on.\textquotedblright.
\item \textbf{page 59, proof of Proposition 8.1.3:} In the third display in
this proof, and every time from there onwards until the end of page 59, you're
using the letter \textquotedblleft$\lambda_{1}$\textquotedblright\ as a
summation index; but it already has a different meaning (it stands for the
first entry of $\lambda$). The summation index should be renamed to avoid this collision.
\item \textcolor{red}{
\textbf{page 59, proof of Proposition 8.1.3:} The second equality sign
in the last display looks suspicious to me: It uses (8.1), but (8.1)
holds only for $r>0$, so we would somehow get an error term from the $\left(
p,m\right) =\left( 0,0\right) $ addend, right? (Here I have renamed your
summation index $\lambda_{1}$ as $p$, so as to avoid the collision mentioned above.)
The third equality sign also feels wrong. I understand that\[
\underline{Q}\left( u_{1}\right) \sum_{m\geq0}f_{m}u_{1}^{-m}=\sum_{m\geq
0}\sum_{p\geq-m}u_{1}^{p}f_{m}q_{p+m}\left( x_{1},\ldots,x_{n};t\right) .
\]
But $\sum_{p\geq-m}$ is not $\sum_{p\geq0}$. Also I'm not sure if $\sum
_{m\geq0}f_{m}u_{1}^{-m}$ is actually well-defined in the ring $\Lambda\left[
t\right] \left[ \left[ u_{1},u_{1}^{-1}u_{2},u_{2}^{-1}u_{3},u_{3}^{-1}u_{4},\ldots\right] \right] $; it is easy to get contradictions when
computing with power series in $u_{1}$ and power series in $u_{1}^{-1}$ at the
same time.
}
\item \textcolor{red}{
\textbf{page 60:} I don't understand the raising operators.
When computing $R_{i_1, j_1} R_{i_2, j_2} q_{\alpha}$, it may happen that
$R_{i_2, j_2} \alpha$ has a negative entry, but
$R_{i_1, j_1} R_{i_2, j_2} q_{\alpha}$ has no negative entries
(specifically, this tends to happen when $j_2 = i_1$).
In this case, should $R_{i_1, j_1} R_{i_2, j_2} q_{\alpha}$ be
understood as $0$ because $R_{i_2, j_2} q_{\alpha} = 0$,
or should it be nonzero?
In other words, should I first have the $R$'s act
formally on the subscripts and only then evaluate to a symmetric
function, or should I apply the $R$ operators one by one?
In the first case, how do you get, e.g., the first displayed equation
in the proof of Theorem 8.1.6 (i.e., why does
$\prod_{i<j}\left( 1-R_{i,j}\right) $ act as claimed)?
}
% NOTE(review): the end of this erratum (and possibly a further erratum,
% whose surviving fragment ends in ``$\ldots\lambda$'') was lost in
% extraction; it should be restored from the original errata list.
\item \textbf{page 61, Proposition 8.2.1:} \textquotedblleft if $\left\langle
u_{\lambda},v_{\lambda}\right\rangle _{t}$\textquotedblright\ $\rightarrow$
\textquotedblleft if $\left\langle u_{\lambda},v_{\mu}\right\rangle _{t}%
$\textquotedblright.
\item \textbf{page 63:} In the first display on page 63, the first
\textquotedblleft$\prod_{i\geq1}$\textquotedblright\ sign should be
\textquotedblleft$\prod_{i=1}^{\ell\left( \lambda\right) }$%
\textquotedblright, since any factors with $i>\ell\left( \lambda\right) $
are undefined (they involve division by $0$). However, the second
\textquotedblleft$\prod_{i\geq1}$\textquotedblright\ sign is more complicated,
since the numerators should be multiplied over all $i\geq1$, whereas the
denominators should only be multiplied over $i\in\left\{ 1,2,\ldots
,\ell\left( \lambda\right) \right\} $. So I suggest replacing the right
hand side by \textquotedblleft$\dfrac{\prod_{i\geq1}\left( m_{i}\left(
\lambda\right) !i^{m_{i}\left( \lambda\right) }\right) }{\prod_{i=1}%
^{\ell\left( \lambda\right) }\left( 1-t^{\lambda_{i}}\right) }%
$\textquotedblright.
\item \textbf{page 64, proof of Lemma 9.1.2:} After \textquotedblleft and
that\textquotedblright, add \textquotedblleft Lemma 2.5.3
yields\textquotedblright.
\item \textbf{page 64, proof of Lemma 9.1.2:} I think it's worth saying a
couple words about why the $q_{r}$ with odd $r$ are algebraically independent, too.
\item \textbf{page 65, proof of Lemma 9.1.4:} \textquotedblleft$\prod_{i\geq
1}\dfrac{1}{1-t^{2i+1}}$\textquotedblright\ should be \textquotedblleft%
$\prod_{i\geq0}\dfrac{1}{1-t^{2i+1}}$\textquotedblright.
\item \textbf{page 65, Proposition 9.1.5:} You're using the notion of a
\textquotedblleft strict partition\textquotedblright, so you should define it.
(Of course, it just means a partition in $\operatorname*{DPar}$, so you may
also avoid using the word altogether.)
\item \textbf{page 65, Proposition 9.1.5:} \textquotedblleft the coefficient
of $q_{\mu}$ is divisible by $2^{\ell\left( \lambda\right) -\ell\left(
\mu\right) }$\textquotedblright\ $\rightarrow$ \textquotedblleft the
coefficient of $q_{\lambda}$ is divisible by $2^{\ell\left( \mu\right)
-\ell\left( \lambda\right) }$\textquotedblright\ (note both changes here).
Alternatively, you can avoid talking about coefficients, by just saying
\textquotedblleft each $q_{\mu}$ can be written as a $\mathbf{Z}$-linear
combination of $2^{\ell\left( \mu\right) -\ell\left( \lambda\right)
}q_{\lambda}$ with $\lambda\in\operatorname*{DPar}$ and $\lambda\geq\mu
$\textquotedblright.
\item \textbf{page 65, proof of Proposition 9.1.5:} After \textquotedblleft
with the required divisibility condition\textquotedblright, add
\textquotedblleft and $\lambda\geq\mu$\textquotedblright. (Or, again, you can
avoid talking about divisibility and just put the powers of $2$ in front of
the $q_{\mu}$'s.)
\item \textbf{page 65, proof of Proposition 9.1.5:} After \textquotedblleft
such that $\mu_{i}=\mu_{i+1}=m$\textquotedblright, add \textquotedblleft%
$>0$\textquotedblright.
\item \textbf{page 65, proof of Proposition 9.1.5:} After \textquotedblleft
The coefficient of each $q_{\nu^{i}}$ is\textquotedblright, add
\textquotedblleft a multiple of\textquotedblright.
\item \textbf{page 65, proof of Proposition 9.1.5:} \textquotedblleft
divisiblity\textquotedblright\ $\rightarrow$ \textquotedblleft
divisibility\textquotedblright\ (twice).
\item \textbf{page 65, Proposition 9.1.7:} Shouldn't you somehow hint to the
fact that the $\left\langle \cdot,\cdot\right\rangle $ form is not the usual
bilinear form on $\Lambda$ but rather the $t=-1$ specialization of
$\left\langle \cdot,\cdot\right\rangle _{t}$ ? Maybe call it $\left\langle
\cdot,\cdot\right\rangle _{-1}$ ?
\item \textbf{page 67:} You say: \textquotedblleft So this is a finite group
of order $2n!$\textquotedblright. This is far from obvious at this point, I
believe; the simplest proof(?) comes from the existence of negative
representations. (I am currently fighting a similar problem at
\href{https://mathoverflow.net/questions/285263}{MathOverflow question
\#285263}.)
\item \textbf{page 72, \S 9.5:} Once again, I suggest writing $\left\langle
\cdot,\cdot\right\rangle _{-1}$ instead of $\left\langle \cdot,\cdot
\right\rangle $.
\item \textbf{page 72, Theorem 9.5.1:} A comma too much in \textquotedblleft%
$f,g,\in$\textquotedblright.
\end{itemize}
\end{document}