\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{framed}
\usepackage{amsmath}
\usepackage{comment}
\usepackage{color}
\usepackage{hyperref}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{amsthm}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Friday, May 08, 2020 19:19:52}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\definecolor{darkred}{rgb}{0.7,0,0}
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{algorithm}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exmp}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\kk}{\mathbf{k}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\ev}{\operatorname{ev}}
\newcommand{\Comp}{\operatorname{Comp}}
\newcommand{\bk}{\mathbf{k}}
\newcommand{\Nplus}{\mathbb{N}_{+}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\redbull}{${\color{red}\bullet}$}
\iffalse
\NOEXPAND{\redbull}{${\color{red}\bullet}$}
\fi
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\DeclareSymbolFont{bbold}{U}{bbold}{m}{n}
\DeclareSymbolFontAlphabet{\mathbbold}{bbold}
\setlength\textheight{22.5cm}
\setlength\textwidth{15cm}
\ihead{Errata to SymFuncs2020 (version 2020-05-08)}
\ohead{\today}
\begin{document}
\begin{center}
\textbf{An involutive introduction to symmetric functions}
\textit{Mark Wildon}
\url{http://www.ma.rhul.ac.uk/~uvah099/Maths/Sym/SymFuncs2020.pdf}
version of 8 May 2020
\textbf{Errata and addenda by Darij Grinberg}
\bigskip
\end{center}
%\setcounter{section}{}
\section*{Errata and comments}
\begin{itemize}
\item \textbf{pages 1--2, Preface:} Something similar to your solution to
Question 21 appears in the proof of Theorem 6.3 of:
\qquad Anthony Mendes, Jeffrey Remmel,
\qquad\textit{Counting with Symmetric Functions},
\qquad Springer 2015,
and also in \S 9.2 of:
\qquad Eric Egge,
\qquad\textit{An Introduction to Symmetric Functions} \textit{and Their
Combinatorics},
\qquad Student Mathematical Library \textbf{91},
\qquad AMS 2019.
Might be worth a brief comparison.
\item \textbf{page 4, \S 1.3:} \textquotedblleft unital $\mathbf{C}$-algebra
isomorphisms\textquotedblright\ $\rightarrow$ \textquotedblleft unital
$\mathbf{C}$-algebra automorphisms\textquotedblright.
\item \textbf{page 4, \S 1.3:} \textquotedblleft sends the unit element $1$ is
sent to itself\textquotedblright\ $\rightarrow$ \textquotedblleft sends the
unit element $1$ to itself\textquotedblright.
\item \textbf{page 21:} It is worth pointing out that Theorem 2.1 immediately
yields a new proof of Proposition 1.16.
\item \textbf{page 23, Definition 3.2:} After \textquotedblleft\textit{abacus}
representing $\lambda$\textquotedblright, add \textquotedblleft(or, for short,
\textit{abacus for }$\lambda$)\textquotedblright.
\item \textbf{page 24, \S 3.2:} In the computation of $a_{\left( 3,1\right)
+\left( 2,1,0\right) }$, the \textquotedblleft$+x_{3}^{2}x_{1}^{5}-x_{1}%
^{2}x_{3}^{5}$\textquotedblright\ part should be \textquotedblleft$-x_{3}%
^{2}x_{1}^{5}+x_{1}^{2}x_{3}^{5}$\textquotedblright\ (both signs need to be flipped).
\item \textbf{page 25:} In the first case (\textquotedblleft If there are no
collisions\textquotedblright) of the definition of $J\left( A,S\right) $, I
briefly stumbled over the question of what to do if the first bead we want to
move right is already in the rightmost position. Thinking about the purpose of
the construction, I soon realized that in this case, the abacus is simply
extended by one gap to the right before moving the bead. This is probably
worth writing out.
\item \textbf{page 27, proof of Corollary 3.9:} It might be worth explaining
what a \textquotedblleft Young's Rule addition\textquotedblright\ is (i.e.,
adding boxes in such a way that no two boxes are added in the same column).
\item \textbf{page 28, \S 3.5, and many places below:} Let me note that
\textquotedblleft ribbon\textquotedblright\ and \textquotedblleft border
strip\textquotedblright\ are synonyms for \textquotedblleft
rim-hook\textquotedblright\ widely used in the literature.
\item \textbf{page 28, \S 3.5:} I'd add the remark that (for any partition
$\lambda$) we say that $\lambda/\lambda$ is a $0$-strip, and that its sign
$\operatorname*{sgn}\left( \lambda/\lambda\right) $ is defined to be $1$
(contrary to what the definition of sign would suggest). This convention is
important in making Corollary 3.13 work (keep in mind that $\alpha_{i}$ can be
$0$ in a composition $\alpha$).
\item \textbf{page 30, proof of Corollary 3.13:} I'd mention here that you are
using Theorem 3.11 for all $r\in\mathbf{N}_{0}$, not just for $r\in\mathbf{N}%
$. (Of course, Theorem 3.11 for $r=0$ is obvious.)
\item \textbf{page 30, \S 3.6:} After \textquotedblleft just observe that
$P\left( 1,2,2,1\right) =\left( 2,2,1,1\right) $\textquotedblright, add
\textquotedblleft$=P\left( 1,1,2,2\right) $\textquotedblright, in order to
clarify what this has to do with $2$-rim-hooks.
\item \textbf{page 31, Definition 4.1:} I think an example illustrating the
concepts of \textquotedblleft excess\textquotedblright\ and \textquotedblleft
record\textquotedblright\ used in this definition would be helpful. For
example, in order to find the $1$-unpaired $1$s in $121321132$, we make the
following table:%
\[
\left(
\begin{array}
[c]{ccccccccc}%
1 & 2 & 1 & 3 & 2 & 1 & 1 & 3 & 2\\
1 & 0 & 1 & 1 & 0 & 1 & 2 & 2 & 1\\
\ast & & & & & & \ast & &
\end{array}
\right) .
\]
The top row is the word $w=121321132$. The middle row shows, for each entry of
this word, the excess of $1$s over $2$s in the part of the word reaching up to
this entry (when the word is read from left to right). The bottom row has an
asterisk $\ast$ in each column where the excess achieves a new record; thus,
the $1$-unpaired $1$s in $w$ are exactly the entries which have a $\ast$ under
them. A similar table can be made for finding $1$-unpaired $2$s.
\item \textbf{page 32, proof of Lemma 4.2:} You write: \textquotedblleft since
every $k+1$ to the left of position $i$ is paired, this new $k$ is
unpaired\textquotedblright. I believe this isn't so simple. Couldn't this new
$k$ grab a $k+1$ to its left that was previously paired with some other $k$ in
$w$, and thus mess up the pairing of parentheses?
Let me suggest two valid proofs of this claim (though I cannot say either of
them is particularly readable).
I shall refer to the third sentence of Lemma 4.2 (\textquotedblleft Changing
the letters [...] entries of $w$\textquotedblright) as Lemma 4.2 (b).
\textit{First proof of Lemma 4.2 (b):} Let $w^{\prime}$ be the word obtained
from $w$ by the change indicated in Lemma 4.2 (b).
Regard the $k$s and $\left( k+1\right) $s in $w$ as closing and opening
parentheses, respectively. The paired $k$s and the paired $\left( k+1\right)
$s then correspond to parentheses that are paired according to the usual rules
of bracketing. This pairing has the following property: Between any paired
parenthesis and its partner\footnote{The \textit{partner} of a paired
parenthesis is the other parenthesis that it is paired with.}, there are no
unpaired parentheses\footnote{In fact, any unpaired parenthesis between them
would have prevented them from getting paired with each other.}. Therefore,
any change to the unpaired parentheses in $w$ does not interfere with the
paired parentheses; in particular, it does not render their pairing
invalid\footnote{\textquotedblleft Invalid\textquotedblright\ would mean that
two parentheses that were paired to each other before the change could end up
not paired to each other after the change. This cannot happen, because there
were no unpaired entries between them (as we have just seen), and so none of
the letters between them have changed.}. In general, such a change might
introduce some new paired parentheses; however, the change indicated in Lemma
4.2 (b) cannot do this, because it replaces the unpaired subword $k^{c}\left(
k+1\right) ^{d}$ by a subword of the form $k^{c^{\prime}}\left( k+1\right)
^{d^{\prime}}$, which clearly creates no opportunity for further pairing.
Therefore, the paired parentheses in $w^{\prime}$ are exactly the paired
parentheses in $w$ (in particular, they occupy the same positions in
$w^{\prime}$ as in $w$); consequently, the $k$-unpaired entries of $w^{\prime
}$ are in the same positions as the $k$-unpaired entries of $w$. This proves
Lemma 4.2 (b).
\textit{Second proof of Lemma 4.2 (b):} We proceed by strong induction on the
length of the word. Thus, we fix our $w$, $k$, $c$, $d$, $c^{\prime}$ and
$d^{\prime}$, but we assume that Lemma 4.2 (b) is already proven for all words
shorter than $w$ in the place of $w$.
A word is said to be \textit{simple} if it has the form $\left( k+1\right)
vk$, where $v$ is a word (possibly empty) containing neither of the letters
$k$ and $k+1$. (Of course, the letter $k$ is fixed here.) Let $w^{\prime}$ be
the word obtained from $w$ by the change indicated in Lemma 4.2 (b).
If the word $w$ contains no simple factor, then Lemma 4.2 (b) is obvious
(indeed, in this case, all $k$s and all $\left( k+1\right) $s are unpaired
in $w$, and the same holds for $w^{\prime}$). We thus assume that the word $w$
contains a simple factor. In this case, we choose some simple factor of $w$;
we denote this factor by $u$, and we let $p$ and $q$ be the positions (in $w$)
of its first and last letter. For any word $z$ having at least $q$ letters, we
let $\overline{z}$ be the word obtained from $z$ by removing the letters at
positions $p,p+1,\ldots,q$.
Now, the pairing of the $k$s and $\left( k+1\right) $s in $w$ (regarded as
closing and opening parentheses) has the following property: The $k+1$ in
position $p$ is paired with the $k$ in position $q$ (since there are no $k$s
and no $\left( k+1\right) $s between them), and the pairing of the remaining
$k$s and $\left( k+1\right) $s in $w$ is precisely the same as if the simple
factor $u$ (starting at position $p$ and ending at position $q$) was absent
(i.e., it is the same as for the word $\overline{w}$). Exactly the same holds
for the word $w^{\prime}$, because the simple factor $u$ is unaffected by the
change that transforms $w$ into $w^{\prime}$ (indeed, the change only modifies
unpaired letters, but there are no unpaired letters in $u$). Hence, in order
to prove Lemma 4.2 (b) for our word $w$, it suffices to prove Lemma 4.2 (b)
for the word $\overline{w}$ (since the word $\overline{w^{\prime}}$ is
obtained from $\overline{w}$ by the same change that transforms $w$ into
$w^{\prime}$). But this follows from the induction hypothesis, since the word
$\overline{w}$ is shorter than $w$. This concludes the proof of Lemma 4.2 (b).
\item \textbf{page 33, proof of Lemma 4.4:} You write: \textquotedblleft If
$t^{\prime}$ is not semistandard then $t\left( a-1,b\right) =k$%
\textquotedblright. This requires proof. A priori, it is clear that if
$t^{\prime}$ is not semistandard, then either $t\left( a-1,b\right) =k$ or
$t\left( a,b-1\right) =k+1$ (or both). To obtain your claim, we need to rule
out that $t\left( a,b-1\right) =k+1$. Fortunately, this is easy: If we had
$t\left( a,b-1\right) =k+1$, then the letter $k+1$ of $\operatorname*{w}%
\left( t\right) $ corresponding to the entry $k+1$ in position $\left(
a,b-1\right) $ of $t$ would be a $k$-unpaired $k+1$ (indeed, the letter
immediately following it is a $k$-unpaired $k+1$, but there is a fact (easily
proven using Definition 4.1) that if a letter $p$ in a word $w$ is a $k+1$,
and if the letter immediately following it is a $k$-unpaired $k+1$, then the
letter $p$ must also be a $k$-unpaired $k+1$), but this would contradict the
fact that the leftmost unpaired $k+1$ in $\operatorname*{w}\left( t\right) $
is the letter corresponding to the entry $t\left( a,b\right) $ (which is
further right than the letter we are talking about).
For some reason, every argument I make about coplactic maps degenerates into a
run-on sentence like this...
\item \textbf{page 33, proof of Lemma 4.4:} You say \textquotedblleft$S_{k}$
and $S_{k}E_{k}$ are involutions\textquotedblright. Well, almost... In order
to be able to say that $S_{k}$ is an involution, you need to extend $S_{k}$ to
a map $\operatorname*{SSYT}\left( \mu,\alpha\right) \rightarrow
\operatorname*{SSYT}\left( \mu,\alpha\right) $ (rather than merely
$\operatorname*{SSYT}\nolimits_{k}\left( \mu,\alpha\right) \rightarrow
\operatorname*{SSYT}\nolimits_{k+1}\left( \mu,\alpha\right) $). An
involution must be a bijection from a set to itself, not to another set.
Likewise, the map $S_{k}E_{k}$ is not in itself an involution, but if you
combine the maps $S_{k}E_{k}:\operatorname*{SSYT}\nolimits_{k+1}\left(
\mu,\alpha\right) \rightarrow\operatorname*{SSYT}\nolimits_{k+1}\left(
\mu,\alpha^{\prime}-\epsilon\left( k\right) \right) $ for all $\alpha$ into
one large map $S_{k}E_{k}:\operatorname*{SSYT}\nolimits_{k+1}\left(
\mu\right) \rightarrow\operatorname*{SSYT}\nolimits_{k+1}\left( \mu\right)
$, where $\operatorname*{SSYT}\nolimits_{k+1}\left( \mu\right)
=\bigsqcup_{\alpha}\operatorname*{SSYT}\nolimits_{k+1}\left( \mu
,\alpha\right) $, then this large map $S_{k}E_{k}$ is an involution.
\item \textbf{page 35, proof of Lemma 4.7:} \textquotedblleft$\sigma
=\operatorname*{id}\nolimits_{\operatorname*{Sym}\nolimits_{N}}$ by Question
22\textquotedblright\ $\rightarrow$ \textquotedblleft$\sigma
=\operatorname*{id}\nolimits_{\operatorname*{Sym}\nolimits_{N}}$ by Question
22 (b)\textquotedblright.
\item \textbf{page 35, proof of Lemma 4.7:} At the very end of this proof, it
wouldn't hurt to explicitly mention that $t$ is the unique element of
$\operatorname*{SSYT}\left( \lambda,\lambda\right) $ because $\left\vert
\operatorname*{SSYT}\left( \lambda,\lambda\right) \right\vert =K_{\lambda
\lambda}=1$ by Question 11 (c).
\item \textbf{page 35:} You write: \textquotedblleft$J$ has a unique fixed
point in $\mathcal{T}$ if $\mu=\lambda$, and otherwise none\textquotedblright.
This is not quite obvious. In order to prove that an unlatticed tableau
$t\in\mathcal{T}$ cannot be a fixed point of $J$, you need to observe that the
content of $J\left( t\right) $ is different from the content of $t$ (because
Question 22 (c) shows that $\lambda\cdot\left( \sigma\left( k,k+1\right)
\right) \neq\lambda\cdot\sigma$).
\item \textbf{page 37, proof of Theorem 4.10:} You write: \textquotedblleft it
follows that applying $J$ therefore cancels all contributions to $c_{\mu}$
except those coming from tableaux $t\in\mathcal{T}$ such that $J\left(
t\right) =t$\textquotedblright. Let me explain this in a bit more detail:
Define the \textit{sign} $\operatorname*{sgn}\left( t\right) $ of a tableau
$t\in\mathcal{T}$ to be $\operatorname*{sgn}\sigma$, where $\sigma$ is the
unique permutation in $\operatorname*{Sym}\nolimits_{N}$ satisfying
$t\in\operatorname*{SSYT}\left( \mu,\lambda\cdot\sigma\right) $. (The
uniqueness of this $\sigma$ follows from Question 22 (c).) Then, we can
rewrite the definition of $c_{\mu}$ as $c_{\mu}=\sum_{t\in\mathcal{T}%
}\operatorname*{sgn}\left( t\right) $ (using the fact that the $\sigma$ in
the preceding sentence is unique). Thus, a sign-reversing involution on
$\mathcal{T}$ should help simplify $c_{\mu}$. And Lemma 4.7 (i) shows
precisely that the involution $J$ is sign-reversing on the unlatticed tableaux
$t\in\mathcal{T}$.
\item \textbf{page 39, proof of Theorem 5.3:} In Claim 1, it might be better
to replace \textquotedblleft$\dfrac{a_{j}!}{C_{1j}!\cdots C_{kj}!}%
$\textquotedblright\ by \textquotedblleft$\dbinom{a_{j}}{C_{1,j}%
,\ldots,C_{k,j}}$\textquotedblright\ (after perhaps reminding the reader of
the definition of multinomial coefficients: namely, if $u_{1},u_{2}%
,\ldots,u_{k}$ are $k$ nonnegative integers, and if $v=u_{1}+u_{2}%
+\cdots+u_{k}$, then the multinomial coefficient $\dbinom{v}{u_{1}%
,u_{2},\ldots,u_{k}}$ is defined to be the positive integer $\dfrac{v!}%
{u_{1}!u_{2}!\cdots u_{k}!}$). After all, you always write it as a multinomial
coefficient later on.
\item \textbf{page 41, proof of Lemma 5.4:} In the last computation of this
proof, you are tacitly using the identity $\left\langle s_{\lambda},h_{\mu
}\right\rangle =K_{\lambda\mu}$ (for any partitions $\lambda$ and $\mu$). This
is probably worth stating earlier on.
\item \textbf{page 41, proof of Lemma 5.6:} I'd replace \textquotedblleft
Comparing (5.2) and (5.3) we get\textquotedblright\ by the somewhat more
detailed \textquotedblleft The definition of $\omega$ yields $\omega\left(
h_{\mu}\right) =e_{\mu}$. In view of (5.2) and (5.3), this rewrites
as\textquotedblright.
\item \textbf{page 42, \S 5.3, Alternative proof:} I don't know how detailed
this all is supposed to be, but I feel like there are a lot of silent steps
here. In particular, it would help pointing out (probably somewhere in \S 3)
that an abacus of a partition $\lambda$ can be obtained by vertically
reflecting an abacus of its conjugate $\lambda^{\prime}$ and turning beads
into gaps and vice versa. This is a beautiful (yet simple) fact, and explains
why the border-strip tableaux for $\lambda$ are in bijection with those of
$\lambda^{\prime}$.
\item \textbf{page 42, \S 5.4:} Have you ever defined what a skew-partition
is, and how its Young diagram is defined?
(A \textit{skew-partition} is a pair $\left( \lambda,\nu\right) $ of two
partitions $\lambda$ and $\nu$ satisfying $\left[ \nu\right] \subseteq
\left[ \lambda\right] $. It is written as $\lambda/\nu$, and its Young
diagram $\left[ \lambda/\nu\right] $ is defined to be the set difference
$\left[ \lambda\right] -\left[ \nu\right] $. A tableau of shape
$\lambda/\nu$ is defined just as a tableau of usual shape.)
\item \textbf{page 43, \S 5.5:} It can't possibly hurt to say somewhere that
the \textquotedblleft$\omega$-involution\textquotedblright\ means the
involution $\omega$.
\item \textbf{page 43, proof of Proposition 5.7:} I am not sure how you
conclude that this irreducible constituent in the last sentence is actually
the image of $s_{\mu}$. (This is not hard to check -- e.g., there is a
standard trick that uses $\left\langle \chi^{\mu},\chi^{\mu}\right\rangle
=\left\langle s_{\mu},s_{\mu}\right\rangle =1$ to show that $\chi^{\mu}$ is
$\pm$ an irreducible character, and then we can use $\left\langle \chi^{\mu
},\pi^{\mu}\right\rangle =1>0$ to conclude that the $\pm$ is in fact a $+$.)
\item \textbf{page 43, proof of Corollary 5.10:} Strictly speaking, you have
not shown that all irreducible characters of $\operatorname*{Sym}%
\nolimits_{n}$ are of the form $\chi^{\lambda}$, so the proof is incomplete.
(I am not saying that this is difficult, but it needs a couple more lines.)
\item \textbf{page 46, (5.7):} Replace \textquotedblleft$g$\textquotedblright%
\ by \textquotedblleft$\sigma$\textquotedblright\ or vice versa.
\item \textbf{page 46, proof of Lemma 5.12:} Replace \textquotedblleft of
$S_{n}$\textquotedblright\ by \textquotedblleft of $\operatorname*{Sym}%
\nolimits_{n}$\textquotedblright.
\item \textbf{page 47, proof of Theorem 5.14:} It is worth stating explicitly
the fact that you are using for the last equality sign in the long
computation. This fact says that if $A$ is a finite group, and if $B$ and $C$
are two subgroups of $A$, then
\[
\left\langle 1\uparrow_{B}^{A}\downarrow_{C}^{A},1\right\rangle _{C}=\left(
\text{the number of double cosets }BaC\text{ with }a\in A\right) .
\]
This can indeed be derived from Mackey's formula or from the interpretation of
$1\uparrow_{B}^{A}$ as a coset space character.
\item \textbf{pages 50--51, Question 7:} Here is an easier way to solve part
(g) (which also shows that you can replace \textquotedblleft$\ell\in
\mathbf{N}$\textquotedblright\ by \textquotedblleft$\ell\in\mathbf{N}_{0}%
$\textquotedblright):
\textit{Step 1:} We observe that every $N\geq0$ satisfies%
\begin{equation}
\sum_{i=0}^{N}\dbinom{N}{i}d_{\left( 1^{i}\right) }=N!. \label{p46.e7.g.1}%
\end{equation}
(This follows by noticing that $\dbinom{N}{i}d_{\left( 1^{i}\right) }$ is
the number of permutations $\sigma\in\operatorname*{Sym}\nolimits_{N}$ that
have exactly $N-i$ fixed points: indeed, such a permutation is obtained by
choosing the $i$ non-fixed points and then deranging them.)
\textit{Step 2:} Now, fix $n\in\mathbf{N}_{0}$. For each $\ell\in\left\{
0,1,\ldots,n\right\} $, we set%
\begin{equation}
w_{\ell}=\left( -1\right) ^{\ell+1}\sum_{m=\ell+1}^{n}\dbinom{m-1}{\ell
}\dbinom{n}{m}d_{\left( 1^{n-m}\right) }. \label{p46.e7.g.wl=}%
\end{equation}
Thus, our goal is to prove that%
\[
d_{\left( 1^{n}\right) }=\dfrac{n!}{\ell!}d_{\left( 1^{\ell}\right)
}+w_{\ell}\ \ \ \ \ \ \ \ \ \ \text{for each }\ell\in\left\{ 0,1,\ldots
,n\right\} .
\]
This we shall prove by induction over $n-\ell$. The base case ($n-\ell=0$) is
obvious (since $w_{n}=0$). For the induction step, it suffices to prove that%
\begin{equation}
\dfrac{n!}{\ell!}d_{\left( 1^{\ell}\right) }+w_{\ell}=\dfrac{n!}{\left(
\ell+1\right) !}d_{\left( 1^{\ell+1}\right) }+w_{\ell+1} \label{p46.e7.g.3}%
\end{equation}
for each $\ell\in\left\{ 0,1,\ldots,n-1\right\} $. Thus we shall focus on
proving (\ref{p46.e7.g.3}).
\textit{Step 3:} Fix $\ell\in\left\{ 0,1,\ldots,n-1\right\} $. The
definition of $w_{\ell+1}$ yields%
\begin{align*}
w_{\ell+1} & =\left( -1\right) ^{\ell+2}\sum_{m=\ell+2}^{n}\dbinom
{m-1}{\ell+1}\dbinom{n}{m}d_{\left( 1^{n-m}\right) }\\
& =\left( -1\right) ^{\ell+2}\sum_{m=\ell+1}^{n}\dbinom{m-1}{\ell+1}%
\dbinom{n}{m}d_{\left( 1^{n-m}\right) }\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we have extended the range of the sum by one}\\
\text{extra addend, which is zero}\\
\text{(since }\dbinom{m-1}{\ell+1}=0\text{ when }m=\ell+1\text{)}%
\end{array}
\right) \\
& =-\left( -1\right) ^{\ell+1}\sum_{m=\ell+1}^{n}\dbinom{m-1}{\ell
+1}\dbinom{n}{m}d_{\left( 1^{n-m}\right) }.
\end{align*}
Subtracting this equality from (\ref{p46.e7.g.wl=}), we find%
\begin{align*}
& w_{\ell}-w_{\ell+1}\\
& =\left( -1\right) ^{\ell+1}\sum_{m=\ell+1}^{n}\dbinom{m-1}{\ell}%
\dbinom{n}{m}d_{\left( 1^{n-m}\right) }\\
& \ \ \ \ \ \ \ \ \ \ -\left( -\left( -1\right) ^{\ell+1}\sum_{m=\ell
+1}^{n}\dbinom{m-1}{\ell+1}\dbinom{n}{m}d_{\left( 1^{n-m}\right) }\right) \\
& =\left( -1\right) ^{\ell+1}\sum_{m=\ell+1}^{n}\underbrace{\left(
\dbinom{m-1}{\ell}+\dbinom{m-1}{\ell+1}\right) }_{\substack{=\dbinom{m}%
{\ell+1}\\\text{(by the recursion of the}\\\text{binomial coefficients)}%
}}\dbinom{n}{m}d_{\left( 1^{n-m}\right) }\\
& =\left( -1\right) ^{\ell+1}\sum_{m=\ell+1}^{n}\underbrace{\dbinom{m}%
{\ell+1}\dbinom{n}{m}}_{\substack{=\dbinom{n}{\ell+1}\dbinom{n-\left(
\ell+1\right) }{n-m}\\\text{(by straightforward manipulations)}}}d_{\left(
1^{n-m}\right) }\\
& =\left( -1\right) ^{\ell+1}\dbinom{n}{\ell+1}\underbrace{\sum_{m=\ell
+1}^{n}\dbinom{n-\left( \ell+1\right) }{n-m}d_{\left( 1^{n-m}\right) }%
}_{\substack{=\sum_{i=0}^{n-\left( \ell+1\right) }\dbinom{n-\left(
\ell+1\right) }{i}d_{\left( 1^{i}\right) }\\\text{(here, we have
substituted }i\\\text{for }n-m\text{ in the sum)}}}\\
& =\left( -1\right) ^{\ell+1}\dbinom{n}{\ell+1}\underbrace{\sum
_{i=0}^{n-\left( \ell+1\right) }\dbinom{n-\left( \ell+1\right) }%
{i}d_{\left( 1^{i}\right) }}_{\substack{=\left( n-\left( \ell+1\right)
\right) !\\\text{(by (\ref{p46.e7.g.1}) (applied to }N=n-\left(
\ell+1\right) \text{))}}}\\
& =\left( -1\right) ^{\ell+1}\underbrace{\dbinom{n}{\ell+1}\left(
n-\left( \ell+1\right) \right) !}_{=\dfrac{n!}{\left( \ell+1\right) !}%
}=\left( -1\right) ^{\ell+1}\dfrac{n!}{\left( \ell+1\right) !}.
\end{align*}
Comparing this with%
\begin{align*}
& \dfrac{n!}{\left( \ell+1\right) !}\underbrace{d_{\left( 1^{\ell
+1}\right) }}_{\substack{=\left( \ell+1\right) d_{\left( 1^{\ell}\right)
}+\left( -1\right) ^{\ell+1}\\\text{(by the well-known recursion}\\\text{for
derangement numbers)}}}-\dfrac{n!}{\ell!}d_{\left( 1^{\ell}\right) }\\
& =\dfrac{n!}{\left( \ell+1\right) !}\left( \left( \ell+1\right)
d_{\left( 1^{\ell}\right) }+\left( -1\right) ^{\ell+1}\right) -\dfrac
{n!}{\ell!}d_{\left( 1^{\ell}\right) }\\
& =\underbrace{\dfrac{n!}{\left( \ell+1\right) !}\left( \ell+1\right)
}_{=\dfrac{n!}{\ell!}}d_{\left( 1^{\ell}\right) }+\dfrac{n!}{\left(
\ell+1\right) !}\left( -1\right) ^{\ell+1}-\dfrac{n!}{\ell!}d_{\left(
1^{\ell}\right) }\\
& =\dfrac{n!}{\ell!}d_{\left( 1^{\ell}\right) }+\dfrac{n!}{\left(
\ell+1\right) !}\left( -1\right) ^{\ell+1}-\dfrac{n!}{\ell!}d_{\left(
1^{\ell}\right) }=\dfrac{n!}{\left( \ell+1\right) !}\left( -1\right)
^{\ell+1}=\left( -1\right) ^{\ell+1}\dfrac{n!}{\left( \ell+1\right) !},
\end{align*}
we obtain%
\[
w_{\ell}-w_{\ell+1}=\dfrac{n!}{\left( \ell+1\right) !}d_{\left( 1^{\ell
+1}\right) }-\dfrac{n!}{\ell!}d_{\left( 1^{\ell}\right) }.
\]
This is clearly equivalent to (\ref{p46.e7.g.3}). Thus, (\ref{p46.e7.g.3}) is
proven. This completes the induction step.
\item \textbf{page 54, Question 24:} I know it's a stupid remark, but you have
never actually defined the notion of a \textquotedblleft coplactic
map\textquotedblright. (It just means one of the maps $E_{k}$, $F_{k}$ and
$S_{k}$ defined in \S 4.2.)
\item \textbf{page 55, Question 25:} Question 25 (b) is precisely the claim of
Lemma 5.6.
\item \textbf{page 58, solution to Question 2:} Let me add that part (b) of
the Question can also be easily solved without using part (a). One such
solution appears in the solution to Exercise 2.2.9 in
\qquad Darij Grinberg and Victor Reiner,
\qquad\textit{Hopf Algebras in Combinatorics},
\qquad version of 20 April 2020,
\qquad\url{http://www.cip.ifi.lmu.de/~grinberg/algebra/HopfComb-sols.pdf}
(also available at \href{https://arxiv.org/abs/1409.8356v6}{arXiv:1409.8356v6})
(beware that the numbering on my website might have changed by the time you're
reading this, but the numbering on
\href{https://arxiv.org/abs/1409.8356v6}{arXiv:1409.8356v6} will never change).
Incidentally, a generalization of your Question 2 appears in Propositions 1.1
and 1.2 of
\qquad C. De Concini, David Eisenbud, and C. Procesi,
\qquad\textit{\href{https://eudml.org/doc/142693}{Young Diagrams and
Determinantal Varieties}},
\qquad Inventiones math. 56 (1980), pp. 129--165.
\item \textbf{page 64, solution to Question 21:} Maybe explain what
\textquotedblleft disjoint union\textquotedblright\ means (in
\textquotedblleft disjoint union of rim-hooks\textquotedblright).
\end{itemize}
\end{document}