\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{framed}
\usepackage{comment}
\usepackage[dvipsnames]{xcolor}% dvipsnames needed for ForestGreen (used in todo macros)
\usepackage{hyperref}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{tikz}
\usepackage{needspace}
\usepackage{tabls}
\usepackage{wasysym}
\usepackage{easytable}
\usepackage{ytableau}
\usepackage[type={CC}, modifier={zero}, version={1.0},]{doclicense}
\usepackage{xargs}
\usepackage[textwidth=65mm]{todonotes}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Thursday, March 26, 2026 05:47:33}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{<META NAME="GraphicsSave" CONTENT="32">}
%TCIDATA{<META NAME="SaveForMode" CONTENT="1">}
%TCIDATA{BibliographyScheme=Manual}
%TCIDATA{Language=American English}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\DeclareMathOperator{\oddcol}{\mathsf{oddcol}}
\usetikzlibrary{arrows.meta}
\usetikzlibrary{chains}
\newcounter{exer}
\newcounter{exera}
\numberwithin{exer}{subsection}
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[subsection]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{convention}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{question}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{warning}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exam}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exam}[#1]\begin{leftbar}}
{\end{leftbar}\end{exam}}
\newtheorem{exmp}[exer]{Exercise}
\newenvironment{exercise}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\newenvironment{statement}{\begin{quote}}{\end{quote}}
\newenvironment{fineprint}{\medskip \begin{small}}{\end{small} \medskip}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{question}[1][Question]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{warning}[1][Warning]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{teachingnote}[1][Teaching note]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\let\cupnonlimits\bigcup
\let\capnonlimits\bigcap
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\renewcommand{\bigcup}{\cupnonlimits\limits}
\renewcommand{\bigcap}{\capnonlimits\limits}
\setlength\tablinesep{3pt}
\setlength\arraylinesep{3pt}
\setlength\extrarulesep{3pt}
\voffset=0cm
\hoffset=-0.7cm
\setlength\textheight{22.5cm}
\setlength\textwidth{15.5cm}
\newcommand\arxiv[1]{\href{https://arxiv.org/abs/#1}{\texttt{arXiv:#1}}}
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\CC}{\mathbb{C}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\KK}{\mathbb{K}}
\newcommand{\B}{\mathcal{B}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\lcm}{\operatorname{lcm}}
\newcommand{\rev}{\operatorname{rev}}
\newcommand{\powset}[2][]{\ifthenelse{\equal{#2}{}}{\mathcal{P}\left(#1\right)}{\mathcal{P}_{#1}\left(#2\right)}}
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\tup}[1]{\left( #1 \right)}
\newcommand{\ive}[1]{\left[ #1 \right]}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\lf}[2]{#1^{\underline{#2}}}
\newcommand{\underbrack}[2]{\underbrace{#1}_{\substack{#2}}}
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}}
\newcommand{\are}{\ar@{-}}
\newcommand{\nnn}{\nonumber\\}
\newcommand{\sslash}{\mathbin{/\mkern-6mu/}}
\newcommand{\numboxed}[2]{\underbrace{\boxed{#1}}_{\text{box } #2}}
\newcommand{\ig}[2]{\includegraphics[scale=#1]{#2.png}}
\newcommand{\UNFINISHED}{\begin{center} \Huge{\textbf{Unfinished material begins here.}} \end{center} }
\iffalse
\NOEXPAND{\today}{\today}
\NOEXPAND{\sslash}{\sslash}
\NOEXPAND{\numboxed}[2]{\numboxed}
\NOEXPAND{\UNFINISHED}{\UNFINISHED}
\fi
\ihead{Left ideal Gelfand model}
\ohead{page \thepage}
\cfoot{\today}
\newcommand{\franco}[2][]{\todo[size=\small,color=ForestGreen!30,#1]{#2 \\ \hfill --- Franco}}
\newcommand{\sarah}[2][]{\todo[size=\small,color=teal!30,#1]{#2 \\ \hfill --- Sarah}}
\newcommand{\patty}[2][]{\todo[size=\small,color=purple!30,#1]{#2 \\ \hfill --- Patty}}
\newcommand{\darij}[2][]{\todo[size=\small,color=cyan!30,#1]{#2 \\ \hfill --- Darij}}
\begin{document}

\title{A left ideal Gelfand model for the symmetric group}
\author{Sarah Brauner, Patricia Commins,\\Darij Grinberg, Franco Saliola}
\date{working draft,
%TCIMACRO{\TeXButton{today}{\today}}%
%BeginExpansion
\today
%EndExpansion
}
\maketitle
\tableofcontents

\textbf{This is a working draft.} It will need a lot of editing. Some of the
lemmas are probably redundant or can be found in the literature. Sections
marked with (*) contain ideas and to-do lists rather than actual results, and
will likely be removed (if the ideas don't work out) or elaborated upon (if
they do).

This paper will likely be split into two eventually.

\section{\label{sec.intro}Introduction}

Ever since Alfred Young's legendary \textquotedblleft On quantitative
substitutional analysis\textquotedblright\ paper series \cite{Young77}, the
group algebra $\mathbb{Q}\left[  S_{n}\right]  $ of the symmetric group
$S_{n}$ has been studied as the origin of permutational symmetries on
vectorial objects such as polynomials and tensors. While the representations
of the algebra (i.e., of the symmetric group) are well-understood at least in
characteristic $0$, there has come forth a steady stream of remarkable
families of elements of $\mathbb{Q}\left[  S_{n}\right]  $ that exhibit
surprising, nontrivial and often deep properties. We shall refer to these
elements as \emph{shuffles}, as many of them (viz., those with nonnegative
coefficients) can be interpreted as random ways to shuffle a deck of cards.
Some of these shuffles actually originated in this card-shuffling context,
such as the top-to-random, bottom-to-random and Gilbert--Shannon--Reeds
shuffles (see \cite{DiaFul23} for these and many others), though the latter
have independently arisen in the study of Hochschild cohomology of commutative
algebras (see \cite{GerSch87} and \cite[\S 5.6]{DiaFul23}\footnote{Not all of
us agree with the characterization of Hochschild cohomology as
\textquotedblleft an esoteric part of modern algebra\textquotedblright.}).
Others originate, e.g., in Lie theory (the Dynkin and Klyachko idempotents
\cite[Theorem 8.16]{Reuten93}, \cite{BleLau92}) or in real algebraic geometry
(\cite[\S 3]{KarPur23}).

Two particularly mysterious families of shuffles in $\mathbb{Q}\left[
S_{n}\right]  $ were constructed in the memoir \cite{RSW} by Reiner, Saliola
and Welker. The first of them -- the \emph{random-to-random shuffles}, denoted
$\nu_{\left(  k,1^{n-k}\right)  }$ in \cite{RSW} -- has since seen its main
properties proved (\cite{DieSal18}, \cite{Lafren}, \cite{AFBCCL24},
\cite{BCGS25}). We shall here study the second, which we call the \emph{dyadic
shuffles} ($\nu_{\left(  2^{k},1^{n-2k}\right)  }$ in \cite{RSW}). Despite
these two families being constructed in parallel ways in \cite{RSW} (as
\textquotedblleft symmetrizations\textquotedblright\ of BHR shuffles) and --
to some extent -- having analogous properties, we have found their similitude
to be shallow, and the proofs we will give use methods entirely different from
those that have worked for the first family. (In particular, while the first
family can be $q$-deformed into the Hecke algebra, no such deformation has
been found for the second family.)

The easiest way to define these dyadic shuffles is by
\[
\mathcal{S}_{n,k}:=\sum_{w\in S_{n}}\operatorname*{incmat}\nolimits_{k}\left(
w\right)  w\in\mathbb{Q}\left[  S_{n}\right]
\]
for all $n,k\in\mathbb{N}$, where $\operatorname*{incmat}\nolimits_{k}\left(
w\right)  $ denotes $\dfrac{1}{k!}$ times the number of ways to choose $2k$
distinct elements $i_{1},i_{2},\ldots,i_{k},j_{1},j_{2},\ldots,j_{k}%
\in\left\{  1,2,\ldots,n\right\}  $ such that each $p\in\left\{
1,2,\ldots,k\right\}  $ satisfies $i_{p}<j_{p}$ and $w\left(  i_{p}\right)
<w\left(  j_{p}\right)  $. (Alternatively, we can remove the $\dfrac{1}{k!}$
factor and instead impose the additional condition $i_{1} < i_{2} < \cdots<
i_{k}$.) As an example, using one-line notation, $\operatorname*{incmat}%
\nolimits_{2}\left(  13254\right)  = 10$. (This is a restatement of Definition
\ref{def.dyadic.def} below, except that we shall use an arbitrary field
$\mathbf{k}$ of characteristic $0$ instead of the $\mathbb{Q}$ here.) Clearly,
$\mathcal{S}_{n,0}$ is a known central element (the sum of all permutations in
$S_{n}$), while $\mathcal{S}_{n,k}$ is just $0$ when $2k>n$. It is the
intermediate shuffles $\mathcal{S}_{n,1},\mathcal{S}_{n,2},\ldots
,\mathcal{S}_{n,\left\lfloor n/2\right\rfloor }$ that are interesting.

The major surprises -- already proved in \cite[Theorem 1.6]{RSW} -- are that

\begin{enumerate}
\item[\textbf{(a)}] these shuffles $\mathcal{S}_{n,0},\mathcal{S}%
_{n,1},\mathcal{S}_{n,2},\ldots$ all commute, and

\item[\textbf{(b)}] each of them has nonnegative integer eigenvalues (when
acting by left or right multiplication on $\mathbb{Q}\left[  S_{n}\right]  $
or on any representation of $\mathbb{Q}\left[  S_{n}\right]  $).
\end{enumerate}

The proofs in \cite{RSW} rely fundamentally on a \emph{Gelfand model} of
$S_{n}$: a representation of $S_{n}$ that can be decomposed as a direct sum of
all irreducible representations (i.e., all Specht modules $\mathcal{S}%
^{\lambda}$ with $\lambda$ a partition of $n$), each appearing exactly once.
The specific model constructed in \cite[\S 5.1]{RSW} is the Whitney homology
of a poset, or rather a copy thereof in $\mathbb{Q}\left[  S_{n}\right]  $
identified using character computations.

In the present work, we shall simplify this by finding a much simpler Gelfand
model of $S_{n}$: Namely, our Gelfand model is%
\[
\mathcal{G}:=\operatorname*{span}\left(  G_{i_{1},i_{2},\ldots,i_{k}%
;\ j_{1},j_{2},\ldots,j_{k}}\ \mid\ i_{1},i_{2},\ldots,i_{k},j_{1}%
,j_{2},\ldots,j_{k}\in\left[  n\right]  \text{ are distinct}\right)
\subseteq\mathbb{Q}\left[  S_{n}\right]  ,
\]
where%
\[
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}:=\sum_{\substack{w\in
S_{n};\\w\left(  i_{s}\right)  <w\left(  j_{s}\right)  \text{ for all }%
s\in\left\{  1,2,\ldots,k\right\}  }}w^{-1}\in\mathbb{Q}\left[  S_{n}\right]
.
\]
This is a left ideal of $\mathbb{Q}\left[  S_{n}\right]  $, thus a
representation, and we will show that it is a Gelfand model using the basic
theory of Specht modules. It is related to the classical Gelfand model of
Kodiyalam--Verma \cite{KodVer04} and Adin--Postnikov--Roichman \cite[Theorem
1.2]{AdPoRo08}; in fact, it has a filtration whose associated graded object is
canonically isomorphic to the latter. More usefully for us, it contains all
the dyadic shuffles $\mathcal{S}_{n,i}$, and even better, we have
$\mathcal{S}_{n,i}\in\mathcal{G}^{\ast}\mathcal{G}$, where $\ast$ denotes the
antipode map of $\mathbb{Q}\left[  S_{n}\right]  $ (the linear map sending
each permutation to its inverse). This alone is sufficient to prove the above
properties \textbf{(a)} and \textbf{(b)} by general properties of Gelfand
models. In fact, our paper begins with a general study of Gelfand models of
$S_{n}$ (most of them generalizable to finite-dimensional split semisimple
algebras, though we don't elaborate on this).

We then move on to proving new properties of the dyadic shuffles. Among them
are the identities%
\[
\binom{n-2\left(  k-1\right)  }{2}\mathcal{S}_{n,k-1}=\mathcal{S}_{n,k}\left(
\mathcal{B}_{n}-\left(  n-2k\right)  \right)  =\left(  \mathcal{B}_{n}^{\ast
}-\left(  n-2k\right)  \right)  \mathcal{S}_{n,k},
\]
where $\mathcal{B}_{n}$ is the bottom-to-random shuffle (i.e., the sum of the
cycles $\left(  n,n-1,\ldots,j\right)  $ for all $j\in\left\{  1,2,\ldots
,n\right\}  $), and%
\[
w_{0}\mathcal{S}_{n,k}=\mathcal{S}_{n,k}w_{0}=\sum_{i=0}^{k}\left(  -1\right)
^{i}\dbinom{n-2i}{2k-2i}\dfrac{\left(  2k-2i\right)  !}{2^{k-i}\left(
k-i\right)  !}\mathcal{S}_{n,i},
\]
where $w_{0}\in S_{n}$ is the permutation sending $1,2,\ldots,n$ to
$n,n-1,\ldots,1$.

Finally (TODO: this part needs to be written up), we take a bite out of one of
the open problems of \cite{RSW}, namely the computation of the eigenvalues of
the $\mathcal{S}_{n,k}$ acting on Specht modules $\mathcal{S}^{\lambda}$. For
general reasons, at most one of these eigenvalues is nonzero for each choice
of $k$ and $\lambda$ (which means that it can also be viewed as the trace of
$\mathcal{S}_{n,k}$ acting on $\mathcal{S}^{\lambda}$, that is, as its value
under the irreducible character corresponding to $\lambda$); this nonzero
value is a positive integer. But a combinatorial or even just manifestly
positive formula for this value is far from known (see \cite[Problem 5.5]%
{RSW}). Lafreni\`{e}re conjectured a formula \cite[Conjecture 142]{Lafren} for
all \emph{hook-shaped} partitions $\lambda$ as well as general recursions
which hold for all $\lambda$. We prove each of her conjectures (TODO).

\bigskip

\textbf{Plotlines:} After this introduction, the paper is structured as follows:

\begin{enumerate}
\item In Section \ref{sec.gen}, we prove general properties of
multiplicity-free representations and Gelfand models of $S_{n}$ over a field
$\mathbf{k}$ of characteristic $0$. We suspect that these are folklore to the
experts, but we could not find them explicitly stated in the literature,
whence we lay out the (fairly easy) proofs here.

\item In Section \ref{sec.gelfand}, we construct the left ideal $\mathcal{G}$
of $\mathbf{k}\left[  S_{n}\right]  $ and prove that it is a Gelfand model of
$S_{n}$. We furthermore study its canonical filtration and use it to recover
the results of \cite{KodVer04} and \cite[Theorem 1.2]{AdPoRo08}.

\item In Section \ref{sec.dyadic}, we introduce the dyadic shuffles
$\mathcal{S}_{n,k}$ and prove various identities holding for them. We reprove
their commutativity and the nonnegative integrality of their eigenvalues using
the Gelfand model $\mathcal{G}$.

\item In Section \ref{sec.hook} (TODO), we examine the eigenvalues of the
$\mathcal{S}_{n, k}$. First, we prove Lafreni\`{e}re's conjectures on
recursions satisfied by the eigenvalues of $\mathcal{S}_{n, k}$ as $k$ grows.
Then, we prove her conjecture for the eigenvalues of $\mathcal{S}_{n,k}$ on
hook-shaped Specht modules.
\end{enumerate}

\bigskip

\textbf{Acknowledgements:} We thank Nadia Lafreni\`{e}re for prompting this
research in her thesis \cite{Lafren}. We also thank Lafreni\`{e}re and Trevor
Karn for many discussions on the dyadic family. Much of this work was done at
the MFO Oberwolfach in Fall 2024 (Oberwolfach Research Fellows, 2442p) and at
the ICERM Providence in Fall 2025 (program \textquotedblleft Categorification
and Computation in Algebraic Combinatorics\textquotedblright). We thank both
of these institutes for their hospitality.

\bigskip

\textbf{Notations.} We shall follow the notations of \cite{sga} whenever
possible (but not in the notations for the antipode and the dual space). We
fix a field $\mathbf{k}$ of characteristic $0$. Rings and algebras are
associative and unital by default. We let $n$ be a nonnegative integer, and
$S_{n}$ be the symmetric group of the set $\left[  n\right]  :=\left\{
1,2,\ldots,n\right\}  $. The group algebra $\mathbf{k}\left[  S_{n}\right]  $
of this group will be denoted by $\mathcal{A}$. The notation \textquotedblleft%
$\lambda\vdash n$\textquotedblright\ means \textquotedblleft$\lambda$ is a
partition of $n$\textquotedblright. Young diagrams will be drawn in English
notation. The Specht modules (i.e., the irreducible representations of $S_{n}%
$, constructed using polytabloids) are denoted by $\mathcal{S}^{\lambda}$,
where $\lambda\vdash n$. Further notations will be introduced along the way.

\section{\label{sec.gen}General theorems on multiplicity-free left ideals}

In this section, we shall study the general behavior of a multiplicity-free
representation of $S_{n}$. Many of our results are likely to be implicit in
the literature in some form or another, and generalize far beyond the case of
$S_{n}$-representations, but we will restrict ourselves to $S_{n}$ to keep our focus.

\subsection{Basics and definitions}

Let $\mathbf{k}$ be a field of characteristic $0$. Let $n\in\mathbb{N}$.
Consider the symmetric group $S_{n}$. Let $\mathcal{A}$ be the group algebra
$\mathbf{k}\left[  S_{n}\right]  $. Thus, the representations of $S_{n}$ over
$\mathbf{k}$ are the left $\mathcal{A}$-modules. We recall that the
$\mathbf{k}$-algebra $\mathcal{A}$ is semisimple; thus, each representation of
$S_{n}$ can be decomposed as a direct sum of irreducible representations. The
irreducible representations of $S_{n}$ are (up to isomorphism) the Specht
modules $\mathcal{S}^{\lambda}$ corresponding to the partitions $\lambda$ of
$n$. Moreover, each Specht module $\mathcal{S}^{\lambda}$ is absolutely
irreducible, i.e., satisfies%
\begin{equation}
\operatorname*{End}\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda
}\right)  \cong\mathbf{k} \label{eq.EndSlam}%
\end{equation}
as $\mathbf{k}$-algebras. For the proofs of these facts, see
\cite[\S 5.12--\S 5.13]{EGHetc11} (or combine \cite[Proposition 4.5]{s2b3}
with the fact that a $\mathbf{k}$-algebra that is $\cong\mathbf{k}$ as a
$\mathbf{k}$-module must also be $\cong\mathbf{k}$ as a $\mathbf{k}$-algebra).
Note that every irreducible representation $I$ of $S_{n}$ is isomorphic to
some Specht module $\mathcal{S}^{\lambda}$, and thus satisfies%
\begin{equation}
\operatorname*{End}\nolimits_{\mathcal{A}}I\cong\mathbf{k} \label{eq.EndI=k}%
\end{equation}
as $\mathbf{k}$-algebras (by (\ref{eq.EndSlam})).

A representation of $S_{n}$ is said to be \emph{multiplicity-free} if it can
be written as a direct sum of mutually non-isomorphic irreducible
representations of $S_{n}$. Well-known examples of multiplicity-free
representations are the restrictions $\operatorname*{Res}\nolimits_{S_{n}%
}^{S_{n+1}}\mathcal{S}^{\lambda}$ of Specht modules $\mathcal{S}^{\lambda}$ of
the symmetric group $S_{n+1}$ (see, e.g., \cite[Theorem 2.8.3 part 1]%
{Sagan01}). We shall, however, focus on a different source of examples.

Let $J$ be a left ideal of the group algebra $\mathcal{A}$ such that $J$ is
multiplicity-free as a representation of $S_{n}$ (that is, $J$ can be written
as a direct sum of mutually non-isomorphic irreducible representations of
$S_{n}$). We shall derive some properties of $J$ from this. In
Section~\ref{sec.dyadic}, these will be used to provide an alternative proof
of \cite[Theorem 1.6]{RSW}; they also yield some stronger claims along the
same lines.

\subsection{Commutativity of $\operatorname*{End}\nolimits_{\mathcal{A}}J$ and
$J\left[  J,J\right]  =0$}

The following is a well-known property of multiplicity-free modules over a
split semisimple algebra:

\begin{proposition}
\label{prop.mf1}The endomorphism algebra $\operatorname*{End}%
\nolimits_{\mathcal{A}}J$ is commutative.
\end{proposition}

\begin{proof}
Let $J=I_{1}\oplus I_{2}\oplus\cdots\oplus I_{k}$ be a decomposition of $J$
into irreducible subrepresentations (this exists because $\mathcal{A}$ is
semisimple). Then, these irreducible addends $I_{1},I_{2},\ldots,I_{k}$ are
pairwise non-isomorphic (since $J$ is multiplicity-free), and thus their
pairwise hom-spaces $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
I_{a},I_{b}\right)  $ with $a\neq b$ are all $0$ (by Schur's lemma or by
\cite[Proposition 4.5]{s2b3}). Hence, the canonical \textquotedblleft
block-diagonal\textquotedblright\ embedding%
\begin{align*}
\prod_{j=1}^{k}\operatorname*{End}\nolimits_{\mathcal{A}}\left(  I_{j}\right)
&  \rightarrow\operatorname*{End}\nolimits_{\mathcal{A}}\left(  I_{1}\oplus
I_{2}\oplus\cdots\oplus I_{k}\right)  ,\\
\left(  f_{1},f_{2},\ldots,f_{k}\right)   &  \mapsto\left(
\begin{array}
[c]{cccc}%
f_{1} & 0 & \cdots & 0\\
0 & f_{2} & \cdots & 0\\
\vdots & \vdots & \ddots & \vdots\\
0 & 0 & \cdots & f_{k}%
\end{array}
\right)
\end{align*}
is surjective (since any endomorphism $f\in\operatorname*{End}%
\nolimits_{\mathcal{A}}\left(  I_{1}\oplus I_{2}\oplus\cdots\oplus
I_{k}\right)  $ that does not lie in its image would have a nontrivial
projection on some $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
I_{a},I_{b}\right)  $ with $a\neq b$). Therefore, this embedding is a
$\mathbf{k}$-algebra isomorphism. Thus, we have the $\mathbf{k}$-algebra
isomorphism%
\[
\operatorname*{End}\nolimits_{\mathcal{A}}J=\operatorname*{End}%
\nolimits_{\mathcal{A}}\left(  I_{1}\oplus I_{2}\oplus\cdots\oplus
I_{k}\right)  \cong\prod_{j=1}^{k}\underbrace{\operatorname*{End}%
\nolimits_{\mathcal{A}}\left(  I_{j}\right)  }_{\substack{\cong\mathbf{k}%
\\\text{(by (\ref{eq.EndI=k}))}}}\cong\prod_{j=1}^{k}\mathbf{k}=\mathbf{k}%
^{k}.
\]
Hence, $\operatorname*{End}\nolimits_{\mathcal{A}}J$ is commutative, since
$\mathbf{k}^{k}$ is commutative.
\end{proof}

Recall that the \emph{commutator} $\left[  u,v\right]  $ of two elements
$u,v\in\mathcal{A}$ is defined to be $uv-vu\in\mathcal{A}$. Furthermore, the
\emph{commutator} $\left[  U,V\right]  $ of two $\mathbf{k}$-vector subspaces
$U$ and $V$ of $\mathcal{A}$ is defined to be the span of all commutators
$\left[  u,v\right]  =uv-vu$ with $u\in U$ and $v\in V$.

\begin{theorem}
\label{thm.mf2}We have $J\left[  J,J\right]  =0$.
\end{theorem}

\begin{proof}
It suffices to show that $J\left[  a,b\right]  =0$ for all $a\in J$ and $b\in
J$. So let us fix $a\in J$ and $b\in J$. Consider the map%
\begin{align*}
\rho_{a}:J  &  \rightarrow J,\\
x  &  \mapsto xa.
\end{align*}
This map $\rho_{a}$ is well-defined (since $a\in J$ and since $J$ is a left
ideal), and is left $\mathcal{A}$-linear. Thus, $\rho_{a}\in
\operatorname*{End}\nolimits_{\mathcal{A}}J$. Similarly, we can define an
analogous map $\rho_{b}\in\operatorname*{End}\nolimits_{\mathcal{A}}J$ that
sends each $x$ to $xb$. Since $\operatorname*{End}\nolimits_{\mathcal{A}}J$ is
commutative (by Proposition \ref{prop.mf1}), we have $\rho_{a}\circ\rho
_{b}=\rho_{b}\circ\rho_{a}$. In other words, each $x\in J$ satisfies
$xab=xba$. In other words, each $x\in J$ satisfies $x\left[  a,b\right]  =0$.
In other words, $J\left[  a,b\right]  =0$. This proves Theorem \ref{thm.mf2}.
\end{proof}

Note that the \textquotedblleft opposite\textquotedblright\ version $\left[
J,J\right]  J=0$ of Theorem \ref{thm.mf2} does not hold in general.

\begin{remark}
More generally, Proposition \ref{prop.mf1} and Theorem \ref{thm.mf2} hold if
we replace $\mathcal{A}$ by any finite-dimensional semisimple $\mathbf{k}%
$-algebra $R$ with the property that all irreducible left $R$-modules satisfy
(\ref{eq.EndI=k}). Indeed, the above proofs use nothing but the conditions we
just listed.
\end{remark}

\subsection{Left ideals of $\mathcal{A}$ are generated by idempotents}

For our next vanishing commutator theorem, we need a general property of left
ideals of group algebras (\cite[Theorem (3.7.A)]{Weyl53}):

\begin{theorem}
\label{thm.mf-li-gen}Let $G$ be a finite group. Let $I$ be a left ideal of the
group algebra $\mathbf{k}\left[  G\right]  $. (We are still assuming that
$\mathbf{k}$ has characteristic $0$.) Then, there exists an idempotent $e\in
I$ such that $I=\mathbf{k}\left[  G\right]  \cdot e$.
\end{theorem}

\begin{proof}
A left ideal of $\mathbf{k}\left[  G\right]  $ is a left $G$-subrepresentation
of $\mathbf{k}\left[  G\right]  $. Thus, Maschke's theorem shows that there is
a left $\mathbf{k}\left[  G\right]  $-linear projection $\pi:\mathbf{k}\left[
G\right]  \rightarrow I$. Consider this $\pi$.

Set $e:=\pi\left(  1\right)  $ (where $1$ is the unity of $\mathbf{k}\left[
G\right]  $). Then, $e\in I$. Thus, $\mathbf{k}\left[  G\right]  \cdot
e\subseteq I$ (since $I$ is a left ideal). Furthermore, each $x\in I$
satisfies%
\begin{align*}
x  &  =\pi\left(  x\right)  \ \ \ \ \ \ \ \ \ \ \left(  \text{since }\pi\text{
is a projection onto }I\right) \\
&  =\pi\left(  x1\right)  =x\underbrace{\pi\left(  1\right)  }_{=e}%
\ \ \ \ \ \ \ \ \ \ \left(  \text{since }\pi\text{ is left }\mathbf{k}\left[
G\right]  \text{-linear}\right) \\
&  =xe.
\end{align*}
Applying this to $x=e$, we find $e=ee$. This shows that $e$ is idempotent.
Furthermore, $I\subseteq\mathbf{k}\left[  G\right]  \cdot e$ (since we showed
that each $x\in I$ satisfies $x=xe\in\mathbf{k}\left[  G\right]  \cdot e$).
Combined with $\mathbf{k}\left[  G\right]  \cdot e\subseteq I$, this yields
$I=\mathbf{k}\left[  G\right]  \cdot e$. Thus, the proof of Theorem
\ref{thm.mf-li-gen} is complete.
\end{proof}

\begin{remark}
More generally, Theorem \ref{thm.mf-li-gen} holds if we replace $\mathbf{k}%
\left[  G\right]  $ by any finite-dimensional semisimple $\mathbf{k}$-algebra
(since Maschke's theorem is a general property of such algebras).
\end{remark}

\subsection{Commutativity of $J^{\ast}J$ and of $K^{\ast}J$}

Next, let us consider the \emph{antipode} of the group algebra $\mathbf{k}%
\left[  S_{n}\right]  $. This is the $\mathbf{k}$-linear map from
$\mathbf{k}\left[  S_{n}\right]  $ to $\mathbf{k}\left[  S_{n}\right]  $ that
sends each $w\in S_{n}$ to $w^{-1}$. This antipode will be denoted by
$x\mapsto x^{\ast}$. It is known to be an involutive $\mathbf{k}$-algebra
anti-automorphism (see \cite[\S 3.11.4]{sga}, where this map is denoted by
$S$). We let $J^{\ast}$ denote the image of the left ideal $J$ under this
antipode. We now claim the following:

\begin{theorem}
\label{thm.mf3}We have $\left[  J^{\ast}J,\ J^{\ast}J\right]  =0$.
\end{theorem}

More generally:

\begin{theorem}
\label{thm.mf3gen}Let $K$ be a further left ideal of $\mathcal{A}$ that is
multiplicity-free as a representation of $S_{n}$. Then, $\left[  K^{\ast
}J,\ K^{\ast}J\right]  =0$.
\end{theorem}

Before we prove this, we pave our way with several well-intended lemmas. The
first is a well-known basic fact about rings (see, e.g., \cite[Lemma
5.13.4]{EGHetc11}):

\begin{lemma}
\label{lem.etingof-idp}Let $R$ be a ring, and let $M$ be a left $R$-module.
Let $e\in R$ be idempotent. Then, $\operatorname*{Hom}\nolimits_{R}\left(
Re,M\right)  \cong eM$ as abelian groups (where $\operatorname*{Hom}%
\nolimits_{R}$ denotes the hom-space of left $R$-modules).

Moreover, if $R$ is a $\mathbf{k}$-algebra, then this isomorphism is an
isomorphism of $\mathbf{k}$-vector spaces.
\end{lemma}

\begin{proof}
[Proof of Lemma \ref{lem.etingof-idp} (sketched).]It is easy to see that the
maps
\begin{align*}
\operatorname*{Hom}\nolimits_{R}\left(  Re,M\right)   &  \rightarrow eM,\\
f  &  \mapsto f\left(  e\right)
\end{align*}
and%
\begin{align*}
eM  &  \rightarrow\operatorname*{Hom}\nolimits_{R}\left(  Re,M\right)  ,\\
x  &  \mapsto\left(  Re\rightarrow M,\ r\mapsto rx\right)
\end{align*}
are $\mathbb{Z}$-linear and mutually inverse. Thus, they are isomorphisms of
abelian groups. When $R$ is a $\mathbf{k}$-algebra, they are furthermore
$\mathbf{k}$-linear and thus are isomorphisms of $\mathbf{k}$-vector spaces.
\end{proof}

\begin{lemma}
\label{lem.JJ-invar}Let $V$ and $W$ be two left ideals of $\mathcal{A}$ that
are non-isomorphic and irreducible as representations of $S_{n}$. Then, $VW=0$.
\end{lemma}

\begin{proof}
The irreducible representations $V$ and $W$ are non-isomorphic. Hence, Schur's
lemma shows that $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
V,W\right)  =0$. In particular, for any $x\in W$, the map%
\begin{align*}
V  &  \rightarrow W,\\
y  &  \mapsto yx
\end{align*}
must be $0$ (since this map is left $\mathcal{A}$-linear and thus belongs to
$\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  V,W\right)  =0$). But this
means that $yx=0$ for all $y\in V$ and $x\in W$. In other words, $VW=0$.
\end{proof}

\begin{lemma}
\label{lem.Ae*irr}Let $e\in\mathcal{A}$ be an idempotent such that the left
$\mathcal{A}$-module $\mathcal{A}e$ is irreducible. Then, the left
$\mathcal{A}$-module $\mathcal{A}e^{\ast}$ is also irreducible.
\end{lemma}

\begin{proof}
Assume the contrary. Then, the left $\mathcal{A}$-module $\mathcal{A}e^{\ast}$
is not irreducible.

First, we observe that $\mathcal{A}e\neq0$ (since $\mathcal{A}e$ is
irreducible), so that $e\neq0$ and therefore $e^{\ast}\neq0$ (since the
antipode map $\mathcal{A}\rightarrow\mathcal{A},\ x\mapsto x^{\ast}$ is
bijective). Therefore, $\mathcal{A}e^{\ast}\neq0$.

But we assumed that $\mathcal{A}e^{\ast}$ is not irreducible. Hence,
$\mathcal{A}e^{\ast}$ is not indecomposable (by the Maschke theorem). Since
$\mathcal{A}e^{\ast}\neq0$, this shows that $\mathcal{A}e^{\ast}$ is a direct
sum $P\oplus Q$ of two nontrivial left $\mathcal{A}$-submodules $P$ and $Q$.
Consequently, the hom-space $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{A}e^{\ast},\mathcal{A}e^{\ast}\right)  $ has dimension $\geq2$ (since
it contains at least the projections onto $P$ and $Q$).

But Lemma \ref{lem.etingof-idp} (applied to $R=\mathcal{A}$ and $M=\mathcal{A}%
e$) yields $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{A}%
e,\ \mathcal{A}e\right)  \cong e\mathcal{A}e$. Thus,
\[
e\mathcal{A}e\cong\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{A}e,\ \mathcal{A}e\right)  =\operatorname*{End}\nolimits_{\mathcal{A}%
}\left(  \mathcal{A}e\right)  \cong\mathbf{k}%
\]
(by (\ref{eq.EndI=k}), applied to $I=\mathcal{A}e$, since $\mathcal{A}e$ is irreducible).

Applying the antipode anti-isomorphism $\mathcal{A}\rightarrow\mathcal{A}%
,\ x\mapsto x^{\ast}$ to $e\mathcal{A}e\cong\mathbf{k}$, we obtain $e^{\ast
}\mathcal{A}e^{\ast}\cong\mathbf{k}$. Furthermore, $e$ is idempotent, so that
$e^{\ast}$ is idempotent as well (since $x\mapsto x^{\ast}$ is an algebra
anti-morphism). Thus, Lemma \ref{lem.etingof-idp} yields $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  \mathcal{A}e^{\ast},\ \mathcal{A}e^{\ast
}\right)  \cong e^{\ast}\mathcal{A}e^{\ast}\cong\mathbf{k}$, so that
$\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{A}%
e^{\ast},\ \mathcal{A}e^{\ast}\right)  \right)  =\dim\mathbf{k}=1$. This
contradicts the fact that the hom-space $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  \mathcal{A}e^{\ast},\mathcal{A}e^{\ast}\right)
$ has dimension $\geq2$. This contradiction shows that our assumption was
false. Hence, Lemma \ref{lem.Ae*irr} is proved.
\end{proof}

\begin{lemma}
\label{lem.V*W-dim}Let $V$ and $W$ be two left ideals of $\mathcal{A}$ that
are irreducible as representations of $S_{n}$. Then, $\dim\left(  V^{\ast
}W\right)  \leq1$.
\end{lemma}

\begin{proof}
We know that $V$ is a left ideal of the algebra $\mathcal{A}=\mathbf{k}\left[
S_{n}\right]  $. Hence, Theorem \ref{thm.mf-li-gen} (applied to $G=S_{n}$ and
$I=V$) shows that there exists an idempotent $e\in V$ such that $V=\mathbf{k}%
\left[  S_{n}\right]  \cdot e$. Consider this $e$. Then, $V=\mathbf{k}\left[
S_{n}\right]  \cdot e=\mathcal{A}e$. Hence, the left $\mathcal{A}$-module
$\mathcal{A}e$ is irreducible (since $V$ is irreducible). Thus, Lemma
\ref{lem.Ae*irr} yields that the left $\mathcal{A}$-module $\mathcal{A}%
e^{\ast}$ is irreducible.

The element $e$ is idempotent. Hence, $e^{\ast}$ is idempotent as well (since
the antipode $\mathcal{A}\rightarrow\mathcal{A},\ x\mapsto x^{\ast}$ is an
algebra anti-morphism). Therefore, Lemma \ref{lem.etingof-idp} (applied to
$\mathcal{A}$, $W$ and $e^{\ast}$ instead of $R$, $M$ and $e$) yields
$\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{A}e^{\ast
},\ W\right)  \cong e^{\ast}W$ as $\mathbf{k}$-vector spaces.

But all irreducible left $\mathcal{A}$-modules $I$ satisfy
$\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  I,I\right)
=\operatorname*{End}\nolimits_{\mathcal{A}}I\cong\mathbf{k}$ (by
(\ref{eq.EndI=k})) and thus $\dim\left(  \operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  I,I\right)  \right)  =1$. Hence, any two
irreducible left $\mathcal{A}$-modules $P$ and $Q$ satisfy $\dim\left(
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  P,Q\right)  \right)  \leq1$
(indeed, $\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
P,Q\right)  \right)  $ is $1$ if $P\cong Q$ and $0$ otherwise). Thus, in
particular, $\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{A}e^{\ast},\ W\right)  \right)  \leq1$ (since $\mathcal{A}e^{\ast}$
and $W$ are two irreducible left $\mathcal{A}$-modules). In view of
$\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{A}e^{\ast
},\ W\right)  \cong e^{\ast}W$, we can rewrite this as $\dim\left(  e^{\ast
}W\right)  \leq1$.

However, $\mathcal{A}W=W$ (since $W$ is a left ideal of $\mathcal{A}$). From
$V=\mathcal{A}e$, we obtain $V^{\ast}=\left(  \mathcal{A}e\right)  ^{\ast
}=e^{\ast}\mathcal{A}$, so that $V^{\ast}W=e^{\ast}\underbrace{\mathcal{A}%
W}_{=W}=e^{\ast}W$. Thus, $\dim\left(  V^{\ast}W\right)  =\dim\left(  e^{\ast
}W\right)  \leq1$. This proves Lemma \ref{lem.V*W-dim}.
\end{proof}

\begin{proof}
[Proof of Theorem \ref{thm.mf3gen}.]Decompose the $S_{n}$-representation $J$
as a direct sum of irreducible subrepresentations:%
\[
J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}.
\]
These subrepresentations $J_{1},J_{2},\ldots,J_{k}$ are pairwise
non-isomorphic (since $J$ is multiplicity-free). Likewise, decompose the
$S_{n}$-representation $K$ as a direct sum of pairwise non-isomorphic
irreducible subrepresentations:%
\[
K=K_{1}\oplus K_{2}\oplus\cdots\oplus K_{\ell}.
\]


Now, we have%
\begin{equation}
J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}=\sum_{i=1}^{k}J_{i}
\label{pf.thm.mf3gen.J=}%
\end{equation}
and similarly%
\[
K=\sum_{j=1}^{\ell}K_{j}.
\]
The latter equality entails (by applying the antipode to both sides)%
\begin{equation}
K^{\ast}=\sum_{j=1}^{\ell}K_{j}^{\ast}. \label{pf.thm.mf3gen.K=}%
\end{equation}
Multiplying this with (\ref{pf.thm.mf3gen.J=}), we find%
\[
K^{\ast}J=\left(  \sum_{j=1}^{\ell}K_{j}^{\ast}\right)  \left(  \sum_{i=1}%
^{k}J_{i}\right)  =\sum_{j,i}K_{j}^{\ast}J_{i}.
\]
Hence, in order to achieve our goal (of showing that $\left[  K^{\ast
}J,\ K^{\ast}J\right]  =0$), it shall suffice to prove that%
\begin{equation}
\left[  K_{j}^{\ast}J_{i},\ K_{v}^{\ast}J_{u}\right]  =0
\label{pf.thm.mf3gen.subg}%
\end{equation}
for all $i,u\in\left\{  1,2,\ldots,k\right\}  $ and $j,v\in\left\{
1,2,\ldots,\ell\right\}  $. So let us prove this.

Fix $i,u\in\left\{  1,2,\ldots,k\right\}  $ and $j,v\in\left\{  1,2,\ldots
,\ell\right\}  $. We must prove (\ref{pf.thm.mf3gen.subg}). Our proof depends
on some cases:

\begin{itemize}
\item \textit{Case 1:} Assume that $i\neq u$. Then, the left $\mathcal{A}%
$-modules $J_{i}$ and $J_{u}$ are not isomorphic (since $J_{1},J_{2}%
,\ldots,J_{k}$ are pairwise non-isomorphic). Thus, Lemma \ref{lem.JJ-invar}
(applied to $V=J_{i}$ and $W=J_{u}$) yields $J_{i}J_{u}=0$. The same argument
(with the roles of $i$ and $u$ interchanged) yields $J_{u}J_{i}=0$. Now,%
\[
\left[  K_{j}^{\ast}J_{i},\ K_{v}^{\ast}J_{u}\right]  \subseteq K_{j}^{\ast
}J_{i}\underbrace{K_{v}^{\ast}J_{u}}_{\substack{\subseteq\mathcal{A}%
J_{u}=J_{u}\\\text{(since }J_{u}\text{ is a}\\\text{left ideal)}}%
}+\,K_{v}^{\ast}J_{u}\underbrace{K_{j}^{\ast}J_{i}}_{\substack{\subseteq
\mathcal{A}J_{i}=J_{i}\\\text{(since }J_{i}\text{ is a}\\\text{left ideal)}%
}}\subseteq K_{j}^{\ast}\underbrace{J_{i}J_{u}}_{=0}+\,K_{v}^{\ast
}\underbrace{J_{u}J_{i}}_{=0}=0.
\]
In other words, $\left[  K_{j}^{\ast}J_{i},\ K_{v}^{\ast}J_{u}\right]  =0$.
Hence, (\ref{pf.thm.mf3gen.subg}) is proved in Case 1.

\item \textit{Case 2:} Assume that $j\neq v$. Then, the left $\mathcal{A}%
$-modules $K_{j}$ and $K_{v}$ are not isomorphic (since $K_{1},K_{2}%
,\ldots,K_{\ell}$ are pairwise non-isomorphic). Hence, Lemma \ref{lem.JJ-invar}
(applied to $V=K_{j}$ and $W=K_{v}$) yields $K_{j}K_{v}=0$. Applying the
antipode (i.e., the algebra anti-automorphism $a\mapsto a^{\ast}$) to this
equality, we obtain $K_{j}^{\ast}K_{v}^{\ast}=0$ (since $\left(  K_{j}%
K_{v}\right)  ^{\ast}=K_{v}^{\ast}K_{j}^{\ast}$). The same argument (with the
roles of $j$ and $v$ interchanged) yields $K_{v}^{\ast}K_{j}^{\ast}=0$. Note
that $K_{j}$ is a left ideal of $\mathcal{A}$; hence, $\mathcal{A}K_{j}=K_{j}%
$. Applying the antipode to this equality, we obtain $K_{j}^{\ast}%
\mathcal{A}=K_{j}^{\ast}$. Similarly, $K_{v}^{\ast}\mathcal{A}=K_{v}^{\ast}$.
Now,%
\[
\left[  K_{j}^{\ast}J_{i},\ K_{v}^{\ast}J_{u}\right]  \subseteq
\underbrace{K_{j}^{\ast}J_{i}}_{\subseteq K_{j}^{\ast}\mathcal{A}=K_{j}^{\ast
}}K_{v}^{\ast}J_{u}+\underbrace{K_{v}^{\ast}J_{u}}_{\subseteq K_{v}^{\ast
}\mathcal{A}=K_{v}^{\ast}}K_{j}^{\ast}J_{i}\subseteq\underbrace{K_{j}^{\ast
}K_{v}^{\ast}}_{=0}J_{u}+\underbrace{K_{v}^{\ast}K_{j}^{\ast}}_{=0}J_{i}=0.
\]
In other words, $\left[  K_{j}^{\ast}J_{i},\ K_{v}^{\ast}J_{u}\right]  =0$.
Hence, (\ref{pf.thm.mf3gen.subg}) is proved in Case 2.

\item \textit{Case 3:} Assume that $i=u$ and $j=v$. Lemma \ref{lem.V*W-dim}
(applied to $V=K_{j}$ and $W=J_{i}$) yields that $\dim\left(  K_{j}^{\ast
}J_{i}\right)  \leq1$ (since $K_{j}$ and $J_{i}$ are left ideals that are
irreducible as representations of $S_{n}$). Thus, any two elements of
$K_{j}^{\ast}J_{i}$ are scalar multiples of each other (unless one of them is
$0$), and therefore commute. In other words, $\left[  K_{j}^{\ast}%
J_{i},\ K_{j}^{\ast}J_{i}\right]  =0$. Since $i=u$ and $j=v$, we can rewrite
this as $\left[  K_{j}^{\ast}J_{i},\ K_{v}^{\ast}J_{u}\right]  =0$. Hence,
(\ref{pf.thm.mf3gen.subg}) is proved in Case 3.
\end{itemize}

We have now proved (\ref{pf.thm.mf3gen.subg}) in all three cases. Hence,
(\ref{pf.thm.mf3gen.subg}) always holds. As explained above, this yields
$\left[  K^{\ast}J,\ K^{\ast}J\right]  =0$ and thus proves Theorem
\ref{thm.mf3gen}.
\end{proof}

\begin{remark}
More generally, Theorem \ref{thm.mf3gen} and the lemmas we used in its proof
hold if we replace $\mathcal{A}$ by any finite-dimensional semisimple
$\mathbf{k}$-algebra $R$ with the property that all irreducible left
$R$-modules satisfy (\ref{eq.EndI=k}) and with an algebra anti-automorphism
$R\rightarrow R,\ a\mapsto a^{\ast}$.
\end{remark}

\begin{noncompile}
OLD\ PROOF\ OF THEOREM \ref{thm.mf3} ASSUMING\ THAT $\mathbf{k}$ IS\ AN\ ORDERED\ FIELD:

Theorem \ref{thm.mf-li-gen} (applied to $G=S_{n}$ and $I=J$) shows that there
exists an idempotent $e\in J$ such that $J=\mathbf{k}\left[  S_{n}\right]
\cdot e$. Consider this $e$. Thus, $J=\mathbf{k}\left[  S_{n}\right]  \cdot
e=\mathcal{A}e$, so that $J^{\ast}=\left(  \mathcal{A}e\right)  ^{\ast
}=e^{\ast}\mathcal{A}$. Hence, $J^{\ast}J=\left(  e^{\ast}\mathcal{A}\right)
\left(  \mathcal{A}e\right)  =e^{\ast}\mathcal{A}e$. Thus, we must prove that
$\left[  e^{\ast}\mathcal{A}e,\ e^{\ast}\mathcal{A}e\right]  =0$. In other
words, we must prove that $\left[  e^{\ast}xe,\ e^{\ast}ye\right]  =0$ for all
$x,y\in\mathcal{A}$.

So let $x,y\in\mathcal{A}$. Then, $\underbrace{e}_{\in J}\left[
\underbrace{e^{\ast}xe}_{\in J},\ \underbrace{e^{\ast}ye}_{\in J}\right]  \in
J\left[  J,\ J\right]  =0$ (by Theorem \ref{thm.mf2}). Hence, $e\left[
e^{\ast}xe,\ e^{\ast}ye\right]  =0$. Thus,%
\begin{align}
0  &  =e\left[  e^{\ast}xe,\ e^{\ast}ye\right] \nonumber\\
&  =ee^{\ast}xee^{\ast}ye-ee^{\ast}yee^{\ast}xe\nonumber\\
&  =ee^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e. \label{pf.thm.mf3.4}%
\end{align}


However, $\mathbf{k}$ is an ordered field; thus, a known fact says that every
nonzero $z\in\mathcal{A}$ satisfies $z^{\ast}z\neq0$ (since the $1$%
-coefficient of $z^{\ast}z$ is the sum of the squares of all coefficients of
$z$). Applying this to $z=e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e$,
we see that if $e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e$ is nonzero,
then $\left(  e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e\right)
^{\ast}e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e\neq0$, which
contradicts%
\begin{align*}
&  \left(  e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e\right)  ^{\ast
}e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e\\
&  =e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  ^{\ast}%
\underbrace{ee^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e}%
_{\substack{=0\\\text{(by (\ref{pf.thm.mf3.4}))}}}=0.
\end{align*}
Hence, $e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e$ must be $0$. Thus,%
\[
0=e^{\ast}\left(  xee^{\ast}y-yee^{\ast}x\right)  e=e^{\ast}xee^{\ast
}ye-e^{\ast}yee^{\ast}xe=\left[  e^{\ast}xe,\ e^{\ast}ye\right]  .
\]
In other words, $\left[  e^{\ast}xe,\ e^{\ast}ye\right]  =0$. Theorem
\ref{thm.mf3} is thus proved.
\end{noncompile}

\subsection{Some $1$-dimensional spaces}

Recall that $J$ is a left ideal of $\mathcal{A}$ that is multiplicity-free as
an $S_{n}$-representation.

\begin{lemma}
\label{lem.HomJK=1}Let $K$ be an irreducible representation of $S_{n}$. Then,
$\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J,K\right)
\right)  \leq1$.
\end{lemma}

\begin{proof}
Decompose the $S_{n}$-representation $J$ as a direct sum of irreducible
subrepresentations:%
\[
J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}.
\]
These subrepresentations $J_{1},J_{2},\ldots,J_{k}$ are pairwise
non-isomorphic (since $J$ is multiplicity-free). Hence, at most one of them is
isomorphic to $K$. In other words, we are in one of the following two cases:

\textit{Case 1:} There is a unique $i\in\left\{  1,2,\ldots,k\right\}  $ such
that $J_{i}\cong K$.

\textit{Case 2:} There is no $i\in\left\{  1,2,\ldots,k\right\}  $ such that
$J_{i}\cong K$.

Let us first consider Case 1. In this case, there is a unique $i\in\left\{
1,2,\ldots,k\right\}  $ such that $J_{i}\cong K$. Consider this $i$. Now, from
$J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}$ and $K\cong J_{i}$, we obtain%
\begin{align*}
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J,K\right)   &
\cong\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J_{1}\oplus J_{2}%
\oplus\cdots\oplus J_{k},\ J_{i}\right) \\
&  \cong\bigoplus_{r=1}^{k}\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
J_{r},J_{i}\right)  \cong\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
J_{i},J_{i}\right)
\end{align*}
(since the irreducible representations $J_{1},J_{2},\ldots,J_{k}$ are pairwise
non-isomorphic, and thus the Schur lemma shows that $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  J_{r},J_{i}\right)  =0$ for all $r\neq i$).
Thus,%
\[
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J,K\right)  \cong%
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J_{i},J_{i}\right)
\cong\operatorname*{End}\nolimits_{\mathcal{A}}\left(  J_{i}\right)
\cong\mathbf{k}%
\]
(by (\ref{eq.EndI=k})) and therefore $\dim\left(  \operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  J,K\right)  \right)  =\dim\mathbf{k}=1$. Hence,
Lemma \ref{lem.HomJK=1} is proved in Case 1.

Let us now consider Case 2. In this case, there is no $i\in\left\{
1,2,\ldots,k\right\}  $ such that $J_{i}\cong K$. Hence, each $i\in\left\{
1,2,\ldots,k\right\}  $ satisfies $J_{i}\ncong K$ and therefore
$\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J_{i},K\right)  =0$ (by the
Schur lemma, since both representations $J_{i}$ and $K$ are irreducible). Now,
from $J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}$, we obtain%
\[
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J,K\right)  \cong%
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J_{1}\oplus J_{2}%
\oplus\cdots\oplus J_{k},\ K\right)  \cong\bigoplus_{i=1}^{k}%
\underbrace{\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J_{i},K\right)
}_{\substack{=0\\\text{(as we just saw)}}}=0.
\]
Hence, $\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
J,K\right)  \right)  =0\leq1$. Thus, Lemma \ref{lem.HomJK=1} is proved in Case
2. Hence, the proof of Lemma \ref{lem.HomJK=1} is complete.
\end{proof}

\begin{lemma}
\label{lem.dimaK1}Let $K$ be an irreducible representation of $S_{n}$. Let
$a\in J$. Then, $\dim\left(  aK\right)  \leq1$.
\end{lemma}

\begin{proof}
Theorem \ref{thm.mf-li-gen} (applied to $G=S_{n}$ and $I=J$) shows that there
exists an idempotent $e\in J$ such that $J=\mathcal{A}e$. Consider this $e$.

Now, from $a\in J=\mathcal{A}e$, we obtain $a=be$ for some $b\in\mathcal{A}$,
and thus%
\[
\dim\left(  aK\right)  =\dim\left(  beK\right)  \leq\dim\left(  eK\right)
\]
(since there is a surjective $\mathbf{k}$-linear map $eK\rightarrow beK$
sending each vector $v\in eK$ to $bv\in beK$).

But $J=\mathcal{A}e$, and thus $\operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  J,K\right)  =\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{A}e,K\right)  \cong eK$ by Lemma \ref{lem.etingof-idp}. Hence,
\begin{equation}
\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  J,K\right)
\right)  =\dim\left(  eK\right)  . \label{pf.lem.dimaK1.4}%
\end{equation}
But Lemma \ref{lem.HomJK=1} yields $\dim\left(  \operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  J,K\right)  \right)  \leq1$. In view of
(\ref{pf.lem.dimaK1.4}), we can rewrite this as $\dim\left(  eK\right)  \leq
1$. Hence, $\dim\left(  aK\right)  \leq\dim\left(  eK\right)  \leq1$. This
proves Lemma \ref{lem.dimaK1}.
\end{proof}

\begin{remark}
More generally, Lemma \ref{lem.HomJK=1} and Lemma \ref{lem.dimaK1} hold if we
replace $\mathcal{A}$ by any finite-dimensional semisimple $\mathbf{k}%
$-algebra $R$ with the property that all irreducible left $R$-modules satisfy
(\ref{eq.EndI=k}).
\end{remark}

\subsection{Closer study of $J^{\ast}J$}

We continue with our study of an arbitrary multiplicity-free left ideal $J$ of
$\mathcal{A}$. First, some general properties of representations of $S_{n}$
need to be proved. Recall that if $V$ and $W$ are two representations of
$S_{n}$, then a $\mathbf{k}$-bilinear map $f:V\times W\rightarrow U$ into some
vector space $U$ is said to be $S_{n}$\emph{-invariant} if it satisfies%
\[
f\left(  gv,gw\right)  =f\left(  v,w\right)  \ \ \ \ \ \ \ \ \ \ \text{for all
}g\in S_{n}\text{ and }v\in V\text{ and }w\in W.
\]
We also let $V^{\vee}$ denote the dual space $\operatorname*{Hom}\left(
V,\mathbf{k}\right)  $ of any $\mathbf{k}$-vector space $V$ (since the
notation $V^{\ast}$ is already taken for something else). If $V$ is a
representation of $S_{n}$, then $V^{\vee}$ canonically becomes a
representation of $S_{n}$ as well (known as the \emph{(contragredient) dual}
of $V$). See \cite[\S 5.19.3]{sga} for details (but beware that $V^{\vee}$ is
denoted by $V^{\ast}$ there).

Recall that if $V$ and $W$ are two $\mathbf{k}$-vector spaces, then any
$\mathbf{k}$-bilinear form $f:V\times W\rightarrow\mathbf{k}$ gives rise to
two $\mathbf{k}$-linear maps%
\begin{align*}
f_{L}:V  &  \rightarrow W^{\vee},\\
v  &  \mapsto\left(  \text{the }\mathbf{k}\text{-linear map }W\rightarrow
\mathbf{k},\ w\mapsto f\left(  v,w\right)  \right)
\end{align*}
and%
\begin{align*}
f_{R}:W  &  \rightarrow V^{\vee},\\
w  &  \mapsto\left(  \text{the }\mathbf{k}\text{-linear map }V\rightarrow
\mathbf{k},\ v\mapsto f\left(  v,w\right)  \right)
\end{align*}
(see \cite[\S 5.19.2]{sga}). If these two maps $f_{L}$ and $f_{R}$ are
bijective (thus $\mathbf{k}$-vector space isomorphisms), the form $f$ is said
to be \emph{nondegenerate}. Note that when $V$ and $W$ are finite-dimensional,
this nondegeneracy is equivalent to saying that $\operatorname*{Ker}f_{L}=0$
and $\operatorname*{Ker}f_{R}=0$.

\begin{lemma}
\label{lem.repSn.Hom-form}Let $V$ and $W$ be two finite-dimensional
representations of $S_{n}$ over $\mathbf{k}$. Then, the $S_{n}$-invariant
bilinear forms $f:V\times W\rightarrow\mathbf{k}$ form a vector space, which
is isomorphic to $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
W,\ V^{\vee}\right)  $.
\end{lemma}

\begin{proof}
Clearly, the $S_{n}$-invariant bilinear forms $f:V\times W\rightarrow
\mathbf{k}$ form a vector space. Moreover, the map%
\begin{align*}
\operatorname*{curry}:\left\{  \text{bilinear forms }f:V\times W\rightarrow
\mathbf{k}\right\}   &  \rightarrow\operatorname*{Hom}\nolimits_{\mathbf{k}%
}\left(  W,\ V^{\vee}\right)  ,\\
f  &  \mapsto f_{R}%
\end{align*}
(where $f_{R}$ is defined as above) is easily seen to be a $\mathbf{k}$-vector
space isomorphism (indeed, this is a linear-algebraic version of the standard
\textquotedblleft currying isomorphism\textquotedblright, which turns a
two-variable function into a one-variable function that outputs one-variable
functions). Moreover, it sends the $S_{n}$-invariant bilinear forms $f:V\times
W\rightarrow\mathbf{k}$ into the $S_{n}$-equivariant linear maps $W\rightarrow
V^{\vee}$, that is, into the $\mathcal{A}$-linear maps $W\rightarrow V^{\vee}$
(since $\mathcal{A}=\mathbf{k}\left[  S_{n}\right]  $). This is an
if-and-only-if statement (i.e., only $S_{n}$-invariant $f$'s give rise to
$\mathcal{A}$-linear $f_{R}$'s). Thus, by restricting this isomorphism
$\operatorname*{curry}$, we obtain a $\mathbf{k}$-vector space isomorphism%
\begin{align*}
\left\{  S_{n}\text{-invariant bilinear forms }f:V\times W\rightarrow
\mathbf{k}\right\}   &  \rightarrow\operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  W,\ V^{\vee}\right)  ,\\
f  &  \mapsto f_{R}.
\end{align*}
This proves Lemma \ref{lem.repSn.Hom-form}.
\end{proof}

\begin{noncompile}
OLD\ PROOF: Clearly, the $S_{n}$-invariant bilinear forms $f:V\times
W\rightarrow\mathbf{k}$ form a vector space. This vector space is isomorphic
to the vector space $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
V\otimes W,\ \mathbf{k}\right)  $ where $\mathbf{k}$ is the trivial
representation of $S_{n}$ (since we can re-encode each bilinear form
$f:V\times W\rightarrow\mathbf{k}$ as a linear form $f^{\otimes}:V\otimes
W\rightarrow\mathbf{k}$, and then the $S_{n}$-invariance of the former form
translates into the $S_{n}$-equivariance of the latter). But a form of the
tensor-hom adjunction shows that the vector space $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  V\otimes W,\ \mathbf{k}\right)  $ is isomorphic
to $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  W,\ V^{\vee}\right)  $
(indeed, the isomorphism sends each $g\in\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  V\otimes W,\ \mathbf{k}\right)  $ to the map
that sends each $w\in W$ to the partial evaluation $g\left(  ?\otimes
w\right)  \in V^{\vee}$ of $g$). Combining these facts, we conclude Lemma
\ref{lem.repSn.Hom-form}.
\end{noncompile}

\begin{lemma}
\label{lem.repSn.bilform}Let $V$ be any irreducible representation of $S_{n}$
over $\mathbf{k}$. Then:

\begin{enumerate}
\item[\textbf{(a)}] There is a nondegenerate symmetric $S_{n}$-invariant
bilinear form $g:V\times V\rightarrow\mathbf{k}$.

\item[\textbf{(b)}] Any $S_{n}$-invariant bilinear form $f:V\times
V\rightarrow\mathbf{k}$ is symmetric.

\item[\textbf{(c)}] Any $S_{n}$-invariant bilinear map $f:V\times V\rightarrow
U$ into any vector space $U$ is symmetric.
\end{enumerate}
\end{lemma}

\begin{proof}
\textbf{(a)} We know that $V$ is isomorphic to a Specht module $\mathcal{S}%
^{\lambda}$ for a partition $\lambda$ of $n$. Consider this $\lambda$. Thus,
we must find a nondegenerate symmetric $S_{n}$-invariant bilinear form
$\mathcal{S}^{\lambda}\times\mathcal{S}^{\lambda}\rightarrow\mathbf{k}$. But
the existence of such a form is a well-known fact (implicit in \cite[proof of
Corollary 3.5]{Wildon18}, where it is shown that the symmetric $S_{n}%
$-invariant form $\left\langle \cdot,\cdot\right\rangle $ on the Young module
$\mathcal{M}^{\lambda}$ satisfies $\mathcal{S}^{\lambda}\cap\left(
\mathcal{S}^{\lambda}\right)  ^{\perp}=0$, whence the restriction of this form
to the Specht module $\mathcal{S}^{\lambda}$ is nondegenerate\footnote{On the
nose, the argument in \cite[proof of Corollary 3.5]{Wildon18} requires
$\mathbf{k}$ to have characteristic $0$; but the claim $\mathcal{S}^{\lambda
}\cap\left(  \mathcal{S}^{\lambda}\right)  ^{\perp}=0$ holds more generally
when $n!$ is invertible in $\mathbf{k}$. One way to see this is as follows:
Assume that $n!$ is invertible in $\mathbf{k}$. Then, the partition $\lambda$
is $\operatorname*{char}\mathbf{k}$-regular. Hence, \cite[Theorem
5.7]{Wildon18} shows that $\mathcal{S}^{\lambda}\not \subseteq \left(
\mathcal{S}^{\lambda}\right)  ^{\perp}$. Hence, $\mathcal{S}^{\lambda}%
\cap\left(  \mathcal{S}^{\lambda}\right)  ^{\perp}\neq\mathcal{S}^{\lambda}$.
Since $\mathcal{S}^{\lambda}$ is irreducible, this yields that $\mathcal{S}%
^{\lambda}\cap\left(  \mathcal{S}^{\lambda}\right)  ^{\perp}=0$, because
$\mathcal{S}^{\lambda}\cap\left(  \mathcal{S}^{\lambda}\right)  ^{\perp}$ is
an $S_{n}$-subrepresentation of $\mathcal{S}^{\lambda}$.}). Thus, part
\textbf{(a)} is proved. \medskip

\textbf{(b)} The representation $V$ is irreducible. Thus, $\operatorname*{End}%
\nolimits_{\mathcal{A}}V\cong\mathbf{k}$ (by (\ref{eq.EndI=k})).

Part \textbf{(a)} shows that there is a nondegenerate symmetric $S_{n}%
$-invariant bilinear form $g:V\times V\rightarrow\mathbf{k}$. Consider this
$g$. Since $g$ is $S_{n}$-invariant and nondegenerate, we conclude that the
dual space $V^{\vee}$ (with the contragredient representation of $S_{n}$)
satisfies $V^{\vee}\cong V$ as $S_{n}$-representations (by the standard
isomorphism induced by $g$). In other words, $V^{\vee}\cong V$ as
$\mathcal{A}$-modules. Thus, $\operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  V,\ V^{\vee}\right)  \cong\operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  V,\ V\right)  =\operatorname*{End}\nolimits_{\mathcal{A}}%
V\cong\mathbf{k}$.

Lemma \ref{lem.repSn.Hom-form} shows that the $S_{n}$-invariant bilinear forms
$f:V\times V\rightarrow\mathbf{k}$ form a $\mathbf{k}$-vector space, which is
isomorphic to $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  V,\ V^{\vee
}\right)  $. Since $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
V,\ V^{\vee}\right)  \cong\mathbf{k}$, this vector space is isomorphic to
$\mathbf{k}$, thus $1$-dimensional. Since there is at least one nondegenerate
symmetric form in this space (by part \textbf{(a)}), we thus conclude that any
form in this space is a scalar multiple of this one form, and thus is
symmetric. This proves part \textbf{(b)}. \medskip

\textbf{(c)} This follows from part \textbf{(b)} by decomposing $f$ into its
coordinates. (In more details: Let $U$ be any vector space. Let $f:V\times
V\rightarrow U$ be an $S_{n}$-invariant bilinear map. Let $\eta:U\rightarrow
\mathbf{k}$ be any $\mathbf{k}$-linear map. Then, the map $\eta\circ f:V\times
V\rightarrow\mathbf{k}$ is an $S_{n}$-invariant bilinear form, and thus is
symmetric (by part \textbf{(b)}). In other words, $\eta\left(  f\left(
v,w\right)  \right)  =\eta\left(  f\left(  w,v\right)  \right)  $ for all
$v,w\in V$. Since this holds for all $\mathbf{k}$-linear maps $\eta
:U\rightarrow\mathbf{k}$, we thus conclude that $f\left(  v,w\right)
=f\left(  w,v\right)  $ for all $v,w\in V$ (since two vectors in $U$ that
become equal upon application of every $\mathbf{k}$-linear map to $\mathbf{k}$
must have been equal in the first place). In other words, $f$ is symmetric.)
\end{proof}

\begin{lemma}
\label{lem.repSn.bilform0}Let $V$ and $W$ be two non-isomorphic irreducible
representations of $S_{n}$ over $\mathbf{k}$. Then:

\begin{enumerate}
\item[\textbf{(a)}] Any $S_{n}$-invariant bilinear form $f:V\times
W\rightarrow\mathbf{k}$ is $0$.

\item[\textbf{(b)}] Any $S_{n}$-invariant bilinear map $f:V\times W\rightarrow
U$ into any vector space $U$ is $0$.
\end{enumerate}
\end{lemma}

\begin{proof}
\textbf{(a)} As in the proof of Lemma \ref{lem.repSn.bilform} \textbf{(b)}, we
can see that $V^{\vee}\cong V$. Moreover, the $S_{n}$-invariant bilinear forms
$f:V\times W\rightarrow\mathbf{k}$ form a $\mathbf{k}$-vector space that is
isomorphic to $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  W,\ V^{\vee
}\right)  $ (by Lemma \ref{lem.repSn.Hom-form}) and therefore to
$\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  W,V\right)  $ (since
$V^{\vee}\cong V$). But $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
W,V\right)  =0$, since $V$ and $W$ are irreducible and non-isomorphic. Thus,
part \textbf{(a)} of Lemma \ref{lem.repSn.bilform0} follows. \medskip

\textbf{(b)} Follows from \textbf{(a)} just like Lemma \ref{lem.repSn.bilform}
\textbf{(c)} follows from Lemma \ref{lem.repSn.bilform} \textbf{(b)}.
\end{proof}

\begin{lemma}
\label{lem.J*J-invar-1}Let $V$ and $W$ be two left ideals of $\mathcal{A}$
that are non-isomorphic and irreducible as representations of $S_{n}$. Then,
$V^{\ast}W=0$.
\end{lemma}

\begin{proof}
The map%
\begin{align*}
V\times W  &  \rightarrow\mathcal{A},\\
\left(  v,w\right)   &  \mapsto v^{\ast}w
\end{align*}
is bilinear and $S_{n}$-invariant (since $\left(  gv\right)  ^{\ast}\left(
gw\right)  =v^{\ast}\underbrace{g^{\ast}}_{=g^{-1}}gw=v^{\ast}%
\underbrace{g^{-1}g}_{=1}w=v^{\ast}w$ for all $g\in S_{n}$ and all $v\in V$
and $w\in W$). Thus, it is $0$ by Lemma \ref{lem.repSn.bilform0} \textbf{(b)}.
In other words, $v^{\ast}w=0$ for all $v\in V$ and $w\in W$. In other words,
$V^{\ast}W=0$.
\end{proof}

\begin{lemma}
\label{lem.J*J-invar-2}Let $V$ be a left ideal of $\mathcal{A}$ that is
irreducible as a representation of $S_{n}$. Then, $v^{\ast}w=w^{\ast}v$ for
any $v,w\in V$.
\end{lemma}

\begin{proof}
The map%
\begin{align*}
V\times V  &  \rightarrow\mathcal{A},\\
\left(  v,w\right)   &  \mapsto v^{\ast}w
\end{align*}
is bilinear and $S_{n}$-invariant (as we saw in the proof of Lemma
\ref{lem.J*J-invar-1}). Hence, it is symmetric by Lemma
\ref{lem.repSn.bilform} \textbf{(c)}. In other words, $v^{\ast}w=w^{\ast}v$
for any $v,w\in V$.
\end{proof}

\begin{lemma}
\label{lem.J*J-invar-3}Let $V$ be a left ideal of $\mathcal{A}$ that is
irreducible as a representation of $S_{n}$. Then, $\dim\left(  V^{\ast
}V\right)  =1$.
\end{lemma}

\begin{proof}
Lemma \ref{lem.V*W-dim} (applied to $W=V$) yields $\dim\left(  V^{\ast
}V\right)  \leq1$.

On the other hand, $V\neq0$ (since $V$ is irreducible), so that $V^{\ast}%
\neq0$.

Now, let us decompose the semisimple algebra $\mathcal{A}$ itself into a
direct sum of irreducible $S_{n}$-subrepresentations: $\mathcal{A}=I_{1}\oplus
I_{2}\oplus\cdots\oplus I_{\ell}$. Of course, these subrepresentations
$I_{1},I_{2},\ldots,I_{\ell}$ are left ideals of $\mathcal{A}$.

On the other hand, $V=\mathcal{A}V$ (since $V$ is a left ideal of
$\mathcal{A}$). Applying the antipode (i.e., the algebra anti-morphism
$\mathcal{A}\rightarrow\mathcal{A},\ a\mapsto a^{\ast}$) to this
equality, we obtain%
\begin{align*}
V^{\ast}  &  =V^{\ast}\mathcal{A}=V^{\ast}\left(  I_{1}\oplus I_{2}%
\oplus\cdots\oplus I_{\ell}\right)  \ \ \ \ \ \ \ \ \ \ \left(  \text{since
}\mathcal{A}=I_{1}\oplus I_{2}\oplus\cdots\oplus I_{\ell}\right) \\
&  =V^{\ast}I_{1}+V^{\ast}I_{2}+\cdots+V^{\ast}I_{\ell}.
\end{align*}
Hence, at least one of the products $V^{\ast}I_{1},\ V^{\ast}I_{2}%
,\ \ldots,\ V^{\ast}I_{\ell}$ must be nonzero (since $V^{\ast}\neq0$). In
other words, there exists some $j\in\left\{  1,2,\ldots,\ell\right\}  $ such
that $V^{\ast}I_{j}\neq0$. Consider this $j$. From $V^{\ast}I_{j}\neq0$, we
obtain $V\cong I_{j}$ (otherwise, Lemma \ref{lem.J*J-invar-1} would yield
$V^{\ast}I_{j}=0$). Hence, $V^{\ast}V\cong V^{\ast}I_{j}\neq0$. Thus,
$\dim\left(  V^{\ast}V\right)  >0$. Combining this with $\dim\left(  V^{\ast
}V\right)  \leq1$, we obtain $\dim\left(  V^{\ast}V\right)  =1$. This proves
Lemma \ref{lem.J*J-invar-3}.
\end{proof}

\begin{theorem}
\label{thm.v*w}We have $v^{\ast}w=w^{\ast}v$ for all $v,w\in J$.
\end{theorem}

\begin{proof}
Decompose the $S_{n}$-representation $J$ as a direct sum of irreducible
subrepresentations:%
\begin{equation}
J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}. \label{pf.thm.J*J-invar.decomp}%
\end{equation}


We must show that $v^{\ast}w=w^{\ast}v$ for all $v,w\in J$. This equality is
linear in each of $v$ and $w$. Thus, it suffices to show that $v^{\ast
}w=w^{\ast}v$ for all $i,j\in\left\{  1,2,\ldots,k\right\}  $ and all $v\in
J_{i}$ and all $w\in J_{j}$ (by (\ref{pf.thm.J*J-invar.decomp})). So let us
fix $i,j\in\left\{  1,2,\ldots,k\right\}  $. We must prove that $v^{\ast
}w=w^{\ast}v$.

If $i=j$, then this follows from Lemma \ref{lem.J*J-invar-2} (applied to
$V=J_{i}=J_{j}$). Thus, we WLOG assume that $i\neq j$. Then, $J_{i}$ and
$J_{j}$ are non-isomorphic as representations of $S_{n}$ (since $J$ is
multiplicity-free). Hence, Lemma \ref{lem.J*J-invar-1} yields $J_{i}^{\ast
}J_{j}=0$. Thus, $v^{\ast}w=0$ (since $v\in J_{i}$ and $w\in J_{j}$).
Similarly, $w^{\ast}v=0$. Hence, $v^{\ast}w=0=w^{\ast}v$, qed.
\end{proof}

\begin{theorem}
\label{thm.J*J-invar}Each element of $J^{\ast}J$ is invariant under the
antipode. In other words, for each $x\in J^{\ast}J$, we have $x^{\ast}=x$.
\end{theorem}

\begin{proof}
Each $x\in J^{\ast}J$ is a $\mathbf{k}$-linear combination of elements of the
form $w^{\ast}v$ with $w,v\in J$. Thus, it suffices to show that $\left(
w^{\ast}v\right)  ^{\ast}=w^{\ast}v$ for all $v,w\in J$. But this is now easy:
For any $v,w\in J$, we have $\left(  w^{\ast}v\right)  ^{\ast}=v^{\ast
}\underbrace{\left(  w^{\ast}\right)  ^{\ast}}_{=w}=v^{\ast}w=w^{\ast}v$ (by
Theorem \ref{thm.v*w}). This proves Theorem \ref{thm.J*J-invar}.
\end{proof}

\subsection{Splitness}

\begin{theorem}
\label{thm.minpol-factors}Let $a\in J$. Then, there exists a multiset $Z_{a}$
of elements of $\mathbf{k}$ such that $\prod_{\lambda\in Z_{a}}\left(
a-\lambda\right)  =0$. (In other words, the minimal polynomial of $a$ over
$\mathbf{k}$ factors into linear factors.)
\end{theorem}

\begin{proof}
We say that a $\mathbf{k}$-linear endomorphism $f$ of a finite-dimensional
vector space $V$ is \emph{split} if there exists a multiset $Z_{f}$ of
elements of $\mathbf{k}$ such that $\prod_{\lambda\in Z_{f}}\left(
f-\lambda\operatorname*{id}\right)  =0$ (that is, if the minimal polynomial of
$f$ factors into linear factors -- or, equivalently, if all eigenvalues of $f$
belong to $\mathbf{k}$). Clearly, if $f_{1},f_{2},\ldots,f_{k}$ are split
endomorphisms of vector spaces $V_{1},V_{2},\ldots,V_{k}$, respectively, then
their direct sum $f_{1}\oplus f_{2}\oplus\cdots\oplus f_{k}\in
\operatorname*{End}\left(  V_{1}\oplus V_{2}\oplus\cdots\oplus V_{k}\right)  $
is split again. Furthermore, any vector space endomorphism of rank $\leq1$ is
split (indeed, if $f$ is a vector space endomorphism of rank $\leq1$, then
$f\circ\left(  f-\left(  \operatorname*{Tr}f\right)  \operatorname*{id}%
\right)  =0$).

We say that an element $b\in\mathcal{A}$ acts \emph{splitly} on a left
$\mathcal{A}$-module $U$ if and only if there exists a multiset $Z_{b,U}$ of
elements of $\mathbf{k}$ such that $\left(  \prod_{\lambda\in Z_{b,U}}\left(
b-\lambda\right)  \right)  \cdot U=0$. Thus, we must prove that $a$ acts
splitly on the left regular $\mathcal{A}$-module $\mathcal{A}$ (since this
will yield $\left(  \prod_{\lambda\in Z_{a,\mathcal{A}}}\left(  a-\lambda
\right)  \right)  \cdot\mathcal{A}=0$ and thus $\prod_{\lambda\in
Z_{a,\mathcal{A}}}\left(  a-\lambda\right)  =0$). Note that an element
$b\in\mathcal{A}$ acts splitly on a left $\mathcal{A}$-module $U$ if and only
if the action of $b$ on $U$ is a split endomorphism of $U$.

Decompose the left regular $\mathcal{A}$-module $\mathcal{A}$ as a direct sum
of irreducible subrepresentations:%
\[
\mathcal{A}=L_{1}\oplus L_{2}\oplus\cdots\oplus L_{k}.
\]


It suffices to show that $a$ acts splitly on each of these subrepresentations
$L_{1},L_{2},\ldots,L_{k}$, since the action of $a$ on the full $\mathcal{A}$
is just the direct sum of these actions.

So let us fix $i\in\left\{  1,2,\ldots,k\right\}  $. We must show that $a$
acts splitly on $L_{i}$. It suffices to prove that the action of $a$ on
$L_{i}$ has rank $\leq1$ (as a $\mathbf{k}$-linear map), since any vector
space endomorphism of rank $\leq1$ is split.

So we need to show that the image of $a$ on $L_{i}$ has dimension $\leq1$. In
other words, we need to show that $\dim\left(  aL_{i}\right)  \leq1$.

But this follows immediately from Lemma \ref{lem.dimaK1} (applied to $K=L_{i}%
$), since the representation $L_{i}$ is irreducible.
\end{proof}

\subsection{The left ideal $J$ as a nonunital algebra}

Each left ideal of an algebra $A$ is a nonunital subalgebra. Sometimes, this
nonunital subalgebra does in fact have a unity (although this unity is usually
not the unity of $A$). Usually, it does not. The situation for left ideals of
group algebras is as follows:

\begin{proposition}
\label{prop.unity.I}Let $G$ be a finite group. Let $A$ be its group algebra
$\mathbf{k}\left[  G\right]  $. Let $I$ be any left ideal of $A$. Then:

\begin{enumerate}
\item[\textbf{(a)}] The nonunital $\mathbf{k}$-algebra $I$ has a right unity.

\item[\textbf{(b)}] The nonunital $\mathbf{k}$-algebra $I$ has an actual
(two-sided) unity if and only if $I$ is a (two-sided) ideal of $A$.
\end{enumerate}
\end{proposition}

\begin{proof}
Theorem \ref{thm.mf-li-gen} says that there exists an idempotent $e\in I$ such
that $I=Ae$ (since $A=\mathbf{k}\left[  G\right]  $). Consider this $e$.
Clearly, $ee=e$ (since $e$ is idempotent). Each $x\in I$ satisfies $x\in I=Ae$
and thus $x=ae$ for some $a\in A$, so that $xe=a\underbrace{ee}_{=e}=ae=x$.
This shows that $e$ is a right unity of $I$. Thus, part \textbf{(a)} is
proved. \medskip

\textbf{(b)} $\Longleftarrow:$ Assume that $I$ is a two-sided ideal of $A$.
Thus, $I$ is both a left ideal and a right ideal of $A$. Hence, by part
\textbf{(a)}, we know that $I$ has a right unity (since $I$ is a left ideal).
An analogous argument, with the order of factors reversed, shows that $I$ has
a left unity (since $I$ is a right ideal). These two unities must be equal
(indeed, if we call them $r$ and $\ell$, then $r=\ell r=\ell$), and thus must
be a unity. So $I$ has a unity. \medskip

$\Longrightarrow:$ Assume that $I$ has a unity. Let $u$ be this unity. Then,
$ue=u$ (since $e$ is a right unity of $I$), so that $u=ue=e$ (since $u$ is a
unity of $I$). Hence, $e$ is a unity of $I$ (since $u$ is a unity of $I$).
Therefore, $eI=I$. In view of $I=Ae$, we can rewrite this as $eAe=Ae$.
Therefore, $Ae=e\underbrace{Ae}_{\subseteq A}\subseteq eA$.

Now, let $c\in eA$. We shall show that $c=ce$. To wit, let $x:=c\left(
1-e\right)  $. Let $y\in A$ be arbitrary. Then, $ye\in Ae=I$, so that $e\cdot
ye=ye$ (since $e$ is the unity of the algebra $I$). Thus, $\left(  1-e\right)
ye=ye-\underbrace{e\cdot ye}_{=ye}=0$. Furthermore, we can write $c$ as $c=ez$
for some $z\in A$ (since $c\in eA$). Consider this $z$. Now,%
\[
\left(  xy\right)  ^{2}=\underbrace{x}_{=c\left(  1-e\right)  }y\underbrace{x}%
_{=c\left(  1-e\right)  }y=c\left(  1-e\right)  y\underbrace{c}_{=ez}\left(
1-e\right)  y=c\underbrace{\left(  1-e\right)  ye}_{=0}z\left(  1-e\right)
y=0.
\]
Hence, the element $xy$ of $A$ is nilpotent, and thus the element $1-xy$ is
invertible (since $1$ minus a nilpotent element is always invertible).

Forget that we fixed $y$. We thus have shown that $1-xy\in A$ is invertible
for each $y\in A$. In other words, $x$ belongs to the Jacobson radical of the
ring $A$. But $A$ is semisimple (by Maschke's theorem, since $A$ is the group
algebra of the finite group $G$ over the field $\mathbf{k}$ of characteristic
$0$). Hence, the Jacobson radical of $A$ is $0$. Therefore, $x=0$ (since $x$
belongs to this Jacobson radical). Hence, $0=x=c\left(  1-e\right)  =c-ce$, so
that $c=ce\in Ae$.

Forget that we fixed $c$. We thus have shown that $c\in Ae$ for each $c\in
eA$. In other words, $eA\subseteq Ae$. Combining this with $Ae\subseteq eA$,
we obtain $Ae=eA$.

\begin{noncompile}
OLD: But \cite[Lemma 5.5]{BCGS25} shows that $\dim\left(  Ae\right)
=\dim\left(  eA\right)  $ (since $A$ is a semisimple $\mathbf{k}$-algebra that
is finite-dimensional as a $\mathbf{k}$-vector space)\footnote{Here is a
sketch of an alternative proof:
\par
Consider the two $\mathbf{k}$-linear maps $L_{e}:A\rightarrow A,\ x\mapsto ex$
and $R_{e}:A\rightarrow A,\ x\mapsto xe$. Both of these maps $L_{e}$ and
$R_{e}$ are idempotent (since $e$ is idempotent). But it is well-known that
any idempotent endomorphism $\phi$ of a finite-dimensional $\mathbf{k}$-vector
space $V$ satisfies $\operatorname*{Tr}\phi=\operatorname*{rank}\phi
\cdot1_{\mathbf{k}}$. Since $L_{e}$ is an idempotent endomorphism $\phi$ of
the $\mathbf{k}$-vector space $A$, we thus have $\operatorname*{Tr}\left(
L_{e}\right)  =\operatorname*{rank}\left(  L_{e}\right)  \cdot1_{\mathbf{k}%
}=\operatorname*{rank}\left(  L_{e}\right)  $ (here, we embed $\mathbb{Q}$ in
$\mathbf{k}$, since $\mathbf{k}$ is a field of characteristic $0$). But
$\operatorname*{rank}\left(  L_{e}\right)  =\dim\left(  L_{e}\left(  A\right)
\right)  =\dim\left(  eA\right)  $ (by the definition of $L_{e}$). Hence,
$\operatorname*{Tr}\left(  L_{e}\right)  =\operatorname*{rank}\left(
L_{e}\right)  =\dim\left(  eA\right)  $. Similarly, $\operatorname*{Tr}\left(
R_{e}\right)  =\dim\left(  Ae\right)  $.
\par
But $A$ is the group algebra $\mathbf{k}\left[  G\right]  $. Thus,
\cite[Proposition 5.11.7]{sga} shows that $\operatorname*{Tr}\left(
R_{e}\right)  =\left\vert G\right\vert \cdot\left[  1\right]  e$, where
$\left[  1\right]  e$ denotes the coefficient of the permutation
$1=\operatorname*{id}\in G$ in $e$. Similarly, $\operatorname*{Tr}\left(
L_{e}\right)  =\left\vert G\right\vert \cdot\left[  1\right]  e$ as well.
Hence, $\dim\left(  eA\right)  =\operatorname*{Tr}\left(  L_{e}\right)
=\left\vert G\right\vert \cdot\left[  1\right]  e$ and likewise $\dim\left(
Ae\right)  =\left\vert G\right\vert \cdot\left[  1\right]  e$, so that
$\dim\left(  eA\right)  =\left\vert G\right\vert \cdot\left[  1\right]
e=\dim\left(  Ae\right)  $, qed.}. Therefore, from $Ae\subseteq eA$, we obtain
$Ae=eA$ (since a subspace $U$ of a finite-dimensional vector space $W$ that
satisfies $\dim U=\dim W$ must be $W$ itself).
\end{noncompile}

Thus, $I=Ae=eA$. This shows that $I$ is a right ideal of $A$. Since $I$ is
also a left ideal, we conclude that $I$ is a two-sided ideal of $A$.
\end{proof}

Proposition \ref{prop.unity.I} \textbf{(b)} (applied to $G=S_{n}$) shows that
a left ideal $I$ of $\mathcal{A}=\mathbf{k}\left[  S_{n}\right]  $ rarely has
a unity. Indeed, the group algebra $\mathcal{A}=\mathbf{k}\left[
S_{n}\right]  $ has only finitely many two-sided ideals (in fact, the
Artin--Wedderburn theorem shows that $\mathcal{A}\cong\prod\limits_{\lambda
\vdash n}\mathbf{k}^{f_{\lambda}\times f_{\lambda}}$, and each matrix ring
$\mathbf{k}^{f_{\lambda}\times f_{\lambda}}$ is a simple $\mathbf{k}$-algebra;
thus, the two-sided ideals of $\mathcal{A}$ correspond to the $2^{\left\vert
\left\{  \lambda\vdash n\right\}  \right\vert }$ many subproducts of the
direct product $\prod\limits_{\lambda\vdash n}\mathbf{k}^{f_{\lambda}\times
f_{\lambda}}$), but has infinitely many left ideals when $n\geq3$.

Proposition \ref{prop.unity.I} yields a direct (and rather disappointing)
answer to the question when the nonunital $\mathbf{k}$-algebra $J$ has a
unity. The multiplicity-freeness is mostly a hindrance here. However, the
multiplicity-freeness of $J$ gives the following structural property of $J$:

\begin{proposition}
\label{prop.J.struct}Let $J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}$ be a
decomposition of $J$ into irreducible $S_{n}$-subrepresentations. Then,
$J\cong J_{1}\times J_{2}\times\cdots\times J_{k}$ as nonunital $\mathbf{k}$-algebras.
\end{proposition}

\begin{proof}
The subrepresentations $J_{1},J_{2},\ldots,J_{k}$ are pairwise non-isomorphic
(since $J$ is multiplicity-free). Hence, Lemma \ref{lem.JJ-invar} shows that
$J_{i}J_{j}=0$ for all $i\neq j$. Therefore, the canonical $\mathbf{k}$-vector
space isomorphism $J_{1}\times J_{2}\times\cdots\times J_{k}\rightarrow J$ is
actually a nonunital $\mathbf{k}$-algebra isomorphism.
\end{proof}

\subsection{The nonunital subalgebra $J^{\ast}J$}

The product $J^{\ast}J$ is a nonunital subalgebra of $\mathcal{A}$ as well,
since $J^{\ast}\underbrace{JJ^{\ast}J}_{\subseteq J}\subseteq J^{\ast}J$. When
does it have a unity? This has a rather tricky answer, depending somewhat on
$\mathbf{k}$:

\begin{proposition}
\label{prop.J*J.struct}Let $J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}$ be a
decomposition of $J$ into irreducible $S_{n}$-subrepresentations. Then:

\begin{enumerate}
\item[\textbf{(a)}] We have $J^{\ast}J\cong\prod_{i=1}^{k}\left(  J_{i}^{\ast
}J_{i}\right)  $ as nonunital $\mathbf{k}$-algebras.

\item[\textbf{(b)}] Let $\mathbf{k}_{0}$ be the $\mathbf{k}$-vector space
$\mathbf{k}$, turned into a nonunital $\mathbf{k}$-algebra by defining the
product of any two elements to be $0$. Then, $J^{\ast}J\cong\mathbf{k}%
^{r}\times\mathbf{k}_{0}^{k-r}$ as nonunital $\mathbf{k}$-algebras for some
$r\in\left\{  0,1,\ldots,k\right\}  $.

\item[\textbf{(c)}] Assume that $\mathbf{k}$ is an ordered field (for
instance, $\mathbb{Q}$ or $\mathbb{R}$). Then, $J^{\ast}J\cong\mathbf{k}^{k}$
as nonunital $\mathbf{k}$-algebras. In particular, $J^{\ast}J$ has a unity.
\end{enumerate}
\end{proposition}

\begin{proof}
\textbf{(a)} The subrepresentations $J_{1},J_{2},\ldots,J_{k}$ are pairwise
non-isomorphic (since $J$ is multiplicity-free). Hence, Lemma
\ref{lem.J*J-invar-1} shows that $J_{i}^{\ast}J_{j}=0$ for all $i\neq j$. Now,
from $J=J_{1}\oplus J_{2}\oplus\cdots\oplus J_{k}=\sum_{i=1}^{k}J_{i}$, we
obtain%
\[
J^{\ast}J=\left(  \sum_{i=1}^{k}J_{i}\right)  ^{\ast}\sum_{j=1}^{k}J_{j}%
=\sum_{i=1}^{k}J_{i}^{\ast}\sum_{j=1}^{k}J_{j}=\sum_{i=1}^{k}\ \ \sum
_{j=1}^{k}\underbrace{J_{i}^{\ast}J_{j}}_{=0\text{ for all }i\neq j}%
=\sum_{i=1}^{k}J_{i}^{\ast}J_{i}.
\]
Moreover, the sum on the right hand side here is a direct sum (because the
addends $J_{i}^{\ast}J_{i}$ are subspaces of the respective $J_{i}$ and thus
are linearly disjoint). Thus,
\[
J^{\ast}J=\bigoplus_{i=1}^{k}\left(  J_{i}^{\ast}J_{i}\right)
\ \ \ \ \ \ \ \ \ \ \left(  \text{an internal direct sum}\right)  .
\]
Moreover, $\left(  J_{i}^{\ast}J_{i}\right)  \left(  J_{j}^{\ast}J_{j}\right)
=0$ for all $i\neq j$ (since $J_{i}^{\ast}J_{i}\subseteq J_{i}^{\ast}$ and
$J_{j}^{\ast}J_{j}\subseteq J_{j}$ but $J_{i}^{\ast}J_{j}=0$). Thus, the
canonical $\mathbf{k}$-vector space isomorphism $\prod_{i=1}^{k}\left(
J_{i}^{\ast}J_{i}\right)  \rightarrow J^{\ast}J$ is actually a nonunital
$\mathbf{k}$-algebra isomorphism. This proves part \textbf{(a)}. \medskip

\textbf{(b)} This will follow from part \textbf{(a)}, once we show that each
$i\in\left\{  1,2,\ldots,k\right\}  $ satisfies $J_{i}^{\ast}J_{i}%
\cong\mathbf{k}$ or $J_{i}^{\ast}J_{i}\cong\mathbf{k}_{0}$. So let us show this.

Let $i\in\left\{  1,2,\ldots,k\right\}  $. Then, we must prove that
$J_{i}^{\ast}J_{i}\cong\mathbf{k}$ or $J_{i}^{\ast}J_{i}\cong\mathbf{k}_{0}$.
It suffices to prove that $\dim\left(  J_{i}^{\ast}J_{i}\right)  =1$, since
every nonunital $\mathbf{k}$-algebra of dimension $1$ is isomorphic to either
$\mathbf{k}$ or $\mathbf{k}_{0}$ (depending on whether its product takes any
nonzero value or does not). But this equality follows from Lemma
\ref{lem.J*J-invar-3} (applied to $V=J_{i}$). As explained above, this
completes the proof of part \textbf{(b)}. \medskip

\textbf{(c)} This will follow from part \textbf{(a)}, once we show that each
$i\in\left\{  1,2,\ldots,k\right\}  $ satisfies $J_{i}^{\ast}J_{i}%
\cong\mathbf{k}$. So let us show this.

Let $i\in\left\{  1,2,\ldots,k\right\}  $. Then, we must prove that
$J_{i}^{\ast}J_{i}\cong\mathbf{k}$. In the proof of part \textbf{(b)}, we have
already seen that $J_{i}^{\ast}J_{i}\cong\mathbf{k}$ or $J_{i}^{\ast}%
J_{i}\cong\mathbf{k}_{0}$, so we only need to rule out the case $J_{i}^{\ast
}J_{i}\cong\mathbf{k}_{0}$.

But $\mathbf{k}$ is an ordered field. Thus, we have the following simple fact:

\begin{statement}
\textit{Star positivity trick:} For any nonzero $x\in\mathcal{A}$, we have
$x^{\ast}x\neq0$.
\end{statement}

\begin{proof}
[Proof of the star positivity trick:]Let $x\in\mathcal{A}$ be nonzero. Write
$x$ as $x=\sum_{g\in S_{n}}\xi_{g}g$, where $\xi_{g}\in\mathbf{k}$ are
scalars. Then, the coefficient of the identity permutation $\operatorname*{id}%
\in S_{n}$ in $x^{\ast}x$ is easily seen to be $\sum_{g\in S_{n}}\xi_{g}^{2}$.
But $\sum_{g\in S_{n}}\xi_{g}^{2}>0$ (since $x\neq0$ shows that at least one
of the $\xi_{g}$ is nonzero, but a sum of squares, at least one of which is
nonzero, is always positive in an ordered field). Hence, the coefficient of the identity permutation
$\operatorname*{id}\in S_{n}$ in $x^{\ast}x$ is $>0$. Thus, $x^{\ast}x\neq0$, qed.
\end{proof}

Now, Lemma \ref{lem.J*J-invar-3} (applied to $V=J_{i}$) yields $\dim\left(
J_{i}^{\ast}J_{i}\right)  =1$. Hence, $J_{i}^{\ast}J_{i}\neq0$, so that there
exists some nonzero $x\in J_{i}^{\ast}J_{i}$. Consider this $x$. By the star
positivity trick, we thus conclude that $x^{\ast}x\neq0$. Since $x\in
J_{i}^{\ast}J_{i}$, this shows that the product of the nonunital algebra
$J_{i}^{\ast}J_{i}$ is not identically $0$. Hence, the case $J_{i}^{\ast}%
J_{i}\cong\mathbf{k}_{0}$ is impossible. This completes the proof of part
\textbf{(c)}.
\end{proof}

The claim of Proposition \ref{prop.J*J.struct} \textbf{(c)} holds more
generally for any field $\mathbf{k}$, as long as the left ideal $J$ is defined
over an ordered subfield of $\mathbf{k}$. In particular, it holds when $J$ is
defined over $\mathbb{Q}$, which is the case for most combinatorially
meaningful $J$'s.

\subsection{Appendix: Sums of non-isomorphic irreducibles are direct}

Let us finally state a basic property of representations which is surely
well-known, but which we could not locate in the literature. We shall use it
later on. It says that any sum of pairwise non-isomorphic irreducible
representations (inside a larger representation) of $\mathcal{A}$ must be a
direct sum. More generally, this holds for any finite-dimensional $\mathbf{k}%
$-algebra instead of $\mathcal{A}$:

\begin{proposition}
\label{prop.sum-irrep-direct}Let $R$ be a finite-dimensional $\mathbf{k}%
$-algebra. Let $V$ be a left $R$-module. Let $I_{1},I_{2},\ldots,I_{k}$ be
pairwise non-isomorphic irreducible submodules of $V$. Then, the sum
$I_{1}+I_{2}+\cdots+I_{k}$ is a direct sum.
\end{proposition}

\begin{proof}
[Proof sketch.]We induct on $k$. The \textit{base case} ($k=0$) is obvious, so
we step to the \textit{induction step} (from $k-1$ to $k$). Thus, we assume
(as the induction hypothesis) that the sum $I_{1}+I_{2}+\cdots+I_{k-1}$ is a
direct sum. Hence, there exist left $R$-linear projections $\pi_{j}%
:I_{1}+I_{2}+\cdots+I_{k-1}\rightarrow I_{j}$ for all $j\in\left[  k-1\right]
$.

Now, the intersection $I_{k}\cap\left(  I_{1}+I_{2}+\cdots+I_{k-1}\right)  $
is a left $R$-submodule of $I_{k}$, and thus equals $I_{k}$ or $0$ (since
$I_{k}$ is irreducible, so that the only left $R$-submodules of $I_{k}$ are
$I_{k}$ and $0$). Since $I_{k}\cap\left(  I_{1}+I_{2}+\cdots+I_{k-1}\right)
=I_{k}$ is impossible (because $I_{k}\cap\left(  I_{1}+I_{2}+\cdots
+I_{k-1}\right)  =I_{k}$ would yield $I_{k}\subseteq I_{1}+I_{2}%
+\cdots+I_{k-1}$, and thus at least one of the projections $\pi_{j}$ would
yield a nontrivial left $R$-module morphism%
\[
I_{k}\overset{\text{inclusion}}{\longrightarrow}I_{1}+I_{2}+\cdots
+I_{k-1}\overset{\pi_{j}}{\longrightarrow}I_{j},
\]
and this would entail $I_{k}\cong I_{j}$ by Schur's lemma, contradicting the
non-isomorphy of $I_{1},I_{2},\ldots,I_{k}$), we thus obtain $I_{k}\cap\left(
I_{1}+I_{2}+\cdots+I_{k-1}\right)  =0$. Since the sum $I_{1}+I_{2}%
+\cdots+I_{k-1}$ is a direct sum, this yields that the sum $I_{1}+I_{2}%
+\cdots+I_{k}$ is a direct sum. This completes the induction step, and thus
proves Proposition \ref{prop.sum-irrep-direct}.
\end{proof}

\section{\label{sec.gelfand}The left ideal Gelfand model}

We shall now apply the above general theory to a specific left ideal of
$\mathcal{A}=\mathbf{k}\left[  S_{n}\right]  $.

As before, we fix $n\in\mathbb{N}$.

\subsection{\label{subsec.gelfand.def}Definition}

We let $\left[  m\right]  :=\left\{  1,2,\ldots,m\right\}  $ for each
$m\in\mathbb{Z}$. Thus, $S_{n}$ is the group of all permutations of $\left[
n\right]  =\left\{  1,2,\ldots,n\right\}  $.

For any $i\neq j$ in $\left[  n\right]  $, we let $t_{i,j}\in S_{n}$ denote
the transposition swapping $i$ with $j$.

For any $2k$ distinct elements $i_{1},i_{2},\ldots,i_{k},j_{1},j_{2}%
,\ldots,j_{k}\in\left[  n\right]  $, we define
\begin{equation}
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}:=\sum_{\substack{w\in
S_{n};\\w\left(  i_{s}\right)  <w\left(  j_{s}\right)  \text{ for all }%
s\in\left[  k\right]  }}w^{-1}\in\mathcal{A}. \label{eq.gelfand.def}%
\end{equation}


\begin{example}
Let $\left[  i_{1}i_{2}\cdots i_{n}\right]  $ be shorthand for the permutation
in $S_{n}$ that sends $1,2,\ldots,n$ to $i_{1},i_{2},\ldots,i_{n}$. For $n=3$,
we have%
\[
G_{3;\ 1}=\sum_{\substack{w\in S_{3};\\w\left(  3\right)  <w\left(  1\right)
}}w^{-1}=\left[  231\right]  ^{-1}+\left[  321\right]  ^{-1}+\left[
312\right]  ^{-1}=\left[  312\right]  +\left[  321\right]  +\left[
231\right]  .
\]
For $n=4$, we have
\begin{align*}
G_{1,3;\ 2,4}  &  =\sum_{\substack{w\in S_{4};\\w\left(  1\right)  <w\left(
2\right)  ;\\w\left(  3\right)  <w\left(  4\right)  }}w^{-1}\\
&  =\left[  1234\right]  ^{-1}+\left[  1324\right]  ^{-1}+\left[  1423\right]
^{-1}+\left[  2314\right]  ^{-1}+\left[  2413\right]  ^{-1}+\left[
3412\right]  ^{-1}\\
&  =\left[  1234\right]  +\left[  1324\right]  +\left[  1342\right]  +\left[
3124\right]  +\left[  3142\right]  +\left[  3412\right]  .
\end{align*}

\end{example}

Clearly, for any $2k$ distinct elements $i_{1},i_{2},\ldots,i_{k},j_{1}%
,j_{2},\ldots,j_{k}\in\left[  n\right]  $ and any permutation $\sigma\in
S_{k}$, we have%
\begin{equation}
G_{i_{\sigma\left(  1\right)  },i_{\sigma\left(  2\right)  },\ldots
,i_{\sigma\left(  k\right)  };\ j_{\sigma\left(  1\right)  },j_{\sigma\left(
2\right)  },\ldots,j_{\sigma\left(  k\right)  }}=G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}. \label{eq.G.perm}%
\end{equation}
That is, $G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}$ depends
only on the \textbf{set} of the pairs $\left(  i_{1},j_{1}\right)  ,\ \left(
i_{2},j_{2}\right)  ,\ \ldots,\ \left(  i_{k},j_{k}\right)  $, not on their order.

It is also easy to see that
\[
wG_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}=G_{w\left(
i_{1}\right)  ,w\left(  i_{2}\right)  ,\ldots,w\left(  i_{k}\right)
;\ w\left(  j_{1}\right)  ,w\left(  j_{2}\right)  ,\ldots,w\left(
j_{k}\right)  }%
\]
for any permutation $w\in S_{n}$. Hence,%
\[
\mathcal{G}:=\operatorname*{span}\left(  G_{i_{1},i_{2},\ldots,i_{k}%
;\ j_{1},j_{2},\ldots,j_{k}}\ \mid\ i_{1},i_{2},\ldots,i_{k},j_{1}%
,j_{2},\ldots,j_{k}\in\left[  n\right]  \text{ are distinct}\right)
\]
is a left ideal of $\mathcal{A}$.

\subsection{The main result}

Most of this section will be spent proving the following description of the
left $\mathcal{A}$-module structure of this ideal $\mathcal{G}$ (implicit in
\cite[\S 5.1]{RSW}):

\begin{theorem}
\label{thm.G.gelfand}This left ideal $\mathcal{G}$ is a Gelfand model of
$S_{n}$; that is, $\mathcal{G}$ is isomorphic to the direct sum of all the
Specht modules $\mathcal{S}^{\lambda}$ with $\lambda\vdash n$.
\end{theorem}

As a consequence of Theorem \ref{thm.G.gelfand}, it will follow that the
representation $\mathcal{G}$ of $S_{n}$ is multiplicity-free. Hence, Theorem
\ref{thm.mf3} will yield that $\left[  \mathcal{G}^{\ast}\mathcal{G}%
,\ \mathcal{G}^{\ast}\mathcal{G}\right]  =0$. This generalizes the
commutativity part of \cite[Theorem 1.6]{RSW}. Furthermore, Theorem
\ref{thm.mf2} will yield $\mathcal{G}\left[  \mathcal{G},\mathcal{G}\right]
=0$.

\subsection{\label{subsec.gelfand.filtr}The filtration $\left(  \mathcal{G}%
_{m}\right)  _{m\in\mathbb{Z}}$}

Before we prove Theorem \ref{thm.G.gelfand}, we define a filtration on
$\mathcal{G}$.

Namely, for each $m\in\mathbb{Z}$, we define the $\mathbf{k}$-vector subspace%
\[
\mathcal{G}_{m}:=\operatorname*{span}\left(  G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}\ \mid\ k\leq m\right)
\ \ \ \ \ \ \ \ \ \ \text{of }\mathcal{A}.
\]
This $\mathcal{G}_{m}$ is always a left ideal of $\mathcal{A}$ (for the same
reason as why $\mathcal{G}$ is). Moreover, $\mathcal{G}_{m}=\mathcal{G}$
whenever $2m\geq n$. Thus, we obtain a filtration $\left(  0=\mathcal{G}%
_{-1}\subseteq\mathcal{G}_{0}\subseteq\cdots\subseteq\mathcal{G}%
_{n}=\mathcal{G}\right)  $ of $\mathcal{G}$ by left $\mathcal{A}$-submodules.
We can easily derive an upper bound on the dimensions of its subquotients
(later we shall see that this upper bound is, in fact, the actual dimension):

\begin{lemma}
\label{lem.G.matchbound}Let $m\in\mathbb{Z}$. An $m$\emph{-matching} of
$\left[  n\right]  $ shall mean a set of $m$ disjoint $2$-element subsets of
$\left[  n\right]  $. Then,
\begin{equation}
\dim\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  \leq\left(  \text{\# of
}m\text{-matchings of }\left[  n\right]  \right)  .
\label{eq.thm.G.gelfand.dimbound}%
\end{equation}

\end{lemma}

\begin{proof}
The definitions of $\mathcal{G}_{m}$ and $\mathcal{G}_{m-1}$ show that the
quotient vector space $\mathcal{G}_{m}/\mathcal{G}_{m-1}$ is spanned by the
family%
\begin{equation}
\left(  \overline{G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}%
}\right)  _{i_{1},i_{2},\ldots,i_{m},j_{1},j_{2},\ldots,j_{m}\text{ are
}2m\text{ distinct elements of }\left[  n\right]  }
\label{pf.lem.G.matchbound.family1}%
\end{equation}
(this is an empty family if $2m>n$). Moreover, if we swap an $i_{p}$ with the
respective $j_{p}$ in $G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}%
}$, then we obtain%
\begin{align*}
&  G_{i_{1},i_{2},\ldots,i_{p-1},j_{p},i_{p+1},\ldots,i_{m};\ j_{1}%
,j_{2},\ldots,j_{p-1},i_{p},j_{p+1},\ldots,j_{m}}\\
&  =\sum_{\substack{w\in S_{n};\\w\left(  i_{s}\right)  <w\left(
j_{s}\right)  \text{ for all }s\neq p;\\w\left(  j_{p}\right)  <w\left(
i_{p}\right)  }}w^{-1}=\sum_{\substack{w\in S_{n};\\w\left(  i_{s}\right)
<w\left(  j_{s}\right)  \text{ for all }s\neq p;\\\text{but not }w\left(
i_{p}\right)  <w\left(  j_{p}\right)  }}w^{-1}\\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since }i_{p}\neq j_{p}\text{, and thus }w\left(  i_{p}\right)  \neq
w\left(  j_{p}\right)  \text{ for all }w\in S_{n}\text{,}\\
\text{so that the condition \textquotedblleft}w\left(  j_{p}\right)  <w\left(
i_{p}\right)  \text{\textquotedblright}\\
\text{is equivalent to \textquotedblleft not }w\left(  i_{p}\right)  <w\left(
j_{p}\right)  \text{\textquotedblright}%
\end{array}
\right) \\
&  =\underbrace{\sum_{\substack{w\in S_{n};\\w\left(  i_{s}\right)  <w\left(
j_{s}\right)  \text{ for all }s\neq p}}w^{-1}}_{=G_{i_{1},i_{2},\ldots
,i_{p-1},i_{p+1},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{p-1},j_{p+1}%
,\ldots,j_{m}}}-\underbrace{\sum_{\substack{w\in S_{n};\\w\left(
i_{s}\right)  <w\left(  j_{s}\right)  \text{ for all }s}}w^{-1}}%
_{=G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}}\\
&  =\underbrace{G_{i_{1},i_{2},\ldots,i_{p-1},i_{p+1},\ldots,i_{m}%
;\ j_{1},j_{2},\ldots,j_{p-1},j_{p+1},\ldots,j_{m}}}_{\in\mathcal{G}_{m-1}%
}-\,G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}\\
&  \equiv-G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}%
}\operatorname{mod}\mathcal{G}_{m-1},
\end{align*}
so that%
\begin{align}
&  \overline{G_{i_{1},i_{2},\ldots,i_{p-1},j_{p},i_{p+1},\ldots,i_{m}%
;\ j_{1},j_{2},\ldots,j_{p-1},i_{p},j_{p+1},\ldots,j_{m}}}\nonumber\\
&  =-\overline{G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}%
}\ \ \ \ \ \ \ \ \ \ \text{in }\mathcal{G}_{m}/\mathcal{G}_{m-1}.
\label{pf.lem.G.matchbound.redund}%
\end{align}
Thus, in the quotient $\mathbf{k}$-vector space $\mathcal{G}_{m}%
/\mathcal{G}_{m-1}$, the residue class $\overline{G_{i_{1},i_{2},\ldots
,i_{m};\ j_{1},j_{2},\ldots,j_{m}}}$ merely flips its sign when we swap some
$i_{p}$ with the corresponding $j_{p}$. Hence, up to sign, this class depends
only on the $m$ disjoint \textbf{sets} $\left\{  i_{1},j_{1}\right\}
,\ \left\{  i_{2},j_{2}\right\}  ,\ \ldots,\ \left\{  i_{m},j_{m}\right\}  $,
not on the ordered pairs $\left(  i_{1},j_{1}\right)  ,\ \left(  i_{2}%
,j_{2}\right)  ,\ \ldots,\ \left(  i_{m},j_{m}\right)  $ (and, as we know from
(\ref{eq.G.perm}), it also does not depend on the \textbf{order} in which
these $m$ sets are listed). This shows that the family
(\ref{pf.lem.G.matchbound.family1}) is highly redundant, and can be reduced to
a smaller family indexed by the $m$-matchings of $\left[  n\right]  $ (just
take one choice of $\overline{G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2}%
,\ldots,j_{m}}}$ for each $m$-matching) that still spans $\mathcal{G}%
_{m}/\mathcal{G}_{m-1}$. Consequently,
\[
\dim\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  \leq\left(  \text{\# of
}m\text{-matchings of }\left[  n\right]  \right)  .
\]
This proves Lemma \ref{lem.G.matchbound}.
\end{proof}

\subsection{\label{subsec.gelfand.proof}Proving the Gelfandness
half-combinatorially}

We continue on our way towards the proof of Theorem \ref{thm.G.gelfand}.

The key tool will be a proposition (Proposition \ref{prop.G.G*Slam}) showing
that for each Specht module $\mathcal{S}^{\lambda}$, there is a nonzero
element $G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast
}\mathbf{e}_{T}\in\mathcal{G}^{\ast}\mathcal{S}^{\lambda}$ (where
$\mathbf{e}_{T}$ is a polytabloid corresponding to some $n$-tableau $T$ of shape
$\lambda$, and where $G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}%
}^{\ast}$ is the star of one of the generators of $\mathcal{G}$). This proposition will
yield that $\mathcal{G}$ has a submodule isomorphic to $\mathcal{S}^{\lambda}%
$. Thus, $\mathcal{G}$ will contain a submodule isomorphic to $\bigoplus
\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}$ (since the Specht modules
$\mathcal{S}^{\lambda}$ are simple and non-isomorphic). On the other hand, we
will show that the dimension of $\mathcal{G}$ agrees with the dimension of
$\bigoplus\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}$, so this submodule
must be in fact the whole $\mathcal{G}$. Thus, Theorem \ref{thm.G.gelfand}
will follow.

We consider this to be a half-combinatorial proof. Proposition
\ref{prop.G.G*Slam} will be proved combinatorially, and the dimensions will be
computed combinatorially, but the deduction of $\bigoplus\limits_{\lambda
\vdash n}\mathcal{S}^{\lambda}=\mathcal{G}$ from $\bigoplus\limits_{\lambda
\vdash n}\mathcal{S}^{\lambda}\subseteq\mathcal{G}$ and $\dim\left(
\bigoplus\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}\right)  =\dim
\mathcal{G}$ is an intrusion of linear algebra.

\begin{noncompile}
Find a combinatorial proof of the fact that $\mathcal{G}$ is a Gelfand model.
That is, for each of the isotypic projectors $\mathbf{E}_{\lambda}%
\in\mathcal{A}$, show that $\mathcal{G}\mathbf{E}_{\lambda}$ is isomorphic to
the Specht module $\mathcal{S}^{\lambda}$ by exhibiting an explicit
isomorphism $\mathcal{S}^{\lambda}\rightarrow\mathcal{G}\mathbf{E}_{\lambda}$.
(This is somewhat similar to the Murphy cellular basis of $\mathcal{A}$, but
using involutions instead of permutations.)

Here is an approach to this question that works, but is not entirely
combinatorial, as the proof relies on dimension counting.
\end{noncompile}

\subsubsection{\label{subsubsec.gelfand.proof.nz}The nonvanishing: statement}

The crux of our proof is the following fact, which will take us a while to prove:

\begin{proposition}
\label{prop.G.G*Slam}Let $\lambda$ be a partition of $n$. Let $k=\sum_{i\geq
1}\left\lfloor \dfrac{\lambda_{i}^{t}}{2}\right\rfloor $, where $\lambda^{t}$
is the conjugate partition of $\lambda$. Let $T$ be any $n$-tableau of shape
$\lambda$ (that is, any tableau of shape $\lambda$ with entries $1,2,\ldots,n$
in some order). Then, there exist some $2k$ distinct elements $i_{1}%
,i_{2},\ldots,i_{k},j_{1},j_{2},\ldots,j_{k}\in\left[  n\right]  $ such that
\[
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}\mathbf{e}%
_{T}\neq0,
\]
where $\mathbf{e}_{T}\in\mathcal{S}^{\lambda}$ is the polytabloid
corresponding to $T$.
\end{proposition}

Note that the $k$ in Proposition \ref{prop.G.G*Slam} can also be described as
$\left(  n-\ddot{o}\right)  /2$, where $\ddot{o}$ is the number of odd-length
columns of the Young diagram $Y\left(  \lambda\right)  $.

\subsubsection{(*) \label{subsubsec.gelfand.proof.nz-pf1}The nonvanishing: bad
proof}

We shall give two proofs of Proposition \ref{prop.G.G*Slam}: one bad, one
good. The bad one is easier to explain, but has some disadvantages, the main
one of which is that it really requires $\mathbf{k}$ to have characteristic $0$,
whereas the proposition only needs $n!$ to be invertible in $\mathbf{k}$.

\begin{proof}
[Bad proof of Proposition \ref{prop.G.G*Slam}.]Tile each column of $T$ with
vertical dominoes, except possibly for the bottommost cell of this column
(which cannot be tiled if the column has odd length). Thus, we have altogether
put $k$ disjoint dominoes into $T$. Label these dominoes as $D_{1}%
,D_{2},\ldots,D_{k}$. For each $m\in\left\{  1,2,\ldots,k\right\}  $, let
$i_{m}$ and $j_{m}$ be the entries of $T$ in domino $D_{m}$ from top to
bottom. We claim that these $i_{m}$'s and $j_{m}$'s satisfy%
\[
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}\mathbf{e}%
_{T}\neq0.
\]


Here is an example:%
\[%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%*(yellow) i_1 & *(red) i_3 & *(green) i_4 &  \\
%*(yellow) j_1 & *(red) j_3 & *(green) j_4 \\
%*(blue!50!white) i_2 &  &  \\
%*(blue!50!white) j_2
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
*(yellow) i_1 & *(red) i_3 & *(green) i_4 &  \\
*(yellow) j_1 & *(red) j_3 & *(green) j_4 \\
*(blue!50!white) i_2 &  &  \\
*(blue!50!white) j_2
\end{ytableau}%
%EndExpansion
\]


Indeed, we recall the Specht--Vandermonde avatar of Young and Specht modules
(\cite[Theorem 5.6.1]{sga}): There is an injective morphism $\beta$ of $S_{n}%
$-representations from the Young module $\mathcal{M}^{\lambda}$ to the
polynomial ring $\mathcal{P}:=\mathbf{k}\left[  x_{1},x_{2},\ldots
,x_{n}\right]  $ that sends each $n$-tabloid $\overline{T}$ to the monomial
$\prod_{i\in\left[  n\right]  }x_{i}^{r_{T}\left(  i\right)  -1}$, where
$r_{T}\left(  i\right)  $ is the number of the row of $T$ in which $i$ lies.
Restricting this $\beta$ to the Specht module $\mathcal{S}^{\lambda}$, we see
that%
\[
\beta\left(  \mathbf{e}_{T}\right)  =\prod_{j\geq1}V\left(  x_{T\left(
1,j\right)  },x_{T\left(  2,j\right)  },\ldots,x_{T\left(  \lambda_{j}%
^{t},j\right)  }\right)  ,
\]
where $V\left(  y_{1},y_{2},\ldots,y_{k}\right)  $ denotes the Vandermonde
determinant $\det\left(  y_{j}^{k-i}\right)  _{i,j\in\left[  k\right]  }%
=\prod_{i>j}\left(  y_{i}-y_{j}\right)  $ for any $y_{1},y_{2},\ldots,y_{k}$.

Thus,
\begin{align*}
&  \beta\left(  G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast
}\mathbf{e}_{T}\right) \\
&  =\underbrace{G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast
}}_{=\sum_{\substack{w\in S_{n};\\w\left(  i_{m}\right)  <w\left(
j_{m}\right)  \text{ for each }m}}w}\underbrace{\beta\left(  \mathbf{e}%
_{T}\right)  }_{=\prod_{j\geq1}V\left(  x_{T\left(  1,j\right)  },x_{T\left(
2,j\right)  },\ldots,x_{T\left(  \lambda_{j}^{t},j\right)  }\right)  }\\
&  =\sum_{\substack{w\in S_{n};\\w\left(  i_{m}\right)  <w\left(
j_{m}\right)  \text{ for each }m}}w\cdot\prod_{j\geq1}V\left(  x_{T\left(
1,j\right)  },x_{T\left(  2,j\right)  },\ldots,x_{T\left(  \lambda_{j}%
^{t},j\right)  }\right) \\
&  =\sum_{\substack{w\in S_{n};\\w\left(  i_{m}\right)  <w\left(
j_{m}\right)  \text{ for each }m}}\prod_{j\geq1}V\left(  x_{w\left(  T\left(
1,j\right)  \right)  },x_{w\left(  T\left(  2,j\right)  \right)  }%
,\ldots,x_{w\left(  T\left(  \lambda_{j}^{t},j\right)  \right)  }\right) \\
&  =\sum_{\substack{S\text{ is an }n\text{-tableau of shape }\lambda;\\S\text{
increases on each domino }D_{m}}}\prod_{j\geq1}V\left(  x_{S\left(
1,j\right)  },x_{S\left(  2,j\right)  },\ldots,x_{S\left(  \lambda_{j}%
^{t},j\right)  }\right) \\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{where \textquotedblleft}S\text{ increases on }D_{m}%
\text{\textquotedblright\ means that the two}\\
\text{values of }S\text{ on }D_{m}\text{ increase from top to bottom}%
\end{array}
\right)  .
\end{align*}
Clearly, it will suffice to show that this sum is $\neq0$ (because
$\beta\left(  0\right)  =0$). Being a polynomial in $x_{1},x_{2},\ldots,x_{n}%
$, it will of course be $\neq0$ if we can show that it gives a positive real
number when we evaluate it at $\left(  x_{1},x_{2},\ldots,x_{n}\right)
=\left(  a_{1},a_{2},\ldots,a_{n}\right)  $ for any strictly increasing
$n$-tuple $a_{1}<a_{2}<\cdots<a_{n}$ of reals. So let us do this.

Fix a strictly increasing $n$-tuple $a_{1}<a_{2}<\cdots<a_{n}$ of reals. We
must show that%
\[
\sum_{\substack{S\text{ is an }n\text{-tableau of shape }\lambda;\\S\text{
increases on each domino }D_{m}}}\prod_{j\geq1}V\left(  a_{S\left(
1,j\right)  },a_{S\left(  2,j\right)  },\ldots,a_{S\left(  \lambda_{j}%
^{t},j\right)  }\right)  >0.
\]
We can break the sum up according to the column-tabloid $\widetilde{S}$ (that
is, we bunch column-equivalent tableaux $S$ together), and then the product
can be factored out of the sum, and we can deal with each column separately.
We thus are left with proving the following fact: For any $p\in\mathbb{N}$ and
any $p$ reals $b_{1}<b_{2}<\cdots<b_{p}$, we have%
\[
\sum_{\substack{\sigma\in S_{p};\\\sigma\left(  1\right)  <\sigma\left(
2\right)  ;\\\sigma\left(  3\right)  <\sigma\left(  4\right)  ;\\\ldots
}}V\left(  b_{\sigma\left(  1\right)  },b_{\sigma\left(  2\right)  }%
,\ldots,b_{\sigma\left(  p\right)  }\right)  >0
\]
(where the relations under the summation sign go up to $\sigma\left(
p\right)  $ or $\sigma\left(  p-1\right)  $ depending on the parity of $p$).
Since $V\left(  b_{\sigma\left(  1\right)  },b_{\sigma\left(  2\right)
},\ldots,b_{\sigma\left(  p\right)  }\right)  =\left(  -1\right)  ^{\sigma
}V\left(  b_{1},b_{2},\ldots,b_{p}\right)  $ and $V\left(  b_{1},b_{2}%
,\ldots,b_{p}\right)  >0$, this boils down to showing that
\[
\sum_{\substack{\sigma\in S_{p};\\\sigma\left(  1\right)  <\sigma\left(
2\right)  ;\\\sigma\left(  3\right)  <\sigma\left(  4\right)  ;\\\ldots
}}\left(  -1\right)  ^{\sigma}>0.
\]
But this sum is actually $\left\lfloor p/2\right\rfloor !$, as we can show as follows:

\begin{itemize}
\item If $p$ is even, then%
\begin{align*}
\sum_{\substack{\sigma\in S_{p};\\\sigma\left(  1\right)  <\sigma\left(
2\right)  ;\\\sigma\left(  3\right)  <\sigma\left(  4\right)  ;\\\ldots
}}\left(  -1\right)  ^{\sigma}  &  =\left\lfloor p/2\right\rfloor !\cdot
\sum_{\substack{\sigma\in S_{p};\\\sigma\left(  1\right)  <\sigma\left(
2\right)  ;\\\sigma\left(  3\right)  <\sigma\left(  4\right)  ;\\\ldots
;\\\sigma\left(  1\right)  \leq\sigma\left(  3\right)  \leq\cdots\leq
\sigma\left(  p-1\right)  }}\left(  -1\right)  ^{\sigma}%
\ \ \ \ \ \ \ \ \ \ \left(  \text{by symmetry}\right) \\
&  =\left\lfloor p/2\right\rfloor !,
\end{align*}
according to the formula $\sum_{\substack{\sigma\in S_{p};\\\sigma\left(
1\right)  <\sigma\left(  2\right)  ;\\\sigma\left(  3\right)  <\sigma\left(
4\right)  ;\\\ldots;\\\sigma\left(  1\right)  \leq\sigma\left(  3\right)
\leq\cdots\leq\sigma\left(  p-1\right)  }}\left(  -1\right)  ^{\sigma}=1$ that
Gjergji Zaimi proved in \url{https://math.stackexchange.com/a/58941/} (brief
outline of the proof: define a sign-reversing involution $\Omega$ on the set
of all $\sigma\in S_{p}$ that satisfy $\sigma\left(  1\right)  <\sigma\left(
2\right)  $ and $\sigma\left(  3\right)  <\sigma\left(  4\right)  $ and
$\cdots$ and $\sigma\left(  1\right)  \leq\sigma\left(  3\right)  \leq
\cdots\leq\sigma\left(  p-1\right)  $ but are not the identity; this
involution $\Omega$ shall pick the smallest $k$ satisfying $\sigma\left(
2k-1\right)  =\sigma\left(  2k+1\right)  -1$ and swap the values of $\sigma$
at $2k$ and $2k+2$).

\item If $p$ is odd, then $\sigma\left(  p\right)  $ is not constrained at all
in our sum, and so we can cycle it through all possible values (i.e., replace
$\sigma$ by $\sigma\circ\operatorname*{cyc}\nolimits_{i,i+1,\ldots,p}$ for all
$i\in\left[  p\right]  $); this gives us a $1+\left(  -1\right)  +1+\left(
-1\right)  +\cdots+1=1$ factor that we can factor out and are left with the
corresponding sum for $p-1$ instead of $p$. Since $p-1$ is even, this is done
in the previous bullet point.
\end{itemize}

Altogether, we obtain%
\[
\sum_{\substack{\sigma\in S_{p};\\\sigma\left(  1\right)  <\sigma\left(
2\right)  ;\\\sigma\left(  3\right)  <\sigma\left(  4\right)  ;\\\ldots
}}\left(  -1\right)  ^{\sigma}=\left\lfloor p/2\right\rfloor !>0,
\]
qed.
\end{proof}

\subsubsection{\label{subsubsec.gelfand.proof.nz-pf2}The nonvanishing: good
proof}

Let us now give the good proof of Proposition \ref{prop.G.G*Slam}. This needs
a bit of preparation.

For the rest of Subsubsection \ref{subsubsec.gelfand.proof.nz-pf2}, we fix the
following notations: Let $\lambda$ be a partition of $n$. Let
\[
k=\sum_{i\geq1}\left\lfloor \lambda_{i}^{t}/2\right\rfloor
\ \ \ \ \ \ \ \ \ \ \text{and}\ \ \ \ \ \ \ \ \ \ p=\prod_{i\geq1}\left\lfloor
\lambda_{i}^{t}/2\right\rfloor !,
\]
where $\lambda^{t}$ is the conjugate partition of $\lambda$. Tile each column
of the Young diagram $Y\left(  \lambda\right)  $ with vertical dominoes,
except possibly for the bottommost cell of this column (which cannot be tiled
if the column has odd length). Thus, we have altogether put $k$ disjoint
dominoes into $Y\left(  \lambda\right)  $. Label these dominoes as
$D_{1},D_{2},\ldots,D_{k}$ in arbitrary order. For each $m\in\left[  k\right]
$, let $c_{m}$ and $d_{m}$ be the cells in domino $D_{m}$ from top to bottom.
Note that $c_{m}$ lies in an odd row of $Y\left(  \lambda\right)  $, while
$d_{m}$ is the southern neighbor of $c_{m}$ and thus lies in an even row. For
instance, for $\lambda=\left(  4,3,3,1\right)  $, we have $k=2+1+1+0=4$ and
(choosing to label the dominoes from left to right and from top to bottom in
each column) $D_{1}=\left\{  \left(  1,1\right)  ,\ \left(  2,1\right)
\right\}  $ and $D_{2}=\left\{  \left(  3,1\right)  ,\ \left(  4,1\right)
\right\}  $ and $D_{3}=\left\{  \left(  1,2\right)  ,\ \left(  2,2\right)
\right\}  $ and $D_{4}=\left\{  \left(  1,3\right)  ,\ \left(  2,3\right)
\right\}  $, and the cells $c_{1},d_{1},\ldots,c_{4},d_{4}$ look as follows:%
\[%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%*(yellow) c_1 & *(red) c_3 & *(green) c_4 &  \\
%*(yellow) d_1 & *(red) d_3 & *(green) d_4 \\
%*(blue!50!white) c_2 &  &  \\
%*(blue!50!white) d_2
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
*(yellow) c_1 & *(red) c_3 & *(green) c_4 &  \\
*(yellow) d_1 & *(red) d_3 & *(green) d_4 \\
*(blue!50!white) c_2 &  &  \\
*(blue!50!white) d_2
\end{ytableau}%
%EndExpansion
\ \ .
\]


Let $\operatorname*{Tab}\left(  \lambda\right)  $ be the set of all
$n$-tableaux of shape $\lambda$. Note that these $n$-tableaux are defined as
the bijections $Y\left(  \lambda\right)  \rightarrow\left[  n\right]  $; thus,
there are $n!$ of them. For any $n$-tableau $S\in\operatorname*{Tab}\left(
\lambda\right)  $, we write $\mathbf{e}_{S}$ for the corresponding polytabloid
in the Specht module $\mathcal{S}^{\lambda}$ (see \cite[Definition 5.4.1
\textbf{(a)}]{sga}).

An $n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $ will be
called \emph{domino-standard} if it satisfies $S\left(  c_{m}\right)
<S\left(  d_{m}\right)  $ for each $m\in\left[  k\right]  $. An $n$-tableau
$S\in\operatorname*{Tab}\left(  \lambda\right)  $ will be called
\emph{column-standard} if its entries increase down each column. Clearly, each
column-standard tableau is domino-standard, but not conversely.

Now, we shall show that any $n$-tableau $T\in\operatorname*{Tab}\left(
\lambda\right)  $ satisfies%
\[
G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }^{\ast}\mathbf{e}_{T}\neq0.
\]
This will clearly prove Proposition \ref{prop.G.G*Slam}. First, however, we
need some lemmas.

If $a$ and $b$ are two integers, then $\left[  a,b\right]  $ shall denote the
integer interval
\[
\left\{  x\in\mathbb{Z}\ \mid\ a\leq x\leq b\right\}  =\left\{  a,a+1,\ldots
,b\right\}  .
\]


\begin{lemma}
\label{lem.lacunar-squeeze}Let $t_{1},t_{2},\ldots,t_{m}$ be $m$ distinct
integers. Assume that
\begin{equation}
t_{2}<t_{4}<t_{6}<\cdots\label{eq.lem.lacunar-squeeze.eveninc}%
\end{equation}
(the last element of this chain of inequalities is either $t_{m}$ or $t_{m-1}%
$, depending on whether $m$ is even or odd). Assume furthermore that%
\begin{equation}
t_{i-1}<t_{i}\ \ \ \ \ \ \ \ \ \ \text{for each even }i\in\left[  m\right]  .
\label{eq.lem.lacunar-squeeze.dom}%
\end{equation}
Finally, assume that there exist no $a,b\in\left[  m\right]  $ satisfying
$a\equiv b\operatorname{mod}2$ and $t_{a}<t_{b}$ with the property that none
of the numbers $h\in\left\{  t_{1},t_{2},\ldots,t_{m}\right\}  $ satisfy
$t_{a}<h<t_{b}$. Then,%
\[
t_{1}<t_{2}<\cdots<t_{m}.
\]

\end{lemma}

\begin{proof}
We let $\left\{  1,3,5,\ldots\right\}  _{\leq m}$ denote the set of all odd
integers in $\left[  m\right]  $. Likewise, $\left\{  2,4,6,\ldots\right\}
_{\leq m}$ shall denote the set of all even integers in $\left[  m\right]  $.

The $m$ integers $t_{1},t_{2},\ldots,t_{m}$ are distinct, but we only care
about their relative order, not about their exact values. Thus, we can WLOG
assume that they are $1,2,\ldots,m$ in some order (otherwise, just relabel
them as $1,2,\ldots,m$ preserving their relative order). Assume this. Then,
the assumption \textquotedblleft there exist no $a,b\in\left[  m\right]  $
satisfying $a\equiv b\operatorname{mod}2$ and $t_{a}<t_{b}$ with the property
that none of the numbers $h\in\left\{  t_{1},t_{2},\ldots,t_{m}\right\}  $
satisfy $t_{a}<h<t_{b}$\textquotedblright\ can be rewritten in the simpler
form \textquotedblleft there exist no $a,b\in\left[  m\right]  $ satisfying
$a\equiv b\operatorname{mod}2$ and $t_{a}<t_{b}$ with the property that
$t_{b}-t_{a}=1$\textquotedblright. In other words, there exist no
$a,b\in\left[  m\right]  $ satisfying $a\equiv b\operatorname{mod}2$ such that
$t_{a}$ and $t_{b}$ are two consecutive integers. That is, no two of the
integers $t_{1},t_{3},t_{5},\ldots$ (the list continues until running out of
subscripts) are consecutive, and no two of the integers $t_{2},t_{4}%
,t_{6},\ldots$ are consecutive. In other words, the two sets $\left\{
t_{1},t_{3},t_{5},\ldots\right\}  $ and $\left\{  t_{2},t_{4},t_{6}%
,\ldots\right\}  $ are lacunar\footnote{A subset of $\mathbb{Z}$ is said to be
\emph{lacunar} if it contains no two consecutive integers.}. Of course, these
two sets are disjoint, and their union is $\left\{  t_{1},t_{2},\ldots
,t_{m}\right\}  =\left[  m\right]  $ (since we assumed that the integers
$t_{1},t_{2},\ldots,t_{m}$ are $1,2,\ldots,m$ in some order). But it is not
hard to see that the only ways to write the set $\left[  m\right]  $ as a
union $\left[  m\right]  =L\cup M$ of two disjoint lacunar sets $L$ and $M$
are%
\begin{align*}
\left[  m\right]   &  =\left\{  1,3,5,\ldots\right\}  _{\leq m}\cup\left\{
2,4,6,\ldots\right\}  _{\leq m}\ \ \ \ \ \ \ \ \ \ \text{and}\\
\left[  m\right]   &  =\left\{  2,4,6,\ldots\right\}  _{\leq m}\cup\left\{
1,3,5,\ldots\right\}  _{\leq m}%
\end{align*}
(because if, say, $1\in L$, then $2\in M$ by the lacunarity of $L$, therefore
$3\in L$ by the lacunarity of $M$, therefore $4\in M$ by the lacunarity of
$L$, and so on, eventually resulting in $L=\left\{  1,3,5,\ldots\right\}
_{\leq m}$ and $M=\left\{  2,4,6,\ldots\right\}  _{\leq m}$; the case $1\in M$
is analogous). Hence, we are in one of the following two cases:

\textit{Case 1:} We have $\left\{  t_{1},t_{3},t_{5},\ldots\right\}  =\left\{
1,3,5,\ldots\right\}  _{\leq m}$ and $\left\{  t_{2},t_{4},t_{6}%
,\ldots\right\}  =\left\{  2,4,6,\ldots\right\}  _{\leq m}$.

\textit{Case 2:} We have $\left\{  t_{1},t_{3},t_{5},\ldots\right\}  =\left\{
2,4,6,\ldots\right\}  _{\leq m}$ and $\left\{  t_{2},t_{4},t_{6}%
,\ldots\right\}  =\left\{  1,3,5,\ldots\right\}  _{\leq m}$.

Consider Case 1. In this case, $\left\{  t_{1},t_{3},t_{5},\ldots\right\}
=\left\{  1,3,5,\ldots\right\}  _{\leq m}$ and $\left\{  t_{2},t_{4}%
,t_{6},\ldots\right\}  =\left\{  2,4,6,\ldots\right\}  _{\leq m}$. The latter
equality, combined with (\ref{eq.lem.lacunar-squeeze.eveninc}), yields
$t_{2}=2$ and $t_{4}=4$ and $t_{6}=6$ and so on. Hence, using
(\ref{eq.lem.lacunar-squeeze.dom}), we easily see that $t_{1}=1$ (since
(\ref{eq.lem.lacunar-squeeze.dom}) yields $t_{1}<t_{2}=2$) and $t_{3}=3$
(since (\ref{eq.lem.lacunar-squeeze.dom}) yields $t_{3}<t_{4}=4$, but the
values $1$ and $2$ are already taken by $t_{1}$ and $t_{2}$) and $t_{5}=5$
(since (\ref{eq.lem.lacunar-squeeze.dom}) yields $t_{5}<t_{6}=6$, but the
values $1,2,3,4$ are already taken by $t_{1},t_{2},t_{3},t_{4}$) and so on
(except that we don't get $t_{m}=m$ in this way if $m$ is odd, since there is
no $t_{m+1}$). Altogether, we thus have shown that $t_{i}=i$ for each
$i\in\left[  m-1\right]  $ (whether even or odd). This entails that $t_{m}=m$
holds as well (since the numbers $t_{1},t_{2},\ldots,t_{m}$ are $1,2,\ldots,m$
up to order, so one of them must be $m$), and thus we have $t_{i}=i$ for each
$i\in\left[  m\right]  $. Thus, $t_{1}<t_{2}<\cdots<t_{m}$. This proves Lemma
\ref{lem.lacunar-squeeze} in Case 1.

Now consider Case 2. In this case, $\left\{  t_{1},t_{3},t_{5},\ldots\right\}
=\left\{  2,4,6,\ldots\right\}  _{\leq m}$ and $\left\{  t_{2},t_{4}%
,t_{6},\ldots\right\}  =\left\{  1,3,5,\ldots\right\}  _{\leq m}$. The latter
equality, combined with (\ref{eq.lem.lacunar-squeeze.eveninc}), yields
$t_{2}=1$ and $t_{4}=3$ and $t_{6}=5$ and so on. But
(\ref{eq.lem.lacunar-squeeze.dom}) yields $t_{1}<t_{2}=1$, which is absurd.
Thus, Case 2 cannot happen, and so the proof of Lemma
\ref{lem.lacunar-squeeze} is already complete.
\end{proof}

\begin{lemma}
\label{lem.G.G*eT1}Let $T\in\operatorname*{Tab}\left(  \lambda\right)  $.
Then, in $\mathcal{S}^{\lambda}$, we have%
\begin{equation}
G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }^{\ast}\mathbf{e}_{T}=\sum_{\substack{S\in
\operatorname*{Tab}\left(  \lambda\right)  \\\text{is domino-standard}%
}}\mathbf{e}_{S}.\nonumber
\end{equation}

\end{lemma}

\begin{proof}
Each $n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $ can be
written as $wT$ for a unique permutation $w\in S_{n}$. Moreover, the former
$n$-tableau $S$ is domino-standard if and only if it satisfies%
\[
S\left(  c_{s}\right)  <S\left(  d_{s}\right)  \ \ \ \ \ \ \ \ \ \ \text{for
all }s\in\left[  k\right]  ,
\]
that is, if the latter permutation $w\in S_{n}$ satisfies%
\[
w\left(  T\left(  c_{s}\right)  \right)  <w\left(  T\left(  d_{s}\right)
\right)  \ \ \ \ \ \ \ \ \ \ \text{for all }s\in\left[  k\right]
\]
(because $S=wT$ shows that $S\left(  c_{s}\right)  =w\left(  T\left(
c_{s}\right)  \right)  $ and $S\left(  d_{s}\right)  =w\left(  T\left(
d_{s}\right)  \right)  $ for all $s\in\left[  k\right]  $). Hence, there is a
bijection%
\begin{align*}
\left\{  w\in S_{n}\ \mid\ w\left(  T\left(  c_{s}\right)  \right)  <w\left(
T\left(  d_{s}\right)  \right)  \text{ for all }s\right\}   &  \rightarrow
\left\{  \text{domino-standard }S\in\operatorname*{Tab}\left(  \lambda\right)
\right\}  ,\\
w  &  \mapsto wT.
\end{align*}
Hence, we can substitute $S$ for $wT$ in the sum $\sum_{\substack{S\in
\operatorname*{Tab}\left(  \lambda\right)  \\\text{is domino-standard}%
}}\mathbf{e}_{S}$, and thus obtain%
\begin{equation}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}=\sum_{\substack{w\in S_{n};\\w\left(
T\left(  c_{s}\right)  \right)  <w\left(  T\left(  d_{s}\right)  \right)
\text{ for all }s}}\mathbf{e}_{wT}. \label{pf.lem.G.G*eT1.R}%
\end{equation}


On the other hand, the definition of $G_{T\left(  c_{1}\right)  ,\ T\left(
c_{2}\right)  ,\ \ldots,\ T\left(  c_{k}\right)  ;\ T\left(  d_{1}\right)
,\ T\left(  d_{2}\right)  ,\ \ldots,\ T\left(  d_{k}\right)  }$ yields%
\[
G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }=\sum_{\substack{w\in S_{n};\\w\left(
T\left(  c_{s}\right)  \right)  <w\left(  T\left(  d_{s}\right)  \right)
\text{ for all }s}}w^{-1}.
\]
Applying the linear map $x\mapsto x^{\ast}$ to this equality (which sends each
$w^{-1}$ to $w$), we obtain%
\[
G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }^{\ast}=\sum_{\substack{w\in
S_{n};\\w\left(  T\left(  c_{s}\right)  \right)  <w\left(  T\left(
d_{s}\right)  \right)  \text{ for all }s}}w.
\]
Hence,%
\begin{align*}
&  G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }^{\ast}\mathbf{e}_{T}\\
&  =\sum_{\substack{w\in S_{n};\\w\left(  T\left(  c_{s}\right)  \right)
<w\left(  T\left(  d_{s}\right)  \right)  \text{ for all }s}%
}\ \ \underbrace{w\mathbf{e}_{T}}_{\substack{=\mathbf{e}_{wT}\\\text{(by
\cite[Lemma 5.4.6 \textbf{(a)}]{sga})}}}\\
&  =\sum_{\substack{w\in S_{n};\\w\left(  T\left(  c_{s}\right)  \right)
<w\left(  T\left(  d_{s}\right)  \right)  \text{ for all }s}}\mathbf{e}_{wT}\\
&  =\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}\ \ \ \ \ \ \ \ \ \ \left(  \text{by
(\ref{pf.lem.G.G*eT1.R})}\right)  .
\end{align*}
This proves Lemma \ref{lem.G.G*eT1}.
\end{proof}

\begin{lemma}
\label{lem.G.G*eT}In $\mathcal{S}^{\lambda}$, we have%
\begin{equation}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}=p\cdot\sum_{\substack{S\in\operatorname*{Tab}%
\left(  \lambda\right)  \\\text{is column-standard}}}\mathbf{e}_{S}.\nonumber
\end{equation}

\end{lemma}

\begin{proof}
[Proof of Lemma \ref{lem.G.G*eT}.]For any $n$-tableau $S\in\operatorname*{Tab}%
\left(  \lambda\right)  $, let $\phi\left(  S\right)  $ be the column-standard
tableau obtained from $S$ by sorting the entries within each column of $S$ so
that they increase from top to bottom. Thus, we have defined a map
\[
\phi:\operatorname*{Tab}\left(  \lambda\right)  \rightarrow\left\{
\text{column-standard }Q\in\operatorname*{Tab}\left(  \lambda\right)
\right\}  .
\]
For instance,%
\[
\phi:%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%3 & 2 & 5 \\
%8 & 7 \\
%1 & 6 \\
%4
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
3 & 2 & 5 \\
8 & 7 \\
1 & 6 \\
4
\end{ytableau}%
%EndExpansion
\mapsto%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%1 & 2 & 5 \\
%3 & 6 \\
%4 & 7 \\
%8
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
1 & 2 & 5 \\
3 & 6 \\
4 & 7 \\
8
\end{ytableau}%
%EndExpansion
\ \ .
\]
We call $\phi$ the \emph{column-sorting map}, since it sorts each column. Note
that if $S_{1}$ and $S_{2}$ are two column-equivalent $n$-tableaux, then
$\phi\left(  S_{1}\right)  =\phi\left(  S_{2}\right)  $.

Now, we claim a combinatorial equality:

\begin{statement}
\textit{Claim 1:} Let $Q\in\operatorname*{Tab}\left(  \lambda\right)  $ be
column-standard. Then,%
\begin{equation}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard;}\\\phi\left(  S\right)  =Q}}\left(  -1\right)  ^{Q\circ
S^{-1}}=p. \label{pf.lem.G.G*eT.c1}%
\end{equation}
(Recall that $S$ and $Q$ are bijections from $Y\left(  \lambda\right)  $ to
$\left[  n\right]  $, so that $Q\circ S^{-1}:\left[  n\right]  \rightarrow
\left[  n\right]  $ is a well-defined permutation of $\left[  n\right]  $ and
thus has a sign $\left(  -1\right)  ^{Q\circ S^{-1}}$.)
\end{statement}

\begin{proof}
[Proof of Claim 1.]The below proof is secretly a retelling of Gjergji Zaimi's
sign-reversing involution in \url{https://math.stackexchange.com/a/58941/},
although it would take us even further afield to explain the precise correspondence.

The $n$-tableau $Q$ is column-standard, thus domino-standard, and clearly
satisfies $\phi\left(  Q\right)  =Q$. Hence, the sum on the left hand side of
(\ref{pf.lem.G.G*eT.c1}) contains at least the addend for $S=Q$, and this
addend is $\left(  -1\right)  ^{Q\circ Q^{-1}}=\left(  -1\right)
^{\operatorname*{id}}=1$.

If a cell $c\in Y\left(  \lambda\right)  $ belongs to one of the dominoes
$D_{1},D_{2},\ldots,D_{k}$, then the \emph{partner} of $c$ shall mean the
unique cell of this domino that is not $c$. A cell that doesn't belong to any
of $D_{1},D_{2},\ldots,D_{k}$ has no partner. Thus, for each $m\in\left[
k\right]  $, the cells $c_{m}$ and $d_{m}$ are each other's partners.

For any $n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $ and any
$i\in\left[  n\right]  $, we let $r_{S}\left(  i\right)  $ denote the number
of the row in which $S$ contains the entry $i$. A \emph{swappable pair} of an
$n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $ shall mean a
pair $\left(  i,j\right)  $ of elements of $\left[  n\right]  $ such that

\begin{enumerate}
\item we have $i<j$;

\item the entries $i$ and $j$ lie in the same column of $S$;

\item we have $r_{S}\left(  i\right)  \equiv r_{S}\left(  j\right)
\operatorname{mod}2$ (that is, the distance between the cells of $S$ that
contain $i$ and $j$ is even);

\item none of the numbers $h$ that lie in the same column of $S$ as $i$ and
$j$ satisfy $i<h<j$.
\end{enumerate}

For instance, the $9$-tableau $%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%4 & 5 & 8 \\
%7 & 6 & 9 \\
%1 & 3 \\
%2
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
4 & 5 & 8 \\
7 & 6 & 9 \\
1 & 3 \\
2
\end{ytableau}%
%EndExpansion
$ has only one swappable pair, namely $\left(  3,5\right)  $. (The pairs
$\left(  1,4\right)  $ and $\left(  2,7\right)  $ would fail the fourth condition.)

We note that a column-standard tableau $S$ will never have any swappable
pairs. (Indeed, if $\left(  i,j\right)  $ is a swappable pair of $S$, then
conditions 2 and 3 in the definition of \textquotedblleft swappable
pair\textquotedblright\ ensure that there is at least one cell between the
cells containing $i$ and $j$ in $S$ in the column that contains these two
cells; but then the column-standardness of $S$ implies that this cell contains
some entry $h$ satisfying $i<h<j$, and this contradicts condition 4 of the definition.)

It is easy to see that if $\left(  i,j\right)  $ is a swappable pair of an
$n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $, then the
$n$-tableau $t_{i,j}S$ (that is, the tableau obtained from $S$ by swapping the
entries $i$ and $j$) has the properties that%
\begin{align}
&  S\text{ is domino-standard}\nonumber\\
&  \text{if and only if }t_{i,j}S\text{ is domino-standard}
\label{pf.lem.G.G*eT.c1.pf.dsiff}%
\end{align}
(since the condition \textquotedblleft$r_{S}\left(  i\right)  \equiv
r_{S}\left(  j\right)  \operatorname{mod}2$\textquotedblright\ ensures that
each of the dominoes $D_{1},D_{2},\ldots,D_{k}$ contains at most one of $i$
and $j$, and then the condition \textquotedblleft none of the numbers $h$ that
lie in the same column of $S$ as $i$ and $j$ satisfy $i<h<j$\textquotedblright%
\ ensures that swapping $i$ with $j$ does not disturb their order relations
with their potential partners) and%
\begin{equation}
\phi\left(  t_{i,j}S\right)  =\phi\left(  S\right)
\label{pf.lem.G.G*eT.c1.pf.phi=}%
\end{equation}
(since $t_{i,j}$ merely swaps two entries in the same column of $S$). Also, in
this situation, the tableau $t_{i,j}S$ has the same swappable pairs as $S$.

We say that an $n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $
is \emph{swappable} if it has a swappable pair; otherwise, we call it
\emph{unswappable}. We have%
\begin{equation}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}\\\text{and swappable;}\\\phi\left(  S\right)  =Q}}\left(
-1\right)  ^{Q\circ S^{-1}}=0, \label{pf.lem.G.G*eT.c1.pf.s=0}%
\end{equation}
since we can find a sign-reversing involution on the indexing set of this sum
(this involution takes any swappable domino-standard $S\in\operatorname*{Tab}%
\left(  \lambda\right)  $ satisfying $\phi\left(  S\right)  =Q$, finds its
lexicographically minimal swappable pair $\left(  i,j\right)  $, and swaps the
entries $i$ and $j$ in $S$, that is, sends $S$ to $t_{i,j}S$; then, because of
(\ref{pf.lem.G.G*eT.c1.pf.dsiff}) and (\ref{pf.lem.G.G*eT.c1.pf.phi=}), the
tableau $t_{i,j}S$ is again a swappable domino-standard $n$-tableau in
$\operatorname*{Tab}\left(  \lambda\right)  $ satisfying $\phi\left(
t_{i,j}S\right)  =Q$, so we have an involution on our hands\footnote{Here we
are using the fact that the tableau $t_{i,j}S$ has the same swappable pairs as
$S$.}; and this involution is sign-reversing because
\begin{align*}
\left(  -1\right)  ^{Q\circ\left(  t_{i,j}S\right)  ^{-1}}  &  =\left(
-1\right)  ^{Q\circ S^{-1}\circ t_{i,j}}\ \ \ \ \ \ \ \ \ \ \left(
\text{since }\left(  t_{i,j}S\right)  ^{-1}=S^{-1}\circ t_{i,j}\right) \\
&  =\left(  -1\right)  ^{Q\circ S^{-1}}\underbrace{\left(  -1\right)
^{t_{i,j}}}_{=-1}=-\left(  -1\right)  ^{Q\circ S^{-1}}%
\end{align*}
for each $S\in\operatorname*{Tab}\left(  \lambda\right)  $). Now, each
$S\in\operatorname*{Tab}\left(  \lambda\right)  $ is either swappable or
unswappable (but never both); thus,%
\begin{align*}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard;}\\\phi\left(  S\right)  =Q}}\left(  -1\right)  ^{Q\circ
S^{-1}}  &  =\underbrace{\sum_{\substack{S\in\operatorname*{Tab}\left(
\lambda\right)  \\\text{is domino-standard}\\\text{and swappable;}%
\\\phi\left(  S\right)  =Q}}\left(  -1\right)  ^{Q\circ S^{-1}}}%
_{\substack{=0\\\text{(by (\ref{pf.lem.G.G*eT.c1.pf.s=0}))}}}+\sum
_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}\\\text{and unswappable;}\\\phi\left(  S\right)  =Q}}\left(
-1\right)  ^{Q\circ S^{-1}}\\
&  =\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}\\\text{and unswappable;}\\\phi\left(  S\right)  =Q}}\left(
-1\right)  ^{Q\circ S^{-1}}.
\end{align*}
Hence, in order to prove Claim 1, it suffices to show that
\begin{equation}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}\\\text{and unswappable;}\\\phi\left(  S\right)  =Q}}\left(
-1\right)  ^{Q\circ S^{-1}}=p. \label{pf.lem.G.G*eT.c1.pf.need}%
\end{equation}


For this, we need to better understand the structure of unswappable
domino-standard $n$-tableaux $S\in\operatorname*{Tab}\left(  \lambda\right)  $.

To do so, we define yet another operation on $n$-tableaux of shape $Y\left(
\lambda\right)  $. Namely, a \emph{biswap} shall mean an operation that can be
applied to an $n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $.
It proceeds by choosing two distinct dominoes $D_{a}$ and $D_{b}$ that lie in
the same column, and swapping the values of $S$ in cells $c_{a}$ and $c_{b}$,
and simultaneously swapping the values of $S$ in cells $d_{a}$ and $d_{b}$ (so
that the pair of values of $S$ in the two cells of $D_{a}$ gets swapped with
the corresponding pair of values in $D_{b}$). Visually speaking, this amounts to swapping
the entire dominoes $D_{a}$ and $D_{b}$ (including all their entries). For
instance, one biswap takes the $9$-tableau $%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%*(yellow) 3 & 7 \\
%*(yellow) 6 & 5 \\
%2 & 9 \\
%4 \\
%*(green) 1 \\
%*(green) 8
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
*(yellow) 3 & 7 \\
*(yellow) 6 & 5 \\
2 & 9 \\
4 \\
*(green) 1 \\
*(green) 8
\end{ytableau}%
%EndExpansion
$ to $%
%TCIMACRO{\TeXButton{TeX field}{\begin{ytableau}
%*(yellow) 1 & 7 \\
%*(yellow) 8 & 5 \\
%2 & 9 \\
%4 \\
%*(green) 3 \\
%*(green) 6
%\end{ytableau}}}%
%BeginExpansion
\begin{ytableau}
*(yellow) 1 & 7 \\
*(yellow) 8 & 5 \\
2 & 9 \\
4 \\
*(green) 3 \\
*(green) 6
\end{ytableau}%
%EndExpansion
$ (where the yellow and the green rectangles are the two dominoes $D_{a}$ and
$D_{b}$ that we have swapped). Note that entries in cells outside of
$D_{1},D_{2},\ldots,D_{k}$ do not change at all under biswaps.

Note a few properties of biswaps:

\begin{itemize}
\item Any biswap can be undone by another biswap. Thus, there is an
equivalence relation on $\operatorname*{Tab}\left(  \lambda\right)  $ in which
two $n$-tableaux $S_{1},S_{2}\in\operatorname*{Tab}\left(  \lambda\right)  $
are equivalent if and only if one of them can be transformed into the other by
a sequence of biswaps.

\item Each equivalence class of this equivalence relation has size $p$.
(\textit{Proof:} This is saying that any given $n$-tableau $S\in
\operatorname*{Tab}\left(  \lambda\right)  $ can be transformed into exactly
$p$ distinct $n$-tableaux using sequences of biswaps. But the definition of a
biswap shows that a sequence of biswaps amounts to an arbitrary permutation of
the dominoes (or, more precisely, their sets of entries) \textbf{within each
column} of $S$ (since any permutation of dominoes can be achieved by
successively swapping pairs of dominoes); since the number of dominoes inside
the $i$-th column of $S$ is $\left\lfloor \lambda_{i}^{t}/2\right\rfloor $, we
thus conclude that the number of such permutations is $\prod_{i\geq
1}\left\lfloor \lambda_{i}^{t}/2\right\rfloor !=p$. All these $p$ permutations
lead to different $n$-tableaux, since the entries of $S$ are distinct. Hence,
in total, we get $p$ distinct tableaux by applying sequences of biswaps to $S$.)

\item If we apply a biswap to a domino-standard tableau, then we obtain
another domino-standard tableau. (Indeed, domino-standardness is preserved
because a biswap \textquotedblleft moves dominoes wholesale\textquotedblright%
.) In other words, biswaps preserve domino-standardness.

\item The set of swappable pairs of a tableau $S\in\operatorname*{Tab}\left(
\lambda\right)  $ is preserved under biswaps (since a biswap keeps each entry
in its original column, and can only move it by an even number of rows). Thus,
if we apply a biswap to an unswappable tableau, then we obtain another
unswappable tableau. In other words, biswaps preserve unswappability.

\item When we apply a biswap to an $n$-tableau $S\in\operatorname*{Tab}\left(
\lambda\right)  $, the sign $\left(  -1\right)  ^{Q\circ S^{-1}}$ is
unchanged. (This is because a biswap amounts to \textbf{two} swaps of two
entries, so that the permutation $Q\circ S^{-1}\in S_{n}$ is multiplied by
\textbf{two} transpositions and therefore preserves its sign.)
\end{itemize}

Now, let $\operatorname*{BSTab}\left(  Q\right)  $ be the set of all tableaux
$S\in\operatorname*{Tab}\left(  \lambda\right)  $ that can be obtained from
$Q$ by a sequence of biswaps. This is an equivalence class of the equivalence
relation mentioned in the first bullet point above, and thus has size $p$ (by
the second bullet point). Moreover, it contains the tableau $Q$, which is
domino-standard (since it is column-standard) and unswappable (since a
column-standard tableau will never have any swappable pairs). Hence,
\textbf{all} tableaux $S\in\operatorname*{BSTab}\left(  Q\right)  $ are
domino-standard and unswappable (since biswaps preserve domino-standardness
and unswappability), and of course satisfy $\phi\left(  S\right)  =Q$ (since
$S$ is obtained from $Q$ by a sequence of biswaps, hence is column-equivalent
to $Q$, and thus transforms back into $Q$ when we apply the column-sorting map
$\phi$). Thus,%
\begin{equation}
\operatorname*{BSTab}\left(  Q\right)  \subseteq\left\{  S\in
\operatorname*{Tab}\left(  \lambda\right)  \text{ is domino-standard and
unswappable }\mid\text{\ }\phi\left(  S\right)  =Q\right\}  .
\label{pf.lem.G.G*eT.c1.pf.incl1}%
\end{equation}


We shall now prove the converse inclusion. For this purpose, let
$S\in\operatorname*{Tab}\left(  \lambda\right)  $ be domino-standard and
unswappable such that $\phi\left(  S\right)  =Q$. We shall show that
$S\in\operatorname*{BSTab}\left(  Q\right)  $.

Indeed, consider the elements of $S$ in the \textbf{even rows} (i.e., in the
$2$-nd, $4$-th, $6$-th, etc. rows). In each column of $S$, we can permute the
entries in the even rows arbitrarily using a sequence of biswaps (at the cost
of also permuting some entries in odd rows). (For instance, we can swap the
entries $S\left(  2r_{1},j\right)  $ and $S\left(  2r_{2},j\right)  $ by
applying the biswap that swaps the domino containing the former with the
domino containing the latter. As a side-effect, this biswap will also swap the
entries $S\left(  2r_{1}-1,j\right)  $ and $S\left(  2r_{2}-1,j\right)  $, but
this does not trouble us, since these entries do not lie in even rows. By a
sequence of such swaps, we can achieve any permutation of the entries in the
even rows in the $j$-th column of $S$.)

Thus, in particular, we can apply a sequence of biswaps to $S$ that results in
a tableau $S^{\prime}$ whose entries in the even rows increase top-to-bottom
down each column (i.e., that satisfies $S^{\prime}\left(  2,j\right)
<S^{\prime}\left(  4,j\right)  <S^{\prime}\left(  6,j\right)  <\cdots
<S^{\prime}\left(  2\left\lfloor \lambda_{j}^{t}/2\right\rfloor ,j\right)  $
for each $j\geq1$). Consider this tableau $S^{\prime}$. Then, $S^{\prime}$ is
obtained from $S$ by a sequence of biswaps. Since $S$ is domino-standard and
unswappable, the same holds for $S^{\prime}$ (because biswaps preserve
domino-standardness and unswappability). Fix $j\geq1$. Then,%
\[
S^{\prime}\left(  2,j\right)  <S^{\prime}\left(  4,j\right)  <S^{\prime
}\left(  6,j\right)  <\cdots<S^{\prime}\left(  2\left\lfloor \lambda_{j}%
^{t}/2\right\rfloor ,j\right)
\]
(by the construction of $S^{\prime}$) and
\[
S^{\prime}\left(  i-1,j\right)  <S^{\prime}\left(  i,j\right)
\ \ \ \ \ \ \ \ \ \ \text{for each even }i\in\left[  \lambda_{j}^{t}\right]
\]
(since $S^{\prime}$ is domino-standard), and furthermore, there exist no
$a,b\in\left[  \lambda_{j}^{t}\right]  $ satisfying $a\equiv
b\operatorname{mod}2$ and $S^{\prime}\left(  a,j\right)  <S^{\prime}\left(
b,j\right)  $ with the property that none of the numbers $h\in\left\{
S^{\prime}\left(  1,j\right)  ,\ S^{\prime}\left(  2,j\right)  ,\ \ldots
,\ S^{\prime}\left(  \lambda_{j}^{t},j\right)  \right\}  $ satisfy $S^{\prime
}\left(  a,j\right)  <h<S^{\prime}\left(  b,j\right)  $ (since $S^{\prime}$ is
unswappable, but such $a$ and $b$ would make $\left(  S^{\prime}\left(
a,j\right)  ,\ S^{\prime}\left(  b,j\right)  \right)  $ a swappable pair of
$S^{\prime}$). Therefore, Lemma \ref{lem.lacunar-squeeze} (applied to
$m=\lambda_{j}^{t}$ and $t_{i}=S^{\prime}\left(  i,j\right)  $) shows that%
\[
S^{\prime}\left(  1,j\right)  <S^{\prime}\left(  2,j\right)  <\cdots
<S^{\prime}\left(  \lambda_{j}^{t},j\right)  .
\]
We have proved this for each $j\geq1$, so we have proved that $S^{\prime}$ is
column-standard. Thus, $\phi\left(  S^{\prime}\right)  =S^{\prime}$ (since
$\phi$ fixes every column-standard tableau). But $S^{\prime}$ is obtained from
$S$ by a sequence of biswaps, hence is column-equivalent to $S$; thus,
$\phi\left(  S^{\prime}\right)  =\phi\left(  S\right)  =Q$. Therefore,
$Q=\phi\left(  S^{\prime}\right)  =S^{\prime}$. Since $S^{\prime}$ is obtained
from $S$ by a sequence of biswaps, we thus conclude that $Q$ is obtained from
$S$ by a sequence of biswaps. Therefore, in turn, $S$ is obtained from $Q$ by
a sequence of biswaps. Hence, $S\in\operatorname*{BSTab}\left(  Q\right)  $.

Forget that we fixed $S$. We thus have shown that $S\in\operatorname*{BSTab}%
\left(  Q\right)  $ whenever $S\in\operatorname*{Tab}\left(  \lambda\right)  $
is domino-standard and unswappable such that $\phi\left(  S\right)  =Q$. In
other words,%
\[
\left\{  S\in\operatorname*{Tab}\left(  \lambda\right)  \text{ is
domino-standard and unswappable }\mid\text{\ }\phi\left(  S\right)
=Q\right\}  \subseteq\operatorname*{BSTab}\left(  Q\right)  .
\]
Combining this with (\ref{pf.lem.G.G*eT.c1.pf.incl1}), we obtain%
\[
\left\{  S\in\operatorname*{Tab}\left(  \lambda\right)  \text{ is
domino-standard and unswappable }\mid\text{\ }\phi\left(  S\right)
=Q\right\}  =\operatorname*{BSTab}\left(  Q\right)  .
\]
Hence,%
\[
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}\\\text{and unswappable;}\\\phi\left(  S\right)  =Q}}\left(
-1\right)  ^{Q\circ S^{-1}}=\sum_{S\in\operatorname*{BSTab}\left(  Q\right)
}\left(  -1\right)  ^{Q\circ S^{-1}}.
\]


Furthermore, if $S\in\operatorname*{BSTab}\left(  Q\right)  $, then $\left(
-1\right)  ^{Q\circ S^{-1}}=1$ (because when we apply a biswap to an
$n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $, the sign
$\left(  -1\right)  ^{Q\circ S^{-1}}$ is unchanged; but $S\in
\operatorname*{BSTab}\left(  Q\right)  $ shows that $S$ can be obtained from
$Q$ by a sequence of biswaps; hence, $\left(  -1\right)  ^{Q\circ S^{-1}%
}=\left(  -1\right)  ^{Q\circ Q^{-1}}=\left(  -1\right)  ^{\operatorname*{id}%
}=1$). Hence,%
\[
\sum_{S\in\operatorname*{BSTab}\left(  Q\right)  }\underbrace{\left(
-1\right)  ^{Q\circ S^{-1}}}_{=1}=\sum_{S\in\operatorname*{BSTab}\left(
Q\right)  }1=\left\vert \operatorname*{BSTab}\left(  Q\right)  \right\vert =p
\]
(since $\operatorname*{BSTab}\left(  Q\right)  $ has size $p$). Therefore,%
\[
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}\\\text{and unswappable;}\\\phi\left(  S\right)  =Q}}\left(
-1\right)  ^{Q\circ S^{-1}}=\sum_{S\in\operatorname*{BSTab}\left(  Q\right)
}\left(  -1\right)  ^{Q\circ S^{-1}}=p.
\]
This proves (\ref{pf.lem.G.G*eT.c1.pf.need}), and thus concludes the proof of
Claim 1.
\end{proof}

Now,%
\begin{equation}
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}=\sum_{\substack{Q\in\operatorname*{Tab}%
\left(  \lambda\right)  \\\text{is column-standard}}}\ \ \sum_{\substack{S\in
\operatorname*{Tab}\left(  \lambda\right)  \\\text{is domino-standard;}%
\\\phi\left(  S\right)  =Q}}\mathbf{e}_{S} \label{pf.lem.G.G*eT.1a}%
\end{equation}
(here, we have split up the sum according to the value of $\phi\left(
S\right)  $, noting that $\phi\left(  S\right)  $ is always column-standard).
However, if $Q,S\in\operatorname*{Tab}\left(  \lambda\right)  $ are two
$n$-tableaux satisfying $\phi\left(  S\right)  =Q$, then $Q$ is
column-equivalent to $S$ (since $\phi$ is the column-sorting map, which only
moves the entries within their columns), so that $Q\circ S^{-1}\in
\mathcal{C}\left(  S\right)  $ and thus $\mathbf{e}_{Q}=\left(  -1\right)
^{Q\circ S^{-1}}\mathbf{e}_{S}$ (by \cite[Lemma 5.4.6 \textbf{(a)}]{sga},
since $Q=\left(  Q\circ S^{-1}\right)  S$ and $Q\circ S^{-1}\in\mathcal{C}%
\left(  S\right)  $), so that $\mathbf{e}_{S}=\left(  -1\right)  ^{Q\circ
S^{-1}}\mathbf{e}_{Q}$ (since the number $\left(  -1\right)  ^{Q\circ S^{-1}%
}\in\left\{  1,-1\right\}  $ is its own inverse). Hence,
(\ref{pf.lem.G.G*eT.1a}) can be rewritten as%
\begin{align*}
&  \sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}\\
&  =\sum_{\substack{Q\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
column-standard}}}\ \ \underbrace{\sum_{\substack{S\in\operatorname*{Tab}%
\left(  \lambda\right)  \\\text{is domino-standard;}\\\phi\left(  S\right)
=Q}}\left(  -1\right)  ^{Q\circ S^{-1}}}_{\substack{=p\\\text{(by Claim 1)}%
}}\mathbf{e}_{Q}\\
&  =\sum_{\substack{Q\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
column-standard}}}p\mathbf{e}_{Q}=\sum_{\substack{S\in\operatorname*{Tab}%
\left(  \lambda\right)  \\\text{is column-standard}}}p\mathbf{e}_{S}%
=p\cdot\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)
\\\text{is column-standard}}}\mathbf{e}_{S}.
\end{align*}
This proves Lemma \ref{lem.G.G*eT}.
\end{proof}

\begin{lemma}
\label{lem.G.G*eT-nz}In $\mathcal{S}^{\lambda}$, we have%
\[
\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
column-standard}}}\mathbf{e}_{S}\neq0.
\]

\end{lemma}

\begin{proof}
We recall that the Specht module $\mathcal{S}^{\lambda}$ is a submodule of the
Young module $\mathcal{M}^{\lambda}$, which has a basis consisting of all
$n$-tabloids $\overline{T}$ of shape $\lambda$ (see \cite[Definition
5.3.15]{sga}). The set $\left\{  n\text{-tabloids of shape }\lambda\right\}  $
is equipped with a total order called the \emph{Young last letter order} (see
\cite[Proposition 5.7.7]{sga}); explicitly, two $n$-tabloids $\overline{P}$
and $\overline{Q}$ satisfy $\overline{P}<\overline{Q}$ if and only if the
\textbf{largest} number $i\in\left[  n\right]  $ that lies in different rows
in $\overline{P}$ and $\overline{Q}$ appears further north in $\overline{P}$
than in $\overline{Q}$. Note that the largest $n$-tabloid $\overline{S}$ of
shape $\lambda$ with respect to this order is the $n$-tabloid $\overline
{S_{\max}}$, where $S_{\max}\in\operatorname*{Tab}\left(  \lambda\right)  $ is
the standard tableau that contains the numbers $1,2,\ldots,\lambda_{1}$ in its
first row, the numbers $\lambda_{1}+1,\lambda_{1}+2,\ldots,\lambda_{1}%
+\lambda_{2}$ in its second row, and so on (i.e., the cells of $S_{\max}$ are
filled with the numbers $1,2,\ldots,n$ in lexicographic order).

It is well-known (\cite[Lemma 5.7.9 \textbf{(b)}]{sga}) that each
column-standard $n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $
satisfies%
\[
\mathbf{e}_{S}=\overline{S}+\left(  \text{a linear combination of
}n\text{-tabloids }\overline{P}\text{ with }\overline{P}<\overline{S}\right)
.
\]
Hence,%
\begin{align}
&  \sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
column-standard}}}\mathbf{e}_{S}\nonumber\\
&  =\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
column-standard}}}\left(  \overline{S}+\left(  \text{a linear combination of
}n\text{-tabloids }\overline{P}\text{ with }\overline{P}<\overline{S}\right)
\right) \nonumber\\
&  =\kappa\overline{S_{\max}}+\left(  \text{a linear combination of
}n\text{-tabloids }\overline{P}\text{ with }\overline{P}<\overline{S_{\max}%
}\right)  \label{lem.G.G*eT-nz.4}%
\end{align}
(since $\overline{S_{\max}}$ is the largest $n$-tabloid $\overline{S}$ of
shape $\lambda$), where $\kappa$ is the number of column-standard $n$-tableaux
$S\in\operatorname*{Tab}\left(  \lambda\right)  $ satisfying $\overline
{S}=\overline{S_{\max}}$. Now, it is clear that $\kappa$ is a positive
integer, and in fact $\kappa$ can be easily computed explicitly: \textbf{Any}
$n$-tableau $S\in\operatorname*{Tab}\left(  \lambda\right)  $ that satisfies
$\overline{S}=\overline{S_{\max}}$ (that is, is row-equivalent to $S_{\max}$)
is column-standard (since \textbf{any} entry in the $i$-th row of $S_{\max}$
is smaller than \textbf{any} entry in the $j$-th row of $S_{\max}$ when $i<j$,
and thus a horizontal permutation cannot break the column-standardness of
$S_{\max}$). Thus, $\kappa$ is simply the number of all $n$-tableaux
$S\in\operatorname*{Tab}\left(  \lambda\right)  $ satisfying $\overline
{S}=\overline{S_{\max}}$. In other words, $\kappa$ is the number of all
$n$-tableaux $S$ that are row-equivalent to $S_{\max}$. Therefore,%
\[
\kappa=\left\vert \mathcal{R}\left(  S_{\max}\right)  \right\vert =\lambda
_{1}!\lambda_{2}!\lambda_{3}!\cdots
\]
(by \cite[Proposition 5.5.7 \textbf{(a)}]{sga}). Either way, we conclude that
$\kappa\neq0$. Thus, the right hand side of the equality
(\ref{lem.G.G*eT-nz.4}) has the form \textquotedblleft nonzero multiple of
$\overline{S_{\max}}$ plus a linear combination of smaller $n$%
-tabloids\textquotedblright, and therefore is nonzero. Hence, so is the left
hand side. This proves Lemma \ref{lem.G.G*eT-nz}.
\end{proof}

\begin{proof}
[Proof of Proposition \ref{prop.G.G*Slam}.]Let $i_{1},i_{2},\ldots,i_{k}%
,j_{1},j_{2},\ldots,j_{k}$ be the $2k$ distinct integers%
\[
T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ,\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)
\]
(these are distinct, since all entries of $T$ are distinct). Then,
\begin{align*}
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}\mathbf{e}_{T}
&  =G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }^{\ast}\mathbf{e}_{T}\\
&  =\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}\ \ \ \ \ \ \ \ \ \ \left(  \text{by Lemma
\ref{lem.G.G*eT1}}\right) \\
&  =p\cdot\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)
\\\text{is column-standard}}}\mathbf{e}_{S}\ \ \ \ \ \ \ \ \ \ \left(
\text{by Lemma \ref{lem.G.G*eT}}\right)  .
\end{align*}
Since $p=\prod_{i\geq1}\left\lfloor \lambda_{i}^{t}/2\right\rfloor !\neq0$ and
$\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
column-standard}}}\mathbf{e}_{S}\neq0$ (by Lemma \ref{lem.G.G*eT-nz}), the
right hand side of this equality is $\neq0$. Thus, so is the left hand side.
In other words, $G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}%
^{\ast}\mathbf{e}_{T}\neq0$. This proves Proposition \ref{prop.G.G*Slam}.
\end{proof}

\subsubsection{\label{subsubsec.gelfand.proof.cons}Consequences of
nonvanishing}

In order to draw conclusions from Proposition \ref{prop.G.G*Slam}, we need the
following fact about Specht modules:

\begin{lemma}
\label{lem.GI=Hom}Let $V$ be a left ideal of $\mathcal{A}$. Let $\lambda$ be a
partition of $n$. Then, $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},V\right)  \cong V^{\ast}\mathcal{S}^{\lambda}$ as
$\mathbf{k}$-vector spaces.
\end{lemma}

\begin{proof}
We have $\mathcal{S}^{\lambda}\cong\mathcal{A}\mathbf{E}_{T}$, where
$\mathbf{E}_{T}=\nabla_{\operatorname*{Col}T}^{-}\nabla_{\operatorname*{Row}%
T}$ is the Young symmetrizer corresponding to any $n$-tableau $T$ of shape
$\lambda$ (see \cite[\S 5.5]{sga} for all these notations). We know that
$\mathbf{E}_{T}$ is quasi-idempotent, i.e., we have $\mathbf{E}_{T}%
^{2}=h^{\lambda}\mathbf{E}_{T}$ for a nonzero scalar $h^{\lambda}$. Thus,
setting $f:=\dfrac{\mathbf{E}_{T}}{h^{\lambda}}$, we have $f^{2}=f$ and
$\mathcal{S}^{\lambda}\cong\mathcal{A}f$.

From $\mathcal{S}^{\lambda}\cong\mathcal{A}f$, we obtain the $\mathbf{k}%
$-vector space isomorphism%
\begin{align*}
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda
},V\right)   &  \cong\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{A}f,V\right) \\
&  \cong fV\ \ \ \ \ \ \ \ \ \ \left(  \text{by Lemma \ref{lem.etingof-idp},
since }f^{2}=f\right) \\
&  \cong\left(  fV\right)  ^{\ast}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since the antipode }\mathcal{A}\rightarrow\mathcal{A},\ x\mapsto
x^{\ast}\\
\text{is a }\mathbf{k}\text{-vector space isomorphism}%
\end{array}
\right) \\
&  =V^{\ast}f^{\ast}\ \ \ \ \ \ \ \ \ \ \left(  \text{since the antipode is a
}\mathbf{k}\text{-algebra anti-morphism}\right)  .
\end{align*}


But $f=\dfrac{\mathbf{E}_{T}}{h^{\lambda}}$ yields $f^{\ast}=\dfrac
{\mathbf{E}_{T}^{\ast}}{h^{\lambda}}=\dfrac{\mathbf{F}_{T}}{h^{\lambda}}$
using the notation $\mathbf{F}_{T}$ from \cite[Proposition 5.11.19]{sga}.
Thus, $\mathcal{A}f^{\ast}=\mathcal{A}\mathbf{F}_{T}$. But $\mathcal{A}%
\mathbf{F}_{T}\cong\mathcal{S}^{\lambda}$ by \cite[Proposition 5.11.19
\textbf{(c)}]{sga}. Hence, $\mathcal{A}f^{\ast}=\mathcal{A}\mathbf{F}_{T}%
\cong\mathcal{S}^{\lambda}$.

Moreover, $V^{\ast}$ is a right ideal of $\mathcal{A}$ (since $V$ is a left
ideal), and therefore $V^{\ast}=V^{\ast}\mathcal{A}$. Hence,
\[
\underbrace{V^{\ast}}_{=V^{\ast}\mathcal{A}}f^{\ast}=V^{\ast}%
\underbrace{\mathcal{A}f^{\ast}}_{\cong\mathcal{S}^{\lambda}}\cong V^{\ast
}\mathcal{S}^{\lambda}.
\]
Therefore, our above isomorphism becomes%
\[
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda
},V\right)  \cong V^{\ast}f^{\ast}\cong V^{\ast}\mathcal{S}^{\lambda}.
\]
This proves Lemma \ref{lem.GI=Hom}.
\end{proof}

\begin{corollary}
\label{cor.G.G*Slamneq0}Let $\lambda$ be a partition of $n$. Then:

\begin{enumerate}
\item[\textbf{(a)}] We have $\mathcal{G}^{\ast}\mathcal{S}^{\lambda}\neq0$.

\item[\textbf{(b)}] We have $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},\mathcal{G}\right)  \neq0$.
\end{enumerate}
\end{corollary}

\begin{proof}
\textbf{(a)} This follows from Proposition \ref{prop.G.G*Slam}, since
$G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}\in\mathcal{G}$ and
$\mathbf{e}_{T}\in\mathcal{S}^{\lambda}$. \medskip

\textbf{(b)} Lemma \ref{lem.GI=Hom} yields $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda},\mathcal{G}\right)
\cong\mathcal{G}^{\ast}\mathcal{S}^{\lambda}\neq0$ by part \textbf{(a)}. This
completes the proof of part \textbf{(b)}.
\end{proof}

\subsubsection{Proof of the Gelfand model}

\begin{proof}
[Proof of Theorem \ref{thm.G.gelfand}.]A \emph{matching} of $\left[  n\right]
$ shall mean a set of disjoint $2$-element subsets of $\left[  n\right]  $
(that is, an $m$-matching of $\left[  n\right]  $ for some $m\in\mathbb{Z}$).

Corollary \ref{cor.G.G*Slamneq0} \textbf{(b)} shows that each Specht module
$\mathcal{S}^{\lambda}$ for $\lambda\vdash n$ is contained in $\mathcal{G}$
(that is, can be embedded in $\mathcal{G}$). Since these Specht modules are
irreducible and mutually non-isomorphic, this shows that their direct sum
$\bigoplus\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}$ is contained in
$\mathcal{G}$ as well (since Proposition \ref{prop.sum-irrep-direct} says that
a sum of non-isomorphic irreducible representations is always a direct sum).

Now we shall show that the dimensions of $\bigoplus\limits_{\lambda\vdash
n}\mathcal{S}^{\lambda}$ and $\mathcal{G}$ are the same. Indeed, recall the
filtration $\left(  0=\mathcal{G}_{-1}\subseteq\mathcal{G}_{0}\subseteq
\cdots\subseteq\mathcal{G}_{n}=\mathcal{G}\right)  $ from Subsection
\ref{subsec.gelfand.filtr}. Thus,%
\begin{align}
\dim\mathcal{G}  &  =\sum_{m=0}^{n}\underbrace{\dim\left(  \mathcal{G}%
_{m}/\mathcal{G}_{m-1}\right)  }_{\substack{\leq\left(  \text{\# of
}m\text{-matchings of }\left[  n\right]  \right)  \\\text{(by
(\ref{eq.thm.G.gelfand.dimbound}))}}}\nonumber\\
&  \leq\sum_{m=0}^{n}\left(  \text{\# of }m\text{-matchings of }\left[
n\right]  \right) \nonumber\\
&  =\left(  \text{\# of matchings of }\left[  n\right]  \right) \nonumber\\
&  =\left(  \text{\# of involutions of }\left[  n\right]  \right)
\label{pf.thm.G.gelfand.dim-leq}%
\end{align}
(since the matchings of $\left[  n\right]  $ are in bijection with the
involutions of $\left[  n\right]  $\ \ \ \ \footnote{The bijection is
well-known and simple: If $M=\left\{  \left\{  i_{1},j_{1}\right\}
,\ \left\{  i_{2},j_{2}\right\}  ,\ \ldots,\ \left\{  i_{m},j_{m}\right\}
\right\}  $ is a matching of $\left[  n\right]  $, then $t_{i_{1},j_{1}%
}t_{i_{2},j_{2}}\cdots t_{i_{m},j_{m}}\in S_{n}$ is an involution of $\left[
n\right]  $. Conversely, if $\sigma$ is an involution on $\left[  n\right]  $,
then the corresponding matching is $\left\{  \left\{  i,\sigma\left(
i\right)  \right\}  \ \mid\ i\in\left[  n\right]  \text{ with }\sigma\left(
i\right)  \neq i\right\}  $.}). But a well-known enumerative result (see,
e.g., \cite[Proposition 1.3.2]{Leeuwe95} or \cite[Corollary 5.21.11]{sga})
says that%
\begin{align}
&  \left(  \text{\# of involutions of }\left[  n\right]  \right) \nonumber\\
&  =\sum_{\lambda\vdash n}\left(  \text{\# of standard tableaux of shape
}\lambda\right)  . \label{eq.num-inv-syt}%
\end{align}
Furthermore, it is well-known that%
\begin{equation}
\dim\mathcal{S}^{\lambda}=\left(  \text{\# of standard tableaux of shape
}\lambda\right)  \label{eq.dimSlam}%
\end{equation}
for each $\lambda\vdash n$. Hence, (\ref{pf.thm.G.gelfand.dim-leq}) becomes
\begin{align*}
\dim\mathcal{G}  &  \leq\left(  \text{\# of involutions of }\left[  n\right]
\right) \\
&  =\sum_{\lambda\vdash n}\underbrace{\left(  \text{\# of standard tableaux of
shape }\lambda\right)  }_{\substack{=\dim\mathcal{S}^{\lambda}\\\text{(by
(\ref{eq.dimSlam}))}}}\ \ \ \ \ \ \ \ \ \ \left(  \text{by
(\ref{eq.num-inv-syt})}\right) \\
&  =\sum_{\lambda\vdash n}\dim\mathcal{S}^{\lambda}=\dim\bigoplus
\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}.
\end{align*}


Since $\bigoplus\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}$ is contained
in $\mathcal{G}$, while $\dim\mathcal{G}\leq\dim\bigoplus\limits_{\lambda
\vdash n}\mathcal{S}^{\lambda}$ (as we have just shown), we thus conclude
that the direct sum $\bigoplus\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}$
is all of $\mathcal{G}$. In other words, $\mathcal{G}$ is a Gelfand model, qed.
\end{proof}

\subsection{Further corollaries}

Having proved Theorem \ref{thm.G.gelfand}, let us now reap some rewards from
our above arguments.

\begin{corollary}
\label{cor.G.G*Slamdim1}Let $\lambda$ be a partition of $n$. Then:

\begin{enumerate}
\item[\textbf{(a)}] We have $\dim\left(  \mathcal{G}^{\ast}\mathcal{S}%
^{\lambda}\right)  =1$.

\item[\textbf{(b)}] We have $\dim\left(  \operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda},\mathcal{G}\right)
\right)  =1$.

\item[\textbf{(c)}] We have
\[
\mathcal{G}^{\ast}\mathcal{S}^{\lambda}=\mathbf{k}\cdot\sum_{\substack{S\in
\operatorname*{Tab}\left(  \lambda\right)  \\\text{is column-standard}%
}}\mathbf{e}_{S}.
\]

\end{enumerate}
\end{corollary}

\begin{proof}
\textbf{(b)} Theorem \ref{thm.G.gelfand} yields that $\mathcal{G}$ is
multiplicity-free as a left $\mathcal{A}$-module. Meanwhile, $\mathcal{S}%
^{\lambda}$ is an irreducible representation of $S_{n}$. Thus, Lemma
\ref{lem.HomJK=1} (applied to $J=\mathcal{G}$ and $K=\mathcal{S}^{\lambda}$)
yields $\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},\mathcal{G}\right)  \right)  \leq1$. But Corollary
\ref{cor.G.G*Slamneq0} \textbf{(b)} yields $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda},\mathcal{G}\right)
\neq0$, thus $\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},\mathcal{G}\right)  \right)  \geq1$. Combining these two
inequalities, we obtain $\dim\left(  \operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  \mathcal{S}^{\lambda},\mathcal{G}\right)  \right)  =1$. This proves
part \textbf{(b)}. \medskip

\textbf{(a)} Lemma \ref{lem.GI=Hom} yields $\operatorname*{Hom}%
\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda},\mathcal{G}\right)
\cong\mathcal{G}^{\ast}\mathcal{S}^{\lambda}$. Hence, $\mathcal{G}^{\ast
}\mathcal{S}^{\lambda}\cong\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},\mathcal{G}\right)  $, so that $\dim\left(
\mathcal{G}^{\ast}\mathcal{S}^{\lambda}\right)  =\dim\left(
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda
},\mathcal{G}\right)  \right)  =1$ by part \textbf{(b)}. This proves part
\textbf{(a)}. \medskip

\textbf{(c)} Part \textbf{(a)} shows that $\dim\left(  \mathcal{G}^{\ast
}\mathcal{S}^{\lambda}\right)  =1$. Hence, in order to prove part
\textbf{(c)}, it suffices to show that the vector $\sum_{\substack{S\in
\operatorname*{Tab}\left(  \lambda\right)  \\\text{is column-standard}%
}}\mathbf{e}_{S}$ is nonzero and belongs to $\mathcal{G}^{\ast}\mathcal{S}%
^{\lambda}$. The former follows from Lemma \ref{lem.G.G*eT-nz}. The latter is
because $\mathcal{G}^{\ast}\mathcal{S}^{\lambda}$ contains%
\begin{align*}
&  G_{T\left(  c_{1}\right)  ,\ T\left(  c_{2}\right)  ,\ \ldots,\ T\left(
c_{k}\right)  ;\ T\left(  d_{1}\right)  ,\ T\left(  d_{2}\right)
,\ \ldots,\ T\left(  d_{k}\right)  }^{\ast}\mathbf{e}_{T}\\
&  =\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)  \\\text{is
domino-standard}}}\mathbf{e}_{S}\ \ \ \ \ \ \ \ \ \ \left(  \text{by Lemma
\ref{lem.G.G*eT1}}\right) \\
&  =p\cdot\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)
\\\text{is column-standard}}}\mathbf{e}_{S}\ \ \ \ \ \ \ \ \ \ \left(
\text{by Lemma \ref{lem.G.G*eT}}\right)
\end{align*}
and thus also $\sum_{\substack{S\in\operatorname*{Tab}\left(  \lambda\right)
\\\text{is column-standard}}}\mathbf{e}_{S}$ (since $p$ is invertible).
\end{proof}

\subsection{The decomposition of $\mathcal{G}_{m}$ into Specht modules}

Our next goal is to extend Theorem \ref{thm.G.gelfand} from the entire
representation $\mathcal{G}$ to its submodules $\mathcal{G}_{m}$, by
identifying which Specht modules $\mathcal{S}^{\lambda}$ it contains. This
requires some precursory work.

For each partition $\lambda$, let us set
\begin{equation}
k_{\lambda}:=\sum_{i\geq1}\left\lfloor \lambda_{i}^{t}/2\right\rfloor .
\label{eq.klam=}%
\end{equation}


If $m\in\mathbb{Z}$, and if $S$ is a set, then an $m$\emph{-matching} of $S$
shall mean a set of $m$ disjoint $2$-element subsets of $S$. For example,
$\left\{  \left\{  1,5\right\}  ,\ \left\{  2,3\right\}  \right\}  $ is a
$2$-matching of $\left[  7\right]  $. An $m$-matching of $\left[  n\right]  $
will simply be called an \textquotedblleft$m$-matching\textquotedblright,
without mention of $\left[  n\right]  $. We recall a well-known formula for
counting $m$-matchings:

\begin{proposition}
\label{prop.match-count}Let $m\in\mathbb{N}$. Let $N$ be an $n$-element set.
Then,%
\[
\left(  \text{\# of }m\text{-matchings of }N\right)  =\dbinom{n}{2m}%
\dfrac{\left(  2m\right)  !}{2^{m}m!}.
\]

\end{proposition}

\begin{proof}
We shall use the following terminology: If $M$ is an $m$-matching of $N$, then:

\begin{itemize}
\item The elements of $N$ that are contained in the edges of $M$ are called
the \emph{participants} of $M$.

\item For any participant $p$ of $M$, there is a unique participant $q$ of $M$
such that $\left\{  p,q\right\}  \in M$; this $q$ will be called the
$M$\emph{-partner} of $p$.
\end{itemize}

Note that any $m$-matching $M$ of $N$ necessarily has exactly $2m$
participants (since it has $m$ edges, all of which are disjoint, and each of
which contains $2$ elements).

Now, pick an arbitrary total order on the set $N$. Each $m$-matching $M$ of
$N$ can be constructed as follows:

\begin{enumerate}
\item Choose the $2m$ participants of $M$. This amounts to choosing a
$2m$-element subset of $N$, and thus can be done in $\dbinom{n}{2m}$ ways.

\item Let $p$ be the smallest participant of $M$. Choose the $M$-partner $q$
of $p$. This can be done in $2m-1$ ways (since $q$ must be one of the $2m$
participants but also differ from $p$). The edge $\left\{  p,q\right\}  $ will
belong to $M$. We shall refer to the participants $p$ and $q$ as
\emph{matched} (since they have already been assigned their $M$-partners), and
to the remaining $2m-2$ participants as \emph{unmatched}.

\item Let $p^{\prime}$ be the smallest unmatched participant of $M$. Choose
the $M$-partner $q^{\prime}$ of $p^{\prime}$. This can be done in $2m-3$ ways
(since $q^{\prime}$ must be one of the $2m-2$ unmatched participants but also
differ from $p^{\prime}$). The edge $\left\{  p^{\prime},q^{\prime}\right\}  $
will belong to $M$. The participants $p^{\prime}$ and $q^{\prime}$ now change
their status from \textquotedblleft unmatched\textquotedblright\ to
\textquotedblleft matched\textquotedblright, so that $2m-4$ participants
remain unmatched.

\item Let $p^{\prime\prime}$ be the smallest unmatched participant of $M$.
Choose the $M$-partner $q^{\prime\prime}$ of $p^{\prime\prime}$. This can be
done in $2m-5$ ways (since $q^{\prime\prime}$ must be one of the $2m-4$
unmatched participants but also differ from $p^{\prime\prime}$). The edge
$\left\{  p^{\prime\prime},q^{\prime\prime}\right\}  $ will belong to $M$. The
participants $p^{\prime\prime}$ and $q^{\prime\prime}$ now change their status
from \textquotedblleft unmatched\textquotedblright\ to \textquotedblleft
matched\textquotedblright, so that $2m-6$ participants remain unmatched.

\item And so on, until we are left with no more unmatched participants, and we
have discovered all $m$ edges in $M$.
\end{enumerate}

The total number of ways to perform this construction is
\[
\dbinom{n}{2m}\cdot\underbrace{\left(  2m-1\right)  \left(  2m-3\right)
\left(  2m-5\right)  \cdots1}_{=\dfrac{\left(  2m\right)  !}{\left(
2m\right)  \left(  2m-2\right)  \left(  2m-4\right)  \cdots2}=\dfrac{\left(
2m\right)  !}{2^{m}m!}}=\dbinom{n}{2m}\dfrac{\left(  2m\right)  !}{2^{m}m!}.
\]
Hence, the \# of $m$-matchings of $N$ is $\dbinom{n}{2m}\dfrac{\left(
2m\right)  !}{2^{m}m!}$. This proves Proposition \ref{prop.match-count}.
\end{proof}

Let us now show that the inequality in Lemma \ref{lem.G.matchbound} is an equality:

\begin{corollary}
\label{cor.G.matchbound.eq}Let $m\in\mathbb{Z}$. Then,
\[
\dim\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  =\left(  \text{\# of
}m\text{-matchings of }\left[  n\right]  \right)  .
\]

\end{corollary}

\begin{proof}
Forget that we fixed $m$. We must prove that
\begin{equation}
\dim\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  =\left(  \text{\# of
}m\text{-matchings of }\left[  n\right]  \right)
\label{pf.cor.G.matchbound.eq.goal}%
\end{equation}
for all $m\in\mathbb{Z}$. However, in our above proof of Theorem
\ref{thm.G.gelfand}, we added together the inequalities%
\begin{equation}
\dim\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  \leq\left(  \text{\# of
}m\text{-matchings of }\left[  n\right]  \right)
\label{pf.cor.G.matchbound.eq.1}%
\end{equation}
for all $m\in\left\{  0,1,\ldots,n\right\}  $, and obtained the inequality
$\dim\mathcal{G}\leq\dim\bigoplus\limits_{\lambda\vdash n}\mathcal{S}%
^{\lambda}$, which is actually an equality (since we showed shortly thereafter
that the direct sum $\bigoplus\limits_{\lambda\vdash n}\mathcal{S}^{\lambda}$
is $\mathcal{G}$). Hence, all the inequalities (\ref{pf.cor.G.matchbound.eq.1}%
) must be equalities. In other words, (\ref{pf.cor.G.matchbound.eq.goal})
holds for all $m\in\left\{  0,1,\ldots,n\right\}  $. Since
(\ref{pf.cor.G.matchbound.eq.goal}) also holds for all $m\in\mathbb{Z}%
\setminus\left\{  0,1,\ldots,n\right\}  $ (since for all such $m$, both sides
of (\ref{pf.cor.G.matchbound.eq.goal}) are $0$), we thus conclude that
(\ref{pf.cor.G.matchbound.eq.goal}) holds for all $m\in\mathbb{Z}$. This
proves Corollary \ref{cor.G.matchbound.eq}.
\end{proof}

Next, we need a combinatorial lemma:

\begin{lemma}
\label{lem.G.Gmcount}Let $m\in\mathbb{Z}$. Then,%
\begin{equation}
\left(  \text{\# of }m\text{-matchings of }\left[  n\right]  \right)
=\sum_{\substack{\lambda\vdash n;\\k_{\lambda}=m}}\left(  \text{\# of standard
tableaux of shape }\lambda\right)  .\nonumber
\end{equation}

\end{lemma}

\begin{proof}
[First proof of Lemma \ref{lem.G.Gmcount} (sketched).]Each $m$-matching
$M=\left\{  \left\{  i_{1},j_{1}\right\}  ,\ \left\{  i_{2},j_{2}\right\}
,\ \ldots,\ \left\{  i_{m},j_{m}\right\}  \right\}  $ of $\left[  n\right]  $
gives rise to an involution $t_{i_{1},j_{1}}t_{i_{2},j_{2}}\cdots
t_{i_{m},j_{m}}\in S_{n}$ that has exactly $n-2m$ fixed points (viz., all
elements of $\left[  n\right]  $ other than the $2m$ numbers $i_{1}%
,j_{1},i_{2},j_{2},\ldots,i_{m},j_{m}$). Thus, we obtain a bijection from the
set $\left\{  m\text{-matchings of }\left[  n\right]  \right\}  $ to the set
\newline$\left\{  \text{involutions of }\left[  n\right]  \text{ with exactly
}n-2m\text{ fixed points}\right\}  $. Hence,%
\begin{align}
&  \left(  \text{\# of }m\text{-matchings of }\left[  n\right]  \right)
\nonumber\\
&  =\left(  \text{\# of involutions of }\left[  n\right]  \text{ with exactly
}n-2m\text{ fixed points}\right)  . \label{pf.lem.G.Gmcount.1}%
\end{align}


But it is well-known that there is a bijection between the involutions of
$\left[  n\right]  $ and the standard tableaux of all shapes $\lambda\vdash
n$, given by the RSK algorithm (see, e.g., \cite[last paragraph of
\S 4.1]{Fulton97} or \cite[proof of Corollary 7.13.9]{Stanley-EC2}).
Furthermore, it is known (see \cite[\S 4.2, Exercise 4]{Fulton97} or
\cite[Exercise 7.28 (a)]{Stanley-EC2}) that when this bijection sends an
involution $w$ to a standard tableau $T$ of some shape $\lambda$, we have%
\begin{align*}
\left(  \text{\# of fixed points of }w\right)   &  =\left(  \text{\# of
odd-length columns of }Y\left(  \lambda\right)  \right) \\
&  =n-2k_{\lambda}.
\end{align*}
Hence, this bijection restricts to a bijection between the involutions of
$\left[  n\right]  $ with exactly $n-2m$ fixed points and the standard
tableaux of all shapes $\lambda\vdash n$ satisfying $n-2k_{\lambda}=n-2m$,
that is, $k_{\lambda}=m$. Thus, by the bijection principle,%
\begin{align*}
&  \left(  \text{\# of involutions of }\left[  n\right]  \text{ with exactly
}n-2m\text{ fixed points}\right) \\
&  =\sum_{\substack{\lambda\vdash n;\\k_{\lambda}=m}}\left(  \text{\# of
standard tableaux of shape }\lambda\right)  .
\end{align*}


In view of this, we can rewrite (\ref{pf.lem.G.Gmcount.1}) as%
\[
\left(  \text{\# of }m\text{-matchings of }\left[  n\right]  \right)
=\sum_{\substack{\lambda\vdash n;\\k_{\lambda}=m}}\left(  \text{\# of standard
tableaux of shape }\lambda\right)  .
\]
This proves Lemma \ref{lem.G.Gmcount}.
\end{proof}

\begin{proof}
[Second proof of Lemma \ref{lem.G.Gmcount} (sketched).]The following proof
avoids the use of the RSK algorithm. Instead, we proceed similarly to the
proof of (\ref{eq.num-inv-syt}) in \cite[\S 1.3]{Leeuwe95}. (See also
\cite[\S 3.2]{Gri-hook-talk}.)

Given two partitions $\lambda$ and $\mu$, we write $\lambda\lessdot\mu$ if and
only if $Y\left(  \lambda\right)  \subseteq Y\left(  \mu\right)  $ and
$\left\vert Y\left(  \mu\right)  \setminus Y\left(  \lambda\right)
\right\vert =1$ (in other words: if and only if $\mu$ covers $\lambda$ in
Young's lattice). Note that $\lambda\lessdot\mu$ entails $\left\vert
\lambda\right\vert =\left\vert \mu\right\vert -1$.

For any $n\in\mathbb{N}$, we define the polynomial%
\[
\sigma\left(  n\right)  :=\sum_{\lambda\vdash n}f^{\lambda}t^{k_{\lambda}}%
\in\mathbb{Z}\left[  t\right]  ,
\]
where $f^{\lambda}$ is the number of standard tableaux of shape $\lambda$. Our
main goal will be proving the recurrence
\begin{equation}
\sigma\left(  n\right)  =\sigma\left(  n-1\right)  +\left(  n-1\right)
t\ \sigma\left(  n-2\right)  \label{pf.lem.G.Gmcount.rec}%
\end{equation}
for all $n\geq2$.

To this end, we begin by showing the following statement (which generalizes
\cite[(1)]{Leeuwe95}):

\begin{statement}
\textit{Sublemma A:} For any partition $\lambda$, we have%
\[
\sum_{\substack{\mu\text{ is a partition;}\\\lambda\lessdot\mu}}t^{k_{\mu}%
}=t^{k_{\lambda}}+t\sum_{\substack{\mu\text{ is a partition;}\\\mu
\lessdot\lambda}}t^{k_{\mu}}.
\]

\end{statement}

\begin{proof}
[Proof of Sublemma A]Regard partitions as infinite sequences of integers (with
infinitely many $0$'s in their tails), and regard such sequences as elements
of the $\mathbb{Z}$-module $\mathbb{Z}^{\infty}$. In particular, write
$\lambda$ as $\left(  \lambda_{1},\lambda_{2},\lambda_{3},\ldots\right)  $ (so
that $\lambda_{i}=0$ for all sufficiently large $i$). Let $\left\{
i_{1}<i_{2}<\cdots<i_{m}\right\}  $ be the set of all $i\geq1$ satisfying
$\lambda_{i}>\lambda_{i+1}$. For each $p\geq1$, let $\epsilon_{p}$ be the
vector $\left(  0,0,\ldots,0,1,0,0,0,\ldots\right)  \in\mathbb{Z}^{\infty}$
with the $1$ in its $p$-th position. Then:\footnote{Here is an example,
showing a partition $\lambda$ as well as all partitions $\mu$ satisfying
$\lambda\lessdot\mu$ (these are obtained by adding one of the green cells to
the Young diagram $Y\left(  \lambda\right)  $) as well as all partitions
$\mu$ satisfying $\mu\lessdot\lambda$ (these are obtained by removing one of
the red cells from the Young diagram $Y\left(  \lambda\right)  $):%
\[%
%TCIMACRO{\TeXButton{tikz tableau}{\begin{tikzpicture}%
%[scale=0.7, rotate=+90, xscale=-1]
%\fill[green!60!black] (0,-10) -- (0,-11) -- (1,-11) -- (1,-10) -- cycle;
%\fill[red] (3,-10) -- (3,-9) -- (2,-9) -- (2,-10) -- cycle;
%\fill[green!60!black] (3,-6) -- (3,-7) -- (4,-7) -- (4,-6) -- cycle;
%\fill[red] (5,-6) -- (5,-5) -- (4,-5) -- (4,-6) -- cycle;
%\fill[green!60!black] (5,-3) -- (5,-4) -- (6,-4) -- (6,-3) -- cycle;
%\fill[red] (8,-3) -- (7,-3) -- (7,-2) -- (8,-2) -- cycle;
%\fill[green!60!black] (8,2) -- (9,2) -- (9,1) -- (8,1) -- cycle;
%\draw
%[thick] (0,2) -- (8,2) -- (8,-3) -- (5,-3) -- (5,-6) -- (3,-6) -- (3,-10) -- (0,-10) -- cycle;
%\node(a) at (0.5, 2.5) {$1$};
%\node(b) at (2.5, 2.5) {$i_1$};
%\node(c) at (3.5, 3) {$i_1+1$};
%\node(d) at (4.5, 2.5) {$i_2$};
%\node(e) at (5.5, 3) {$i_2+1$};
%\node(f) at (7.5, 2.5) {$i_3$};
%\node(g) at (8.5, 3) {$i_3+1$};
%\end{tikzpicture}}}%
%BeginExpansion
\begin{tikzpicture}[scale=0.7, rotate=+90, xscale=-1]
\fill[green!60!black] (0,-10) -- (0,-11) -- (1,-11) -- (1,-10) -- cycle;
\fill[red] (3,-10) -- (3,-9) -- (2,-9) -- (2,-10) -- cycle;
\fill[green!60!black] (3,-6) -- (3,-7) -- (4,-7) -- (4,-6) -- cycle;
\fill[red] (5,-6) -- (5,-5) -- (4,-5) -- (4,-6) -- cycle;
\fill[green!60!black] (5,-3) -- (5,-4) -- (6,-4) -- (6,-3) -- cycle;
\fill[red] (8,-3) -- (7,-3) -- (7,-2) -- (8,-2) -- cycle;
\fill[green!60!black] (8,2) -- (9,2) -- (9,1) -- (8,1) -- cycle;
\draw
[thick] (0,2) -- (8,2) -- (8,-3) -- (5,-3) -- (5,-6) -- (3,-6) -- (3,-10) -- (0,-10) -- cycle;
\node(a) at (0.5, 2.5) {$1$};
\node(b) at (2.5, 2.5) {$i_1$};
\node(c) at (3.5, 3) {$i_1+1$};
\node(d) at (4.5, 2.5) {$i_2$};
\node(e) at (5.5, 3) {$i_2+1$};
\node(f) at (7.5, 2.5) {$i_3$};
\node(g) at (8.5, 3) {$i_3+1$};
\end{tikzpicture}%
%EndExpansion
\]
}

\begin{itemize}
\item The partitions $\mu$ satisfying $\lambda\lessdot\mu$ are precisely%
\[
\lambda+\epsilon_{1},\ \ \lambda+\epsilon_{i_{1}+1},\ \ \lambda+\epsilon
_{i_{2}+1},\ \ \ldots,\ \ \lambda+\epsilon_{i_{m}+1},
\]
and their respective numbers $k_{\mu}$ are%
\begin{align}
k_{\lambda+\epsilon_{1}}  &  =k_{\lambda}\ \ \ \ \ \ \ \ \ \ \text{and}%
\nonumber\\
k_{\lambda+\epsilon_{i_{p}+1}}  &  =%
\begin{cases}
k_{\lambda}, & \text{if }i_{p}\text{ is even};\\
k_{\lambda}+1, & \text{if }i_{p}\text{ is odd}.
\end{cases}
\label{pf.lem.G.Gmcount.a.c1.2}%
\end{align}
Hence,%
\begin{equation}
\sum_{\substack{\mu\text{ is a partition;}\\\lambda\lessdot\mu}}t^{k_{\mu}%
}=t^{k_{\lambda}}+\sum_{p=1}^{m}%
\begin{cases}
t^{k_{\lambda}}, & \text{if }i_{p}\text{ is even};\\
t^{k_{\lambda}+1}, & \text{if }i_{p}\text{ is odd}.
\end{cases}
\label{pf.lem.G.Gmcount.a.c1.3}%
\end{equation}


\item The partitions $\mu$ satisfying $\mu\lessdot\lambda$ are precisely%
\[
\lambda-\epsilon_{i_{1}},\ \ \lambda-\epsilon_{i_{2}},\ \ \ldots
,\ \ \lambda-\epsilon_{i_{m}},
\]
and their respective numbers $k_{\mu}$ are%
\begin{equation}
k_{\lambda-\epsilon_{i_{p}}}=%
\begin{cases}
k_{\lambda}-1, & \text{if }i_{p}\text{ is even};\\
k_{\lambda}, & \text{if }i_{p}\text{ is odd}.
\end{cases}
\label{pf.lem.G.Gmcount.a.c2.2}%
\end{equation}
Hence,%
\begin{equation}
\sum_{\substack{\mu\text{ is a partition;}\\\mu\lessdot\lambda}}t^{k_{\mu}%
}=\sum_{p=1}^{m}%
\begin{cases}
t^{k_{\lambda}-1}, & \text{if }i_{p}\text{ is even};\\
t^{k_{\lambda}}, & \text{if }i_{p}\text{ is odd}.
\end{cases}
\label{pf.lem.G.Gmcount.a.c2.3}%
\end{equation}

\end{itemize}

Now, our goal is to prove that
\[
\sum_{\substack{\mu\text{ is a partition;}\\\lambda\lessdot\mu}}t^{k_{\mu}%
}=t^{k_{\lambda}}+t\sum_{\substack{\mu\text{ is a partition;}\\\mu
\lessdot\lambda}}t^{k_{\mu}}.
\]
Using (\ref{pf.lem.G.Gmcount.a.c1.3}) and (\ref{pf.lem.G.Gmcount.a.c2.3}), we
can rewrite this as%
\[
t^{k_{\lambda}}+\sum_{p=1}^{m}%
\begin{cases}
t^{k_{\lambda}}, & \text{if }i_{p}\text{ is even};\\
t^{k_{\lambda}+1}, & \text{if }i_{p}\text{ is odd}%
\end{cases}
=t^{k_{\lambda}}+t\sum_{p=1}^{m}%
\begin{cases}
t^{k_{\lambda}-1}, & \text{if }i_{p}\text{ is even};\\
t^{k_{\lambda}}, & \text{if }i_{p}\text{ is odd.}%
\end{cases}
\]
But this is obvious. Hence, Sublemma A is proved.
\end{proof}

\begin{statement}
\textit{Sublemma B:} Given two partitions $\lambda\neq\nu$, we have%
\begin{align*}
&  \left(  \text{\# of partitions }\mu\text{ such that }\lambda\lessdot
\mu\text{ and }\nu\lessdot\mu\right) \\
&  =\left(  \text{\# of partitions }\mu\text{ such that }\mu\lessdot
\lambda\text{ and }\mu\lessdot\nu\right)  .
\end{align*}

\end{statement}

\begin{proof}
[Proof of Sublemma B]This is \cite[(2)]{Leeuwe95}.
\end{proof}

For the rest of this proof, all summation indices are understood to be
partitions. Thus, for example, the summation sign \textquotedblleft$\sum
_{\mu;\ \mu\lessdot\lambda}$\textquotedblright\ means a sum over all
partitions $\mu$ satisfying $\mu\lessdot\lambda$.

\begin{statement}
\textit{Sublemma C:} For any nonempty partition $\lambda$, we have%
\[
f^{\lambda}=\sum_{\mu;\ \mu\lessdot\lambda}f^{\mu}.
\]

\end{statement}

\begin{proof}
[Proof of Sublemma C]This is \cite[(3)]{Leeuwe95}, and is pretty obvious: Any
standard tableau of shape $\lambda$ must have the number $n$ in exactly one of
its \textquotedblleft corner cells\textquotedblright\ (i.e., those cells of
$Y\left(  \lambda\right)  $ whose removal from $Y\left(  \lambda\right)  $
would yield the diagram $Y\left(  \mu\right)  $ of a partition $\mu$), and
upon removing this entry, produces a standard tableau of shape $\mu$ for a
unique partition $\mu$ satisfying $\mu\lessdot\lambda$. This gives a bijection
from the set of all standard tableaux of shape $\lambda$ to the set of all
standard tableaux of shape $\mu$ with $\mu\lessdot\lambda$. Sublemma C follows
by the bijection principle.
\end{proof}

\begin{statement}
\textit{Sublemma D:} For any partition $\lambda$, we have%
\[
\sum_{\mu;\ \lambda\lessdot\mu}f^{\mu}=\left(  \left\vert \lambda\right\vert
+1\right)  f^{\lambda}.
\]

\end{statement}

\begin{proof}
[Proof of Sublemma D]See \cite[Lemma 1.3.1]{Leeuwe95} or \cite[Lemma
D]{Gri-hook-talk}.
\end{proof}

Now, let $n\geq2$. Then,
\begin{align*}
\sigma\left(  n\right)   &  =\sum_{\lambda\vdash n}f^{\lambda}t^{k_{\lambda}%
}=\sum_{\lambda\vdash n}\left(  \sum_{\mu;\ \mu\lessdot\lambda}f^{\mu}\right)
t^{k_{\lambda}}\ \ \ \ \ \ \ \ \ \ \left(  \text{by Sublemma C}\right) \\
&  =\sum_{\mu}f^{\mu}\sum_{\substack{\lambda\vdash n;\\\mu\lessdot\lambda
}}t^{k_{\lambda}}\\
&  =\sum_{\mu\vdash n-1}f^{\mu}\sum_{\lambda;\ \mu\lessdot\lambda
}t^{k_{\lambda}}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{because under the condition }\mu\lessdot\lambda\text{,}\\
\text{we have }\left\vert \mu\right\vert =\left\vert \lambda\right\vert
-1\text{, and thus the}\\
\text{condition }\lambda\vdash n\text{ is equivalent to }\mu\vdash n-1
\end{array}
\right) \\
&  =\sum_{\lambda\vdash n-1}f^{\lambda}\sum_{\mu;\ \lambda\lessdot\mu
}t^{k_{\mu}}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we have renamed }\mu\text{ and }\lambda\\
\text{as }\lambda\text{ and }\mu
\end{array}
\right) \\
&  =\sum_{\lambda\vdash n-1}f^{\lambda}\left(  t^{k_{\lambda}}+t\sum
_{\mu;\ \mu\lessdot\lambda}t^{k_{\mu}}\right)  \ \ \ \ \ \ \ \ \ \ \left(
\text{by Sublemma A}\right) \\
&  =\underbrace{\sum_{\lambda\vdash n-1}f^{\lambda}t^{k_{\lambda}}}%
_{=\sigma\left(  n-1\right)  }+\underbrace{\sum_{\lambda\vdash n-1}f^{\lambda
}t\sum_{\mu;\ \mu\lessdot\lambda}t^{k_{\mu}}}_{\substack{=\sum_{\mu\vdash
n-1}f^{\mu}t\sum_{\lambda;\ \lambda\lessdot\mu}t^{k_{\lambda}}\\\text{(here,
we renamed }\lambda\text{ and }\mu\text{ as }\mu\text{ and }\lambda\text{)}%
}}\\
&  =\sigma\left(  n-1\right)  +\sum_{\mu\vdash n-1}f^{\mu}t\sum_{\lambda
;\ \lambda\lessdot\mu}t^{k_{\lambda}}.
\end{align*}
In view of
\begin{align*}
&  \sum_{\mu\vdash n-1}f^{\mu}t\sum_{\lambda;\ \lambda\lessdot\mu
}t^{k_{\lambda}}\\
&  =\sum_{\mu}f^{\mu}t\sum_{\substack{\lambda\vdash n-2;\\\lambda\lessdot\mu
}}t^{k_{\lambda}}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{because under the condition }\lambda\lessdot\mu\text{,}\\
\text{we have }\left\vert \lambda\right\vert =\left\vert \mu\right\vert
-1\text{, and thus the}\\
\text{condition }\mu\vdash n-1\text{ is equivalent to }\lambda\vdash n-2
\end{array}
\right) \\
&  =\sum_{\lambda\vdash n-2}t^{k_{\lambda}}t\underbrace{\sum_{\mu
;\ \lambda\lessdot\mu}f^{\mu}}_{\substack{=\left(  \left\vert \lambda
\right\vert +1\right)  f^{\lambda}\\\text{(by Sublemma D)}}}=\sum
_{\lambda\vdash n-2}t^{k_{\lambda}}t\underbrace{\left(  \left\vert
\lambda\right\vert +1\right)  }_{\substack{=n-1\\\text{(since }\lambda\vdash
n-2\text{)}}}f^{\lambda}\\
&  =\left(  n-1\right)  t\sum_{\lambda\vdash n-2}t^{k_{\lambda}}f^{\lambda
}=\left(  n-1\right)  t\underbrace{\sum_{\lambda\vdash n-2}f^{\lambda
}t^{k_{\lambda}}}_{=\sigma\left(  n-2\right)  }=\left(  n-1\right)
t\ \sigma\left(  n-2\right)  ,
\end{align*}
we can rewrite this as
\[
\sigma\left(  n\right)  =\sigma\left(  n-1\right)  +\left(  n-1\right)
t\ \sigma\left(  n-2\right)  .
\]
This proves (\ref{pf.lem.G.Gmcount.rec}).

Let $\left[  t^{m}\right]  f$ denote the coefficient of $t^{m}$ in an
arbitrary polynomial $f\in\mathbb{Z}\left[  t\right]  $. (This is $0$ when
$m<0$ or $m>\deg f$.) The recurrence (\ref{pf.lem.G.Gmcount.rec}) can be
rewritten as the recurrence%
\[
\left[  t^{m}\right]  \left(  \sigma\left(  n\right)  \right)  =\left[
t^{m}\right]  \left(  \sigma\left(  n-1\right)  \right)  +\left(  n-1\right)
\cdot\left[  t^{m-1}\right]  \left(  \sigma\left(  n-2\right)  \right)
\]
for the coefficients of $\sigma\left(  n\right)  $, where $m\in\mathbb{Z}$ is
arbitrary and where $n\geq2$. Using this recurrence (and the fact that
$\sigma\left(  0\right)  =\sigma\left(  1\right)  =1$), it is now
straightforward to see that%
\[
\left[  t^{m}\right]  \left(  \sigma\left(  n\right)  \right)  =\dbinom{n}%
{2m}\dfrac{\left(  2m\right)  !}{2^{m}m!}\ \ \ \ \ \ \ \ \ \ \text{for each
}m\in\mathbb{N}.
\]
But the left hand side $\left[  t^{m}\right]  \left(  \sigma\left(  n\right)
\right)  $ of this equality is $\sum_{\substack{\lambda\vdash n;\\k_{\lambda
}=m}}f^{\lambda}$ by the definition of $\sigma\left(  n\right)  $. Hence, for
each $m\in\mathbb{N}$, we have%
\[
\sum_{\substack{\lambda\vdash n;\\k_{\lambda}=m}}f^{\lambda}=\left[
t^{m}\right]  \left(  \sigma\left(  n\right)  \right)  =\dbinom{n}{2m}%
\dfrac{\left(  2m\right)  !}{2^{m}m!}=\left(  \text{\# of }m\text{-matchings
of }\left[  n\right]  \right)
\]
(by Proposition \ref{prop.match-count}). This equality also holds for negative
integers $m$ (since both sides are $0$). Thus, it holds for all $m\in
\mathbb{Z}$. Lemma \ref{lem.G.Gmcount} is proved again.
\end{proof}

We can now describe the submodules $\mathcal{G}_{m}$ of $\mathcal{G}$ and
their quotients as representations of $S_{n}$:

\begin{theorem}
\label{thm.G.Gm=sum}Let $m\in\mathbb{Z}$. Then,%
\[
\mathcal{G}_{m}\cong\bigoplus_{\substack{\lambda\vdash n;\\k_{\lambda}\leq
m}}\mathcal{S}^{\lambda}\ \ \ \ \ \ \ \ \ \ \text{and}%
\ \ \ \ \ \ \ \ \ \ \mathcal{G}_{m}/\mathcal{G}_{m-1}\cong\bigoplus
_{\substack{\lambda\vdash n;\\k_{\lambda}=m}}\mathcal{S}^{\lambda}.
\]

\end{theorem}

\begin{proof}
Let $\lambda$ be a partition of $n$ satisfying $k_{\lambda}\leq m$. Then, we
can choose any $n$-tableau $T$ of shape $\lambda$, and then Proposition
\ref{prop.G.G*Slam} (applied to $k=k_{\lambda}$) shows that there exist some
$2k_{\lambda}$ distinct elements $i_{1},i_{2},\ldots,i_{k_{\lambda}}%
,j_{1},j_{2},\ldots,j_{k_{\lambda}}\in\left[  n\right]  $ such that
\[
G_{i_{1},i_{2},\ldots,i_{k_{\lambda}};\ j_{1},j_{2},\ldots,j_{k_{\lambda}}%
}^{\ast}\mathbf{e}_{T}\neq0.
\]
This shows that $\mathcal{G}_{m}^{\ast}\mathcal{S}^{\lambda}\neq0$ (because
$G_{i_{1},i_{2},\ldots,i_{k_{\lambda}};\ j_{1},j_{2},\ldots,j_{k_{\lambda}}%
}^{\ast}\in\mathcal{G}_{m}^{\ast}$ (since $k_{\lambda}\leq m$) and
$\mathbf{e}_{T}\in\mathcal{S}^{\lambda}$). However, Lemma \ref{lem.GI=Hom}
yields $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{S}%
^{\lambda},\mathcal{G}_{m}\right)  \cong\mathcal{G}_{m}^{\ast}\mathcal{S}%
^{\lambda}\neq0$. That is, the Specht module $\mathcal{S}^{\lambda}$ is
contained in $\mathcal{G}_{m}$ (that is, can be embedded in $\mathcal{G}_{m}$).

Forget that we fixed $\lambda$. We thus have shown that for any partition
$\lambda$ of $n$ satisfying $k_{\lambda}\leq m$, the Specht module
$\mathcal{S}^{\lambda}$ is contained in $\mathcal{G}_{m}$. Since these Specht
modules are irreducible and mutually non-isomorphic, this shows that their
direct sum $\bigoplus\limits_{\substack{\lambda\vdash n;\\k_{\lambda}\leq
m}}\mathcal{S}^{\lambda}$ is contained in $\mathcal{G}_{m}$ as well (since
Proposition \ref{prop.sum-irrep-direct} says that a sum of non-isomorphic
irreducible representations is always a direct sum).

Now we shall show that the dimensions of $\bigoplus\limits_{\substack{\lambda
\vdash n;\\k_{\lambda}\leq m}}\mathcal{S}^{\lambda}$ and $\mathcal{G}_{m}$ are
the same. Indeed, recall the filtration $\left(  0=\mathcal{G}_{-1}%
\subseteq\mathcal{G}_{0}\subseteq\cdots\subseteq\mathcal{G}_{n}=\mathcal{G}%
\right)  $ from Subsection \ref{subsec.gelfand.filtr}. Thus, $\left(
0=\mathcal{G}_{-1}\subseteq\mathcal{G}_{0}\subseteq\cdots\subseteq
\mathcal{G}_{m}=\mathcal{G}_{m}\right)  $ is a filtration of $\mathcal{G}_{m}%
$. Hence,%
\begin{align*}
\dim\left(  \mathcal{G}_{m}\right)   &  =\sum_{i=0}^{m}\underbrace{\dim\left(
\mathcal{G}_{i}/\mathcal{G}_{i-1}\right)  }_{\substack{\leq\left(  \text{\# of
}i\text{-matchings of }\left[  n\right]  \right)  \\\text{(by Lemma
\ref{lem.G.matchbound})}}}\\
&  \leq\sum_{i=0}^{m}\underbrace{\left(  \text{\# of }i\text{-matchings of
}\left[  n\right]  \right)  }_{\substack{=\sum_{\substack{\lambda\vdash
n;\\k_{\lambda}=i}}\left(  \text{\# of standard tableaux of shape }%
\lambda\right)  \\\text{(by Lemma \ref{lem.G.Gmcount}, applied to }i\text{
instead of }m\text{)}}}\\
&  =\underbrace{\sum_{i=0}^{m}\ \ \sum_{\substack{\lambda\vdash n;\\k_{\lambda
}=i}}}_{=\sum_{\substack{\lambda\vdash n;\\k_{\lambda}\leq m}}}%
\underbrace{\left(  \text{\# of standard tableaux of shape }\lambda\right)
}_{\substack{=\dim\mathcal{S}^{\lambda}\\\text{(by (\ref{eq.dimSlam}))}}}\\
&  =\sum_{\substack{\lambda\vdash n;\\k_{\lambda}\leq m}}\dim\mathcal{S}%
^{\lambda}=\dim\bigoplus\limits_{\substack{\lambda\vdash n;\\k_{\lambda}\leq
m}}\mathcal{S}^{\lambda}.
\end{align*}


Since $\bigoplus\limits_{\substack{\lambda\vdash n;\\k_{\lambda}\leq
m}}\mathcal{S}^{\lambda}$ is contained in $\mathcal{G}_{m}$, we thus conclude
that the direct sum $\bigoplus\limits_{\substack{\lambda\vdash n;\\k_{\lambda
}\leq m}}\mathcal{S}^{\lambda}$ is $\mathcal{G}_{m}$. This proves%
\begin{equation}
\mathcal{G}_{m}\cong\bigoplus_{\substack{\lambda\vdash n;\\k_{\lambda}\leq
m}}\mathcal{S}^{\lambda}. \label{pf.thm.G.Gm=sum.Gm=}%
\end{equation}
The same reasoning (applied to $m-1$ instead of $m$) shows that%
\begin{equation}
\mathcal{G}_{m-1}\cong\bigoplus_{\substack{\lambda\vdash n;\\k_{\lambda}\leq
m-1}}\mathcal{S}^{\lambda}. \label{pf.thm.G.Gm=sum.Gm-1=}%
\end{equation}


Now it remains to prove that $\mathcal{G}_{m}/\mathcal{G}_{m-1}\cong%
\bigoplus\limits_{\substack{\lambda\vdash n;\\k_{\lambda}=m}}\mathcal{S}%
^{\lambda}$. For this purpose, we recall the following fact (an easy
consequence of the Krull--Remak--Schmidt theorem \cite[Theorem 3.8.1]%
{EGHetc11}, or -- because $\mathcal{A}$ is semisimple -- of the
Jordan--H\"{o}lder theorem \cite[Theorem 3.7.1]{EGHetc11}):

\begin{statement}
\textit{Cancellativity of $\mathcal{A}$-modules:} Let $U,V,W$ be three
finite-dimensional left $\mathcal{A}$-modules. If $U\oplus W\cong V\oplus W$,
then $U\cong V$.
\end{statement}

The semisimplicity of $\mathcal{A}$ yields%
\begin{align*}
\mathcal{G}_{m}  &  \cong\mathcal{G}_{m-1}\oplus\left(  \mathcal{G}%
_{m}/\mathcal{G}_{m-1}\right)  \cong\left(  \mathcal{G}_{m}/\mathcal{G}%
_{m-1}\right)  \oplus\mathcal{G}_{m-1}\\
&  \cong\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  \oplus\left(
\bigoplus_{\substack{\lambda\vdash n;\\k_{\lambda}\leq m-1}}\mathcal{S}%
^{\lambda}\right)  \ \ \ \ \ \ \ \ \ \ \left(  \text{by
(\ref{pf.thm.G.Gm=sum.Gm-1=})}\right)  ,
\end{align*}
so that%
\begin{align*}
\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  \oplus\left(  \bigoplus
_{\substack{\lambda\vdash n;\\k_{\lambda}\leq m-1}}\mathcal{S}^{\lambda
}\right)   &  \cong\mathcal{G}_{m}\cong\bigoplus_{\substack{\lambda\vdash
n;\\k_{\lambda}\leq m}}\mathcal{S}^{\lambda}\ \ \ \ \ \ \ \ \ \ \left(
\text{by (\ref{pf.thm.G.Gm=sum.Gm=})}\right) \\
&  \cong\left(  \bigoplus_{\substack{\lambda\vdash n;\\k_{\lambda}%
=m}}\mathcal{S}^{\lambda}\right)  \oplus\left(  \bigoplus_{\substack{\lambda
\vdash n;\\k_{\lambda}\leq m-1}}\mathcal{S}^{\lambda}\right)  .
\end{align*}
Applying the cancellativity of $\mathcal{A}$-modules to $U=\mathcal{G}%
_{m}/\mathcal{G}_{m-1}$ and $V=\bigoplus_{\substack{\lambda\vdash
n;\\k_{\lambda}=m}}\mathcal{S}^{\lambda}$ and $W=\bigoplus_{\substack{\lambda
\vdash n;\\k_{\lambda}\leq m-1}}\mathcal{S}^{\lambda}$, we thus obtain%
\[
\mathcal{G}_{m}/\mathcal{G}_{m-1}\cong\bigoplus_{\substack{\lambda\vdash
n;\\k_{\lambda}=m}}\mathcal{S}^{\lambda}.
\]
This completes the proof of Theorem \ref{thm.G.Gm=sum}.
\end{proof}

\begin{corollary}
\label{cor.G.GmS=0}Let $\lambda$ be a partition of $n$. Let $m\in\mathbb{Z}$
be such that $m<k_{\lambda}$. Then, $\mathcal{G}_{m}^{\ast}\mathcal{S}%
^{\lambda}=0$.
\end{corollary}

\begin{proof}
Lemma \ref{lem.GI=Hom} yields $\operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  \mathcal{S}^{\lambda},\mathcal{G}_{m}\right)  \cong\mathcal{G}%
_{m}^{\ast}\mathcal{S}^{\lambda}$ as vector spaces.

If $\mu$ is a partition of $n$ satisfying $k_{\mu}\leq m$, then $\mu
\neq\lambda$ (since $k_{\mu}\leq m<k_{\lambda}$ entails $k_{\mu}\neq
k_{\lambda}$ and thus $\mu\neq\lambda$) and therefore $\lambda\neq\mu$, so
that
\begin{equation}
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda
},\mathcal{S}^{\mu}\right)  =0 \label{pf.cor.G.GmS=0.1}%
\end{equation}
by Schur's lemma (since the irreducible $S_{n}$-representations $\mathcal{S}%
^{\lambda}$ and $\mathcal{S}^{\mu}$ are not isomorphic (since $\lambda\neq\mu$)).

But Theorem \ref{thm.G.Gm=sum} (with the index $\lambda$ renamed as $\mu$)
yields $\mathcal{G}_{m}\cong\bigoplus_{\substack{\mu\vdash n;\\k_{\mu}\leq
m}}\mathcal{S}^{\mu}$. Hence,%
\begin{align*}
\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(  \mathcal{S}^{\lambda
},\mathcal{G}_{m}\right)   &  \cong\operatorname*{Hom}\nolimits_{\mathcal{A}%
}\left(  \mathcal{S}^{\lambda},\bigoplus_{\substack{\mu\vdash n;\\k_{\mu}\leq
m}}\mathcal{S}^{\mu}\right)  \cong\bigoplus_{\substack{\mu\vdash n;\\k_{\mu
}\leq m}}\underbrace{\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},\mathcal{S}^{\mu}\right)  }_{\substack{=0\\\text{(by
(\ref{pf.cor.G.GmS=0.1}))}}} =0.
\end{align*}
Comparing this with $\operatorname*{Hom}\nolimits_{\mathcal{A}}\left(
\mathcal{S}^{\lambda},\mathcal{G}_{m}\right)  \cong\mathcal{G}_{m}^{\ast
}\mathcal{S}^{\lambda}$, we obtain $\mathcal{G}_{m}^{\ast}\mathcal{S}%
^{\lambda}=0$, and so Corollary \ref{cor.G.GmS=0} is proved.
\end{proof}

\subsection{Relation to the involution Gelfand model}

The Gelfand model $\mathcal{G}$ of $S_{n}$ is closely related to the famous
involution Gelfand model $V_{n}$ from \cite[\S 1.1]{AdPoRo08} (see also
\cite{KodVer04}).

Indeed, let $m\in\mathbb{Z}$. Let $F_{n,m}$ be the free $\mathbf{k}$-module
with basis consisting of all the $2m$-tuples $\left(  i_{1},i_{2},\ldots
,i_{m};\ j_{1},j_{2},\ldots,j_{m}\right)  $ of distinct elements of $\left[
n\right]  $ (viewed as formal symbols). This $F_{n,m}$ becomes an $S_{n}%
$-representation, where $S_{n}$ acts entrywise on the tuples (i.e., a
permutation is applied to each entry of the tuple). Now, let $V_{n,m}$ be the
quotient vector space of $F_{n,m}$ modulo the relations

\begin{itemize}
\item
\[
\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}\right)
\equiv\left(  i_{\sigma\left(  1\right)  },i_{\sigma\left(  2\right)  }%
,\ldots,i_{\sigma\left(  m\right)  };\ j_{\sigma\left(  1\right)  }%
,j_{\sigma\left(  2\right)  },\ldots,j_{\sigma\left(  m\right)  }\right)
\]
for all $\sigma\in S_{m}$ and all $\left(  i_{1},i_{2},\ldots,i_{m}%
;\ j_{1},j_{2},\ldots,j_{m}\right)  $, and

\item
\begin{align*}
&  \left(  i_{1},i_{2},\ldots,i_{p-1},j_{p},i_{p+1},\ldots,i_{m};\ j_{1}%
,j_{2},\ldots,j_{p-1},i_{p},j_{p+1},\ldots,j_{m}\right) \\
&  \equiv-\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}\right)
\end{align*}
for all $\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}\right)  $
and all $p\in\left[  m\right]  $.
\end{itemize}

This quotient space $V_{n,m}$ is still an $S_{n}$-representation (since the
relations are preserved under the $S_{n}$-action). It is easy to see that it
has a basis consisting of the elements%
\[
\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}\right)
\]
indexed by all $m$-matchings $\left\{  \left\{  i_{1},j_{1}\right\}
,\ \left\{  i_{2},j_{2}\right\}  ,\ \ldots,\ \left\{  i_{m},j_{m}\right\}
\right\}  $ of $\left[  n\right]  $, as long as we make sure to pick one
representative $\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots
,j_{m}\right)  $ for each $m$-matching (different representatives will lead to
different signs of $\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2}%
,\ldots,j_{m}\right)  $).

As we saw in the proof of Lemma \ref{lem.G.matchbound}, the quotient vector
space $\mathcal{G}_{m}/\mathcal{G}_{m-1}$ is spanned by the family%
\[
\left(  \overline{G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}%
}\right)  _{i_{1},i_{2},\ldots,i_{m},j_{1},j_{2},\ldots,j_{m}\text{ are
}2m\text{ distinct elements of }\left[  n\right]  },
\]
and this family is subject to the obvious relation%
\[
\overline{G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}}%
=\overline{G_{i_{\sigma\left(  1\right)  },i_{\sigma\left(  2\right)  }%
,\ldots,i_{\sigma\left(  m\right)  };\ j_{\sigma\left(  1\right)  }%
,j_{\sigma\left(  2\right)  },\ldots,j_{\sigma\left(  m\right)  }}}%
\]
for all $\sigma\in S_{m}$ (indeed, this holds even in $\mathcal{G}_{m}$,
without the overlines) and the less obvious relation
(\ref{pf.lem.G.matchbound.redund}) for all $p\in\left[  m\right]  $. Thus,
there is an $S_{n}$-equivariant linear map%
\begin{align*}
\psi_{m}:V_{n,m}  &  \rightarrow\mathcal{G}_{m}/\mathcal{G}_{m-1},\\
\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}\right)   &
\mapsto\overline{G_{i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots,j_{m}}}.
\end{align*}
This map $\psi_{m}$ is surjective, since $\mathcal{G}_{m}/\mathcal{G}_{m-1}$
is spanned by the family (\ref{pf.lem.G.matchbound.family1}). Since its domain
$V_{n,m}$ and its codomain $\mathcal{G}_{m}/\mathcal{G}_{m-1}$ have the same
dimension (by Corollary \ref{cor.G.matchbound.eq}), this entails that this map
$\psi_{m}$ is also injective, and thus is an isomorphism of $S_{n}%
$-representations. Hence,
\[
\mathcal{G}_{m}/\mathcal{G}_{m-1}\cong V_{n,m}\ \ \ \ \ \ \ \ \ \ \text{as
}S_{n}\text{-representations.}%
\]
But the $S_{n}$-representation $V_{n,m}$ is just an isomorphic copy of the
$m$-th degree component of the famous involution Gelfand model $V_{n}$ from
\cite[\S 1.1]{AdPoRo08} (see also \cite{KodVer04}): Indeed, we can replace
each $2m$-tuple $\left(  i_{1},i_{2},\ldots,i_{m};\ j_{1},j_{2},\ldots
,j_{m}\right)  \in V_{n,m}$ by the involution $t_{i_{1},j_{1}}t_{i_{2},j_{2}%
}\cdots t_{i_{m},j_{m}}$ multiplied by $\left(  -1\right)  ^{\text{\# of
}s\text{ satisfying }i_{s}>j_{s}}$ to get an isomorphism from the former to
the latter. Hence, $\mathcal{G}_{m}/\mathcal{G}_{m-1}$ is isomorphic to the
$m$-th degree component of $V_{n}$ as well.

Now, forget that we fixed $m$. We thus have shown that for each $m\in
\mathbb{Z}$, the $S_{n}$-representation $\mathcal{G}_{m}/\mathcal{G}_{m-1}$ is
isomorphic to the $m$-th degree component of the involution Gelfand model
$V_{n}$. Hence, the associated graded object $\bigoplus\limits_{m=0}%
^{n}\left(  \mathcal{G}_{m}/\mathcal{G}_{m-1}\right)  $ of our filtered
$S_{n}$-representation $\mathcal{G}$ is isomorphic to the Gelfand model
$V_{n}$. But since we are in characteristic $0$, Maschke's theorem ensures
that all exact sequences of $S_{n}$-representations split, and therefore
$\mathcal{G}\cong\bigoplus\limits_{m=0}^{n}\left(  \mathcal{G}_{m}%
/\mathcal{G}_{m-1}\right)  $ as representations of $S_{n}$. Hence,
$\mathcal{G}$ is also isomorphic to the Gelfand model $V_{n}$. This gives a
new proof of the fact that $V_{n}$ is a Gelfand model.

\subsection{Questions}

\begin{question}
What is $\mathcal{G}^{\ast}\mathcal{G}$? Proposition \ref{prop.J*J.struct}
\textbf{(c)} shows that this is a nonunital subalgebra of $\mathcal{A}$
isomorphic to $\mathbf{k}^{p\left(  n\right)  }$, where $p\left(  n\right)
=\left\vert \left\{  \lambda\vdash n\right\}  \right\vert $. But how exactly
does it lie inside $\mathcal{A}$? Is there a simple basis?
\end{question}

\begin{noncompile}
Maybe the Young--Lascoux PNP elements from Alain Lascoux, \textit{The
symmetric group}, draft, 2002,
\url{https://libgen.is/book/index.php?md5=C6FEA6CE2BFD71990037B6ABDC822642} ? No.
\end{noncompile}

\begin{question}
How much of the above still holds in the Hecke algebra?
\end{question}

\section{\label{sec.dyadic}The dyadic shuffles}

We shall now reap some concrete harvest of our theory.

As we recall, $\mathcal{A}=\mathbf{k}\left[  S_{n}\right]  $ is the group
algebra of the symmetric group $S_{n}=\left\{  \text{permutations of }\left[
n\right]  \right\}  $ over the characteristic-$0$ field $\mathbf{k}$. The
antipode map of $\mathcal{A}$ is the $\mathbf{k}$-linear map $a\mapsto
a^{\ast}$ that sends each $w\in S_{n}$ to $w^{-1}$. This is a $\mathbf{k}%
$-algebra anti-automorphism of $\mathcal{A}$.

\subsection{Definition via matchings}

An \emph{edge} will mean a $2$-element subset of $\left[  n\right]  $.

A permutation $w\in S_{n}$ is said to \emph{increase} on a subset of $\left[
n\right]  $ if the restriction of $w$ to this subset is an increasing
function. In particular, a permutation $w\in S_{n}$ increases on an edge
$\left\{  i<j\right\}  $ if and only if $w\left(  i\right)  <w\left(
j\right)  $.

For instance, the permutation $w\in S_{4}$ with one-line notation $\left[
3124\right]  $ increases on the edges $\left\{  1,4\right\}  ,\ \left\{
2,3\right\}  ,\ \left\{  2,4\right\}  ,\ \left\{  3,4\right\}  $ but not on
the edges $\left\{  1,2\right\}  ,\ \left\{  1,3\right\}  $.

A set of $k$ disjoint edges will be called a \emph{$k$-matching}. A
permutation $w\in S_{n}$ is said to \emph{increase} on a $k$-matching $M$ if
and only if $w$ increases on each edge $P\in M$.

For instance, the permutation $w\in S_{5}$ with one-line notation $\left[
24513\right]  $ increases on the $2$-matching $\left\{  \left\{  1,3\right\}
,\left\{  4,5\right\}  \right\}  $, but not on the $2$-matching $\left\{
\left\{  1,3\right\}  ,\left\{  2,5\right\}  \right\}  $, since it fails to
increase on the edge $\left\{  2,5\right\}  $.

Given a permutation $w\in S_{n}$ and an integer $k\geq0$, we define
$\operatorname*{incmat}_{k}\left(  w\right)  $ to be the number of all
$k$-matchings on which $w$ increases. This number is called
$\operatorname{noninv}_{\left(  2^{k},1^{n-2k}\right)  }\left(  w\right)  $ in
\cite{RSW}.

For instance, the permutation $\left[  24513\right]  \in S_{5}$ (written here
in one-line notation) increases on exactly four $2$-matchings, which are
color-coded on its one-line notation here:
\[
{\color{red}{24}}5{\color{blue}{13}},\qquad{\color{red}{2}}4{\color{red}{5}}%
{\color{blue}{13}},\qquad2{\color{red}{45}}{\color{blue}{13}},\qquad
{\color{blue}{2}}{\color{red}{45}}1{\color{blue}{3}}%
\]
(each color represents an edge, which consists of the positions at which this
color appears). Thus, $\operatorname*{incmat}_{2}\left(  \left[  24513\right]
\right)  =4$.

\begin{definition}
\label{def.dyadic.def}For any $k\in\mathbb{N}$, we define the \emph{dyadic
shuffle}%
\[
\mathcal{S}_{n,k}:=\sum_{w\in S_{n}}\operatorname*{incmat}\nolimits_{k}\left(
w\right)  w\in\mathcal{A}.
\]

\end{definition}

This $\mathcal{S}_{n,k}$ is denoted by $\nu_{\left(  2^{k},1^{n-2k}\right)  }$
in \cite{RSW}.

\begin{example}
\label{exa.dyadic.0123}\textbf{(a)} We have%
\[
\mathcal{S}_{n,0}=\sum_{w\in S_{n}}\underbrace{\operatorname*{incmat}%
\nolimits_{0}\left(  w\right)  }_{\substack{=1\\\text{(since there is only one
}0\text{-matching,}\\\text{namely }\left\{  {}\right\}  \text{, and each
}w\text{ increases on it)}}}w=\sum_{w\in S_{n}}w.
\]


\textbf{(b)} To compute $\mathcal{S}_{n,1}$, we note that the $1$-matchings
are just singletons consisting of a single edge $\left\{  i<j\right\}  $, and
a given permutation $w\in S_{n}$ increases on such a matching if and only if
$w\left(  i\right)  <w\left(  j\right)  $. Thus, for any permutation $w\in
S_{n}$, the number $\operatorname*{incmat}\nolimits_{1}\left(  w\right)  $
counts the pairs $\left(  i<j\right)  $ of elements of $\left[  n\right]  $
satisfying $w\left(  i\right)  <w\left(  j\right)  $. These pairs are known as
the \emph{noninversions} of $w$. For instance, for $n=3$, we have
\begin{align*}
\mathcal{S}_{3,1}  &  =\sum_{w\in S_{3}}\operatorname*{incmat}\nolimits_{1}%
\left(  w\right)  w\\
&  =3\left[  123\right]  +2\left[  132\right]  +2\left[  213\right]  +\left[
231\right]  +\left[  312\right]  ,
\end{align*}
where $\left[  i_{1}i_{2}\cdots i_{n}\right]  $ denotes the permutation with
one-line notation $\left(  i_{1},i_{2},\ldots,i_{n}\right)  $. \medskip

\textbf{(c)} If $2k>n$, then there exist no $k$-matchings, since there are no
$2k$ distinct elements in $\left[  n\right]  $. Hence, in this case, we have
$\operatorname*{incmat}\nolimits_{k}\left(  w\right)  =0$ for all $w\in S_{n}%
$, and thus $\mathcal{S}_{n,k}=0$.
\end{example}

Among the main results of \cite{RSW} are the following two theorems (both
parts of \cite[Theorem 1.6]{RSW}):

\begin{theorem}
\label{thm.dyadic.comm}The dyadic shuffles $\mathcal{S}_{n,0},\mathcal{S}%
_{n,1},\mathcal{S}_{n,2},\ldots$ pairwise commute. That is, $\left[
\mathcal{S}_{n,i},\mathcal{S}_{n,j}\right]  =0$ for all $i,j\in\mathbb{N}$.
\end{theorem}

\begin{theorem}
\label{thm.dyadic.int}For each $k\in\mathbb{N}$, there exists a multiset
$Z_{k}$ of integers such that $\prod_{\lambda\in Z_{k}}\left(  \mathcal{S}%
_{n,k}-\lambda\right)  =0$. (In other words, the minimal polynomial of
$\mathcal{S}_{n,k}$ over $\mathbf{k}$ factors into linear factors defined over
$\mathbb{Z}$.)
\end{theorem}

We shall recover these results as consequences of our theory in this section.
First, however, we shall give another definition of the dyadic shuffles.

\subsection{Definition via descents}

The \emph{descent set} $\operatorname*{Des}w$ of a permutation $w\in S_{n}$ is
defined to be the set of all $i\in\left[  n-1\right]  $ such that $w\left(
i\right)  >w\left(  i+1\right)  $. Its elements $i$ are called the
\emph{descents} of $w$.

A \emph{composition} of $n$ means a tuple $\left(  \alpha_{1},\alpha
_{2},\ldots,\alpha_{k}\right)  $ of positive integers satisfying $\alpha
_{1}+\alpha_{2}+\cdots+\alpha_{k}=n$.

For any composition $\alpha=\left(  \alpha_{1},\alpha_{2},\ldots,\alpha
_{k}\right)  $ of $n$, define a subset $D\left(  \alpha\right)  $ of $\left[
n-1\right]  $ by
\[
D\left(  \alpha\right)  :=\left\{  \alpha_{1},\alpha_{1}+\alpha_{2}%
,\ldots,\alpha_{1}+\alpha_{2}+\cdots+\alpha_{k-1}\right\}  ,
\]
and define the element%
\begin{equation}
\mathcal{X}_{\alpha}:=\sum_{\substack{w\in S_{n};\\\operatorname*{Des}%
w\subseteq D\left(  \alpha\right)  }}w^{-1}\in\mathcal{A}. \label{eq.X.def}%
\end{equation}
In other words, $\mathcal{X}_{\alpha}$ is the sum of the minimum-length right
coset representatives in $S_{\alpha}\setminus S_{n}$, where $S_{\alpha}$ is
the Young subgroup of $S_{n}$ corresponding to $\alpha$ (that is, the group of
permutations that permute the smallest $\alpha_{1}$ elements of $\left[
n\right]  $ among themselves, the next-smallest $\alpha_{2}$ elements of
$\left[  n\right]  $ among themselves, and so on).

These $\mathcal{X}_{\alpha}$ have significant overlap with the $G_{i_{1}%
,i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}$ defined in
(\ref{eq.gelfand.def}):

\begin{proposition}
\label{prop.X=G}Let $\alpha$ be any composition of $n$ that consists of $k$
many $2$'s and $n-2k$ many $1$'s in any order. Then,
\[
\mathcal{X}_{\alpha}=G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}%
\]
for an appropriate choice of $2k$ distinct elements $i_{1},i_{2},\ldots
,i_{k},j_{1},j_{2},\ldots,j_{k}\in\left[  n\right]  $.
\end{proposition}

\begin{proof}
An example first: If $\alpha$ is the composition $\left(  1,2,2,1,2\right)  $,
then $\mathcal{X}_{\alpha}=G_{2,4,7;\ 3,5,8}$, because the condition
$\operatorname*{Des}w\subseteq D\left(  1,2,2,1,2\right)  $ on a permutation
$w\in S_{8}$ is equivalent to the condition%
\[
\left(  w\left(  2\right)  <w\left(  3\right)  \text{ and }w\left(  4\right)
<w\left(  5\right)  \text{ and }w\left(  7\right)  <w\left(  8\right)
\right)  .
\]


Let us now handle the general case. The composition $\alpha$ has $k+\left(
n-2k\right)  =n-k$ entries, which include $k$ many $2$'s and $n-2k$ many
$1$'s. Any of the $k$ many $2$'s in $\alpha$ leads to two consecutive elements
$\alpha_{1}+\alpha_{2}+\cdots+\alpha_{i-1}$ and $\alpha_{1}+\alpha_{2}%
+\cdots+\alpha_{i}$ of $D\left(  \alpha\right)  \cup\left\{  0,n\right\}  $
having a \textquotedblleft gap\textquotedblright\ of $\alpha_{i}=2$ between
them, so that the intermediate integer $\alpha_{1}+\alpha_{2}+\cdots
+\alpha_{i-1}+1$ belongs to the complementary set $\left[  n-1\right]
\setminus D\left(  \alpha\right)  $. This accounts for all elements of
$\left[  n-1\right]  \setminus D\left(  \alpha\right)  $. Thus, we can write
the set $\left[  n-1\right]  \setminus D\left(  \alpha\right)  $ in the form%
\[
\left[  n-1\right]  \setminus D\left(  \alpha\right)  =\left\{  i_{1}%
<i_{2}<\cdots<i_{k}\right\}
\]
for some $k$ numbers $i_{1}<i_{2}<\cdots<i_{k}$ in $\left[  n-1\right]  $ that
are \textquotedblleft socially distanced\textquotedblright\ (i.e., each
$i_{s}$ differs from the previous $i_{s-1}$ by at least $2$). Consider these
$i_{1},i_{2},\ldots,i_{k}$. Hence, the $2k$ numbers $i_{1},i_{1}+1,i_{2}%
,i_{2}+1,\ldots,i_{k},i_{k}+1$ are distinct (since each $i_{s}$ differs from
the previous $i_{s-1}$ by at least $2$). Hence, $G_{i_{1},i_{2},\ldots
,i_{k};\ i_{1}+1,i_{2}+1,\ldots,i_{k}+1}$ is well-defined.

Now, a permutation $w\in S_{n}$ satisfies $\operatorname*{Des}w\subseteq
D\left(  \alpha\right)  $ if and only if \textbf{none} of $i_{1},i_{2}%
,\ldots,i_{k}$ is a descent of $w$ (since $D\left(  \alpha\right)  =\left[
n-1\right]  \setminus\left\{  i_{1},i_{2},\ldots,i_{k}\right\}  $), that is,
if and only if it satisfies $w\left(  i_{s}\right)  <w\left(  i_{s}+1\right)
$ for each $s\in\left[  k\right]  $. Thus, the definition (\ref{eq.X.def}) of
$\mathcal{X}_{\alpha}$ can be rewritten as
\[
\mathcal{X}_{\alpha}=\sum_{\substack{w\in S_{n};\\w\left(  i_{s}\right)
<w\left(  i_{s}+1\right)  \text{ for all }s\in\left[  k\right]  }%
}w^{-1}=G_{i_{1},i_{2},\ldots,i_{k};\ i_{1}+1,i_{2}+1,\ldots,i_{k}+1}%
\]
(by (\ref{eq.gelfand.def})). Hence, $\mathcal{X}_{\alpha}=G_{i_{1}%
,i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}$ for an appropriate choice of
$2k$ distinct elements $i_{1},i_{2},\ldots,i_{k},j_{1},j_{2},\ldots,j_{k}%
\in\left[  n\right]  $ (namely, for $j_{s}=i_{s}+1$). This proves Proposition
\ref{prop.X=G}.
\end{proof}

\begin{remark}
Let $i_{1},i_{2},\ldots,i_{k},j_{1},j_{2},\ldots,j_{k}\in\left[  n\right]  $
be $2k$ distinct elements. If $i_{s}<j_{s}$ for all $s\in\left[  k\right]  $,
then the permutations $w^{-1}$ for $w\in S_{n}$ satisfying $w\left(
i_{s}\right)  <w\left(  j_{s}\right)  $ for all $s\in\left[  k\right]  $ are
the minimum-length right coset representatives in $\left(  S_{\left\{
i_{1},j_{1}\right\}  }\times S_{\left\{  i_{2},j_{2}\right\}  }\times
\cdots\times S_{\left\{  i_{k},j_{k}\right\}  }\right)  \setminus S_{n}$,
where $S_{\left\{  i_{1},j_{1}\right\}  }\times S_{\left\{  i_{2}%
,j_{2}\right\}  }\times\cdots\times S_{\left\{  i_{k},j_{k}\right\}  }$ is the
order-$2^{k}$ subgroup of $S_{n}$ generated by the commuting transpositions
$t_{i_{1},j_{1}},t_{i_{2},j_{2}},\ldots,t_{i_{k},j_{k}}$. Thus, $G_{i_{1}%
,i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}$ is the sum of these
minimum-length right coset representatives in this case. But this does not
hold if some $i_{s}>j_{s}$.
\end{remark}

Now we claim:

\begin{theorem}
\label{thm.S=S}Let $k\in\mathbb{N}$ be such that $2k\leq n$. Then,%
\begin{equation}
\mathcal{S}_{n,k}=\frac{1}{k!\left(  n-2k\right)  !}\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }^{\ast}\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }.
\label{eq.thm.S=S.1}%
\end{equation}
Here, $\left(  2^{k},1^{n-2k}\right)  $ denotes the composition of $n$ that
begins with $k$ many $2$'s and continues with $n-2k$ many $1$'s.
\end{theorem}

\begin{proof}
We have
\begin{align*}
D\left(  \left(  2^{k},1^{n-2k}\right)  \right)   &  =\left\{  2,4,6,\ldots
,2k\right\}  \cup\underbrace{\left\{  2k+1,2k+2,2k+3,\ldots,n-1\right\}
}_{\text{all integers from }2k+1\text{ to }n-1}\\
&  =\left[  n-1\right]  \setminus\left\{  1,3,5,\ldots,2k-1\right\}  .
\end{align*}
Thus, a permutation $w\in S_{n}$ satisfies $\operatorname*{Des}w\subseteq
D\left(  \left(  2^{k},1^{n-2k}\right)  \right)  $ if and only if it satisfies
$\operatorname*{Des}w\subseteq\left[  n-1\right]  \setminus\left\{
1,3,5,\ldots,2k-1\right\}  $, that is, if and only if \textbf{none} of
$1,3,5,\ldots,2k-1$ is a descent of $w$, that is, if and only if it satisfies
$w\left(  2s-1\right)  <w\left(  2s\right)  $ for all $s\in\left[  k\right]
$. Hence, the definition (\ref{eq.X.def}) of $\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }$ can be rewritten as%
\begin{equation}
\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }=\sum_{\substack{w\in
S_{n};\\w\left(  2s-1\right)  <w\left(  2s\right)  \text{ for all }s\in\left[
k\right]  }}w^{-1}. \label{pf:prop:noninv:1}%
\end{equation}
Applying the antipode map $a\mapsto a^{\ast}$ to this equality, we obtain
\begin{equation}
\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }^{\ast}=\sum_{\substack{x\in
S_{n};\\x\left(  2s-1\right)  <x\left(  2s\right)  \text{ for all }s\in\left[
k\right]  }}x \label{pf:prop:noninv:2}%
\end{equation}
(since the antipode map sends $w^{-1}$ to $w$ for each $w\in S_{n}$).
Multiplying the equalities \eqref{pf:prop:noninv:2} and
\eqref{pf:prop:noninv:1} together, we find
\begin{align*}
\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }^{\ast}\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }  &  =\sum_{\substack{x\in S_{n};\\x\left(
2s-1\right)  <x\left(  2s\right)  \text{ for all }s\in\left[  k\right]
}}x\sum_{\substack{w\in S_{n};\\w\left(  2s-1\right)  <w\left(  2s\right)
\text{ for all }s\in\left[  k\right]  }}w^{-1}\\
&  =\sum_{\substack{x,w\in S_{n};\\x\left(  2s-1\right)  <x\left(  2s\right)
\text{ for all }s\in\left[  k\right]  ;\\w\left(  2s-1\right)  <w\left(
2s\right)  \text{ for all }s\in\left[  k\right]  }}xw^{-1}\\
&  =\sum_{\substack{u,w\in S_{n};\\\left(  uw\right)  \left(  2s-1\right)
<\left(  uw\right)  \left(  2s\right)  \text{ for all }s\in\left[  k\right]
;\\w\left(  2s-1\right)  <w\left(  2s\right)  \text{ for all }s\in\left[
k\right]  }}u
\end{align*}
(here, we have substituted $uw$ for $x$ in the sum). In other words,
\begin{equation}
\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }^{\ast}\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }=\sum_{u\in S_{n}}\rho\left(  u\right)  u,
\label{pf:prop:noninv:4}%
\end{equation}
where $\rho\left(  u\right)  \in\mathbb{Z}$ is the number of all permutations
$w\in S_{n}$ that satisfy%
\[
\left(  uw\right)  \left(  2s-1\right)  <\left(  uw\right)  \left(  2s\right)
\text{ and }w\left(  2s-1\right)  <w\left(  2s\right)  \text{ for all }%
s\in\left[  k\right]  .
\]
Clearly, it suffices to show that%
\[
\rho\left(  u\right)  =k!\left(  n-2k\right)  !\operatorname*{incmat}%
\nolimits_{k}\left(  u\right)  \ \ \ \ \ \ \ \ \ \ \text{for each }u\in S_{n}%
\]
(because then, dividing \eqref{pf:prop:noninv:4} by $k!\left(  n-2k\right)  !$
will result in (\ref{eq.thm.S=S.1})).

This we can show bijectively: Fix $u\in S_{n}$. Let $K$ be the set of all
permutations $w\in S_{n}$ that satisfy%
\[
\left(  uw\right)  \left(  2s-1\right)  <\left(  uw\right)  \left(  2s\right)
\text{ and }w\left(  2s-1\right)  <w\left(  2s\right)  \text{ for all }%
s\in\left[  k\right]  .
\]
Thus, $\rho\left(  u\right)  =\left\vert K\right\vert $. For each permutation
$w\in K$, the $k$-matching%
\[
M_{w}:=\left\{  \left\{  w\left(  1\right)  ,w\left(  2\right)  \right\}
,\ \left\{  w\left(  3\right)  ,w\left(  4\right)  \right\}  ,\ \ldots
,\ \left\{  w\left(  2k-1\right)  ,w\left(  2k\right)  \right\}  \right\}
\]
has the property that $u$ increases on $M_{w}$ (by the definition of $K$).
Conversely, any $k$-matching on which $u$ increases can be written as $M_{w}$
for $k!\left(  n-2k\right)  !$ many distinct permutations $w\in K$ (indeed, we
must ensure that the $k$ edges of the $k$-matching are the $k$ edges $\left\{
w\left(  1\right)  ,w\left(  2\right)  \right\}  ,\ \left\{  w\left(
3\right)  ,w\left(  4\right)  \right\}  ,\ \ldots,\ \left\{  w\left(
2k-1\right)  ,w\left(  2k\right)  \right\}  $ in one of $k!$ possible
orders\footnote{In order to satisfy the condition $w\left(  2s-1\right)
<w\left(  2s\right)  $ for all $s\in\left[  k\right]  $, we must make sure to
let $w\left(  2s-1\right)  $ be the smaller and $w\left(  2s\right)  $ the
larger of the two elements of the corresponding edge of the $k$-matching.},
and then the values $w\left(  2k+1\right)  ,\ w\left(  2k+2\right)
,\ \ldots,\ w\left(  n\right)  $ must be the remaining $n-2k$ elements of
$\left[  n\right]  $ in one of $\left(  n-2k\right)  !$ possible orders).
Hence, there is a $k!\left(  n-2k\right)  !$-to-$1$ correspondence between the
permutations $w\in K$ and the $k$-matchings on which $u$ increases. Therefore,
$\left\vert K\right\vert $ equals $k!\left(  n-2k\right)  !$ times the number
of the latter matchings, which of course is $\operatorname*{incmat}%
\nolimits_{k}\left(  u\right)  $. In other words, $\rho\left(  u\right)
=k!\left(  n-2k\right)  !\operatorname*{incmat}\nolimits_{k}\left(  u\right)
$, since $\rho\left(  u\right)  =\left\vert K\right\vert $. This completes the
proof of $\rho\left(  u\right)  =k!\left(  n-2k\right)
!\operatorname*{incmat}\nolimits_{k}\left(  u\right)  $. This, in turn,
completes the proof of Theorem \ref{thm.S=S}.
\end{proof}

\begin{proposition}
\label{prop.S-inv} Let $k\in\mathbb{N}$. Then, $\mathcal{S}_{n,k}^{\ast
}=\mathcal{S}_{n,k}$.
\end{proposition}

\begin{proof}
This follows from (\ref{eq.thm.S=S.1}), since $\left(  a^{\ast}a\right)
^{\ast}=a^{\ast}a$ for every $a\in\mathcal{A}$. Alternatively, this can be
derived from the definition of $\mathcal{S}_{n,k}$, since every permutation
$w\in S_{n}$ satisfies $\operatorname*{incmat}\nolimits_{k}\left(  w\right)
=\operatorname*{incmat}\nolimits_{k}\left(  w^{-1}\right)  $ (this follows
from a direct bijection: $w$ increases on an edge $\left\{  i,j\right\}  $ if
and only if $w^{-1}$ increases on $\left\{  w\left(  i\right)  ,w\left(
j\right)  \right\}  $).
\end{proof}

\begin{corollary}
\label{cor.SnkinG*G}Recall the left ideal $\mathcal{G}$ defined in Theorem
\ref{thm.G.gelfand}. Then, each $k\in\mathbb{N}$ satisfies%
\begin{equation}
\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }\in\mathcal{G}%
\ \ \ \ \ \ \ \ \ \ \text{if }2k\leq n, \label{eq.cor.SnkinG*G.X}%
\end{equation}
and%
\begin{equation}
\mathcal{S}_{n,k}\in\mathcal{G}^{\ast}\mathcal{G}. \label{eq.cor.SnkinG*G.S}%
\end{equation}

\end{corollary}

\begin{proof}
WLOG assume that $2k\leq n$ (since otherwise, (\ref{eq.cor.SnkinG*G.X}) is
vacuously true, whereas (\ref{eq.cor.SnkinG*G.S}) is obvious because Example
\ref{exa.dyadic.0123} \textbf{(c)} shows that $\mathcal{S}_{n,k}=0$).

Thus, Proposition \ref{prop.X=G} (applied to $\alpha=\left(  2^{k}%
,1^{n-2k}\right)  $) shows that
\[
\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }=G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}%
\]
for an appropriate choice of $2k$ distinct elements $i_{1},i_{2},\ldots
,i_{k},j_{1},j_{2},\ldots,j_{k}\in\left[  n\right]  $ (indeed, it is easy to
see that $\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }=G_{1,3,\ldots
,2k-1;\ 2,4,\ldots,2k}$). Hence, $\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)
}\in\mathcal{G}$ by the definition of $\mathcal{G}$. This proves
(\ref{eq.cor.SnkinG*G.X}). Furthermore, (\ref{eq.thm.S=S.1}) becomes%
\[
\mathcal{S}_{n,k}=\frac{1}{k!\left(  n-2k\right)  !}\underbrace{\mathcal{X}%
_{\left(  2^{k},1^{n-2k}\right)  }^{\ast}}_{\substack{\in\mathcal{G}^{\ast
}\\\text{(since }\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }\in
\mathcal{G}\text{)}}}\underbrace{\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)
}}_{\in\mathcal{G}}\in\mathcal{G}^{\ast}\mathcal{G}.
\]
This proves (\ref{eq.cor.SnkinG*G.S}).
\end{proof}

We can generalize Theorem \ref{thm.S=S} further:

\begin{theorem}
\label{thm.S=G*G}Let $k\in\mathbb{N}$ be such that $2k\leq n$. Then:

\begin{enumerate}
\item[\textbf{(a)}] Let $\alpha$ be any composition of $n$ that consists of
$k$ many $2$'s and $n-2k$ many $1$'s in any order. Then,%
\begin{equation}
\mathcal{S}_{n,k}=\frac{1}{k!\left(  n-2k\right)  !}\mathcal{X}_{\alpha}%
^{\ast}\mathcal{X}_{\alpha}. \label{eq.thm.S=S.gen}%
\end{equation}


\item[\textbf{(b)}] Let $i_{1},i_{2},\ldots,i_{k},j_{1},j_{2},\ldots,j_{k}$ be
any $2k$ distinct elements of $\left[  n\right]  $. Then,
\[
\mathcal{S}_{n,k}=\frac{1}{k!\left(  n-2k\right)  !}G_{i_{1},i_{2}%
,\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}%
\]
(where $G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}$ is as defined
in (\ref{eq.gelfand.def})).
\end{enumerate}
\end{theorem}

\begin{proof}
\textbf{(b)} The $2k$ numbers $i_{1},i_{2},\ldots,i_{k},j_{1},j_{2}%
,\ldots,j_{k}$ are distinct. In other words, the $2k$ numbers $i_{1}%
,j_{1},i_{2},j_{2},\ldots,i_{k},j_{k}$ are distinct. Thus, there is a
permutation $u\in S_{n}$ that sends the numbers $1,2,3,4,\ldots,2k-1,2k$ to
the numbers $i_{1},j_{1},i_{2},j_{2},\ldots,i_{k},j_{k}$, respectively. Pick
such a $u$. Thus, for each $s\in\left[  k\right]  $, we have%
\begin{equation}
i_{s}=u\left(  2s-1\right)  \ \ \ \ \ \ \ \ \ \ \text{and}%
\ \ \ \ \ \ \ \ \ \ j_{s}=u\left(  2s\right)  . \label{pf.thm.S=S.gen.ul}%
\end{equation}


Now, we shall show that%
\[
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}=u\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }.
\]
Indeed, (\ref{eq.gelfand.def}) shows that%
\begin{align*}
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}  &  =\sum
_{\substack{w\in S_{n};\\w\left(  i_{s}\right)  <w\left(  j_{s}\right)  \text{
for all }s\in\left[  k\right]  }}w^{-1}\\
&  =\sum_{\substack{w\in S_{n};\\w\left(  u\left(  2s-1\right)  \right)
<w\left(  u\left(  2s\right)  \right)  \text{ for all }s\in\left[  k\right]
}}w^{-1}\ \ \ \ \ \ \ \ \ \ \left(  \text{by (\ref{pf.thm.S=S.gen.ul})}\right)
\\
&  =\underbrace{\sum_{\substack{w\in S_{n};\\\left(  wu^{-1}\right)  \left(
u\left(  2s-1\right)  \right)  <\left(  wu^{-1}\right)  \left(  u\left(
2s\right)  \right)  \text{ for all }s\in\left[  k\right]  }}}_{\substack{=\sum
_{\substack{w\in S_{n};\\w\left(  2s-1\right)  <w\left(  2s\right)  \text{ for
all }s\in\left[  k\right]  }}\\\text{(since }\left(  wu^{-1}\right)  \left(
u\left(  2s-1\right)  \right)  =w\left(  2s-1\right)  \\\text{and }\left(
wu^{-1}\right)  \left(  u\left(  2s\right)  \right)  =w\left(  2s\right)
\text{)}}}\underbrace{\left(  wu^{-1}\right)  ^{-1}}_{=uw^{-1}}\\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(  \text{here, we have
substituted }wu^{-1}\text{ for }w\text{ in the sum}\right) \\
&  =\sum_{\substack{w\in S_{n};\\w\left(  2s-1\right)  <w\left(  2s\right)
\text{ for all }s\in\left[  k\right]  }}uw^{-1}=u\underbrace{\sum
_{\substack{w\in S_{n};\\w\left(  2s-1\right)  <w\left(  2s\right)  \text{ for
all }s\in\left[  k\right]  }}w^{-1}}_{\substack{=\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }\\\text{(by (\ref{pf:prop:noninv:1}))}}}\\
&  =u\mathcal{X}_{\left(  2^{k},1^{n-2k}\right)  }.
\end{align*}


Applying the antipode map $a\mapsto a^{\ast}$ to this equality (recalling that
this map is an anti-algebra morphism), we obtain
\[
G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}=\mathcal{X}%
_{\left(  2^{k},1^{n-2k}\right)  }^{\ast}\underbrace{u^{\ast}}%
_{\substack{=u^{-1}\\\text{(since }u\in S_{n}\text{)}}}=\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }^{\ast}u^{-1}.
\]
Hence,%
\begin{align*}
&  \frac{1}{k!\left(  n-2k\right)  !}\underbrace{G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}}_{=\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }^{\ast}u^{-1}}\underbrace{G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}}_{=u\mathcal{X}_{\left(  2^{k}%
,1^{n-2k}\right)  }}\\
&  =\frac{1}{k!\left(  n-2k\right)  !}\mathcal{X}_{\left(  2^{k}%
,1^{n-2k}\right)  }^{\ast}\underbrace{u^{-1}u}_{=1}\mathcal{X}_{\left(
2^{k},1^{n-2k}\right)  }=\frac{1}{k!\left(  n-2k\right)  !}\mathcal{X}%
_{\left(  2^{k},1^{n-2k}\right)  }^{\ast}\mathcal{X}_{\left(  2^{k}%
,1^{n-2k}\right)  }\\
&  =\mathcal{S}_{n,k}\ \ \ \ \ \ \ \ \ \ \left(  \text{by (\ref{eq.thm.S=S.1}%
)}\right)  .
\end{align*}
This proves Theorem \ref{thm.S=G*G} \textbf{(b)}. \medskip

\textbf{(a)} Proposition \ref{prop.X=G} yields that%
\[
\mathcal{X}_{\alpha}=G_{i_{1},i_{2},\ldots,i_{k};\ j_{1},j_{2},\ldots,j_{k}}%
\]
for an appropriate choice of $2k$ distinct elements $i_{1},i_{2},\ldots
,i_{k},j_{1},j_{2},\ldots,j_{k}\in\left[  n\right]  $. Consider these $2k$
elements. Then, using $\mathcal{X}_{\alpha}=G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}$, we obtain%
\begin{align*}
\frac{1}{k!\left(  n-2k\right)  !}\mathcal{X}_{\alpha}^{\ast}\mathcal{X}%
_{\alpha}  &  =\frac{1}{k!\left(  n-2k\right)  !}G_{i_{1},i_{2},\ldots
,i_{k};\ j_{1},j_{2},\ldots,j_{k}}^{\ast}G_{i_{1},i_{2},\ldots,i_{k}%
;\ j_{1},j_{2},\ldots,j_{k}}\\
&  =\mathcal{S}_{n,k}\ \ \ \ \ \ \ \ \ \ \left(  \text{by Theorem
\ref{thm.S=G*G} \textbf{(b)}}\right)  .
\end{align*}
This proves Theorem \ref{thm.S=G*G} \textbf{(a)}.
\end{proof}

\subsection{Commutativity and integrality of eigenvalues}

We can now prove Theorem \ref{thm.dyadic.comm} and Theorem
\ref{thm.dyadic.int}:

\begin{proof}
[Proof of Theorem \ref{thm.dyadic.comm}.]Theorem \ref{thm.G.gelfand} shows
that the left ideal $\mathcal{G}$ of $\mathcal{A}$ is a Gelfand model of
$S_{n}$. In particular, it is thus multiplicity-free. Hence, Theorem
\ref{thm.mf3} (applied to $J=\mathcal{G}$) yields $\left[  \mathcal{G}^{\ast
}\mathcal{G},\ \mathcal{G}^{\ast}\mathcal{G}\right]  =0$. In other words, any
two elements of $\mathcal{G}^{\ast}\mathcal{G}$ commute. Since
(\ref{eq.cor.SnkinG*G.S}) shows that all of $\mathcal{S}_{n,0},\mathcal{S}%
_{n,1},\mathcal{S}_{n,2},\ldots$ are elements of $\mathcal{G}^{\ast
}\mathcal{G}$, we thus conclude that $\mathcal{S}_{n,0},\mathcal{S}%
_{n,1},\mathcal{S}_{n,2},\ldots$ pairwise commute. This proves Theorem
\ref{thm.dyadic.comm}.
\end{proof}

Various variants of Theorem \ref{thm.dyadic.comm} can be proved in the same
way: for instance, we conclude that if $\alpha,\beta,\gamma,\delta$ are four
compositions of $n$ that consist only of $1$'s and $2$'s, then $\mathcal{X}%
_{\alpha}^{\ast}\mathcal{X}_{\beta}$ and $\mathcal{X}_{\gamma}^{\ast
}\mathcal{X}_{\delta}$ commute. We can also use Theorem \ref{thm.mf2} to
conclude that $\mathcal{X}_{\alpha}\left[  \mathcal{X}_{\beta},\mathcal{X}%
_{\gamma}\right]  =0$ and $\mathcal{S}_{n,k}\left[  \mathcal{X}_{\alpha
},\mathcal{X}_{\beta}\right]  =0$ and so on.

\begin{proof}
[Proof of Theorem \ref{thm.dyadic.int}.]Let $k\in\mathbb{N}$. Theorem
\ref{thm.G.gelfand} shows that the left ideal $\mathcal{G}$ of $\mathcal{A}$
is a Gelfand model of $S_{n}$. In particular, it is thus multiplicity-free.
But (\ref{eq.cor.SnkinG*G.S}) yields $\mathcal{S}_{n,k}\in\mathcal{G}^{\ast
}\mathcal{G}\subseteq\mathcal{G}$ (since $\mathcal{G}$ is a left ideal of
$\mathcal{A}$). Therefore, Theorem \ref{thm.minpol-factors} (applied to
$J=\mathcal{G}$ and $a=\mathcal{S}_{n,k}$) shows that there exists a multiset
$Z_{a}$ of elements of $\mathbf{k}$ such that $\prod_{\lambda\in Z_{a}}\left(
\mathcal{S}_{n,k}-\lambda\right)  =0$.

But we want a multiset of integers, not a multiset of elements of $\mathbf{k}%
$. For this, we use a standard trick: Apply the argument above to
$\mathbf{k}=\mathbb{Q}$; then, $Z_{a}$ is a multiset of elements of
$\mathbb{Q}$. In other words, the left action of $\mathcal{S}_{n,k}$ on
$\mathbb{Q}\left[  S_{n}\right]  $ is a linear endomorphism whose minimal
polynomial factors into linear factors over $\mathbb{Q}$. That is, all
eigenvalues of this endomorphism belong to $\mathbb{Q}$. But this endomorphism
is defined over $\mathbb{Z}$, so it is represented by an integer matrix, and
thus its eigenvalues (being the roots of the characteristic polynomial of this
matrix) must be integral over $\mathbb{Z}$. Thus, these eigenvalues belong to
$\mathbb{Q}$ but are integral over $\mathbb{Z}$. The only such numbers are
integers. Hence, these eigenvalues are integers. Denoting their multiset (with
their multiplicities) by $Z_{k}$, we then conclude that $Z_{k}$ is a multiset
of integers satisfying $\prod_{\lambda\in Z_{k}}\left(  \mathcal{S}%
_{n,k}-\lambda\right)  =0$. This holds over $\mathbb{Q}$, thus also over
$\mathbb{Z}$, and thus (by base change) over all $\mathbf{k}$. This proves
Theorem \ref{thm.dyadic.int}.
\end{proof}

\subsection{The bottom-to-random connection}

Let us set%
\[
\mathcal{B}_{n}:=\mathcal{X}_{\left(  n-1,1\right)  }%
\ \ \ \ \ \ \ \ \ \ \text{for any }n\geq1
\]
(and $\mathcal{B}_{0}:=0$). This element $\mathcal{B}_{n}$ is known as the
\emph{bottom-to-random shuffle}, and is part of a family of shuffles studied
in \cite{BCGS25} and references therein.

\begin{remark}
\label{rmk:Bn=sumz}For each $j\in\left[  n\right]  $, let $z_{j}\in S_{n}$ be
the cycle $\left(  n,n-1,\ldots,j\right)  $ (which is $\operatorname{id}$ when
$j=n$). Then,
\begin{equation}
\mathcal{B}_{n}=\mathcal{X}_{\left(  n-1,1\right)  }=z_{1}+z_{2}+\cdots+z_{n}.
\label{eq:rmk:Bn=sumz:eq}%
\end{equation}

\end{remark}

\begin{proof}
We have $D\left(  \left(  n-1,1\right)  \right)  =\left\{  n-1\right\}  $.
Hence, the permutations $w\in S_{n}$ satisfying $\operatorname*{Des}w\subseteq
D\left(  \left(  n-1,1\right)  \right)  $ are the permutations $w\in S_{n}$
whose only descent (if they have any) is $n-1$. But these permutations are
exactly the cycles $\left(  j,j+1,\ldots,n\right)  $ for $j\in\left[
n\right]  $, and their inverses are the cycles $\left(  n,n-1,\ldots,j\right)
=z_{j}$ for $j\in\left[  n\right]  $. Hence, $\mathcal{X}_{\left(
n-1,1\right)  }=z_{1}+z_{2}+\cdots+z_{n}$ follows from the definition of
$\mathcal{X}_{\left(  n-1,1\right)  }$.
\end{proof}

Let us set $\mathcal{S}_{n,-1}:=0$. We now claim the following:

\begin{theorem}
\label{thm.SB}Let $k\in\mathbb{N}$. Then,%
\begin{align}
\binom{n-2\left(  k-1\right)  }{2}\mathcal{S}_{n,k-1}  &  =\mathcal{S}%
_{n,k}\left(  \mathcal{B}_{n}-\left(  n-2k\right)  \right)
\label{pf.thm.SB.goal}\\
&  =\left(  \mathcal{B}_{n}^{\ast}-\left(  n-2k\right)  \right)
\mathcal{S}_{n,k}. \label{pf.thm.SB.goal*}%
\end{align}

\end{theorem}

\begin{proof}
The antipode map $a\mapsto a^{\ast}$ fixes the elements $\mathcal{S}_{n,k}$
and $\mathcal{S}_{n,k-1}$ (by Proposition \ref{prop.S-inv}), but is an
anti-algebra isomorphism. Hence, applying it to the equality
(\ref{pf.thm.SB.goal}) yields the equality (\ref{pf.thm.SB.goal*}), and vice
versa (since $a^{\ast\ast}=a$ for each $a\in\mathcal{A}$). Therefore, it will
suffice to prove (\ref{pf.thm.SB.goal*}).

For any $i\in\mathbb{Z}$, let $\mathcal{M}_{i}\left(  \left[  n\right]
\right)  $ denote the set of all $i$-matchings of $\left[  n\right]  $. We
shall use Iverson bracket notation for truth values (that is, if $\mathcal{A}$
is a statement, then $\left[  \mathcal{A}\right]  $ shall be the integer $1$
if $\mathcal{A}$ is true and the integer $0$ otherwise); thus, each $w\in
S_{n}$ satisfies%
\begin{equation}
\operatorname{incmat}_{k}\left(  w\right)  =\sum_{P\in\mathcal{M}_{k}\left(
\left[  n\right]  \right)  }\left[  w\text{ increases on }P\right]
\label{pf.thm.SB.1}%
\end{equation}
(by the definition of $\operatorname{incmat}_{k}\left(  w\right)  $). Hence,
the definition of $\mathcal{S}_{n,k}$ becomes%
\begin{align}
\mathcal{S}_{n,k}  &  =\sum_{w\in S_{n}}\operatorname{incmat}_{k}\left(
w\right)  w\nonumber\\
&  =\sum_{w\in S_{n}}\ \ \sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]
\right)  }\left[  w\text{ increases on }P\right]  w\ \ \ \ \ \ \ \ \ \ \left(
\text{by (\ref{pf.thm.SB.1})}\right) \nonumber\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }P\right]  w.
\label{pf.thm.SB.2}%
\end{align}
Also, applying the antipode map $a\mapsto a^{\ast}$ to
(\ref{eq:rmk:Bn=sumz:eq}), we obtain
\[
\mathcal{B}_{n}^{\ast}=z_{1}^{-1}+z_{2}^{-1}+\cdots+z_{n}^{-1}=\sum
_{j\in\left[  n\right]  }z_{j}^{-1}.
\]
These two equalities yield%
\begin{align}
&  \left(  \mathcal{B}_{n}^{\ast}-\left(  n-2k\right)  \right)  \mathcal{S}%
_{n,k}\nonumber\\
&  =\left(  \sum_{j\in\left[  n\right]  }z_{j}^{-1}-\left(  n-2k\right)
\right)  \sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }P\right]  w\nonumber\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }P\right]  \left(
\sum_{j\in\left[  n\right]  }z_{j}^{-1}-\left(  n-2k\right)  \right)
w\nonumber\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{j\in\left[  n\right]  }\ \ \sum_{w\in S_{n}}\left[  w\text{
increases on }P\right]  z_{j}^{-1}w\nonumber\\
&  \ \ \ \ \ \ \ \ \ \ -\left(  n-2k\right)  \sum_{P\in\mathcal{M}_{k}\left(
\left[  n\right]  \right)  }\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on
}P\right]  w\nonumber\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{j\in\left[  n\right]  }\ \ \sum_{w\in S_{n}}\left[  z_{j}w\text{
increases on }P\right]  w\nonumber\\
&  \ \ \ \ \ \ \ \ \ \ -\left(  n-2k\right)  \sum_{P\in\mathcal{M}_{k}\left(
\left[  n\right]  \right)  }\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on
}P\right]  w \label{pf.thm.SB.3}%
\end{align}
(here, we have substituted $z_{j}w$ for $w$ in the third sum).

On the other hand, applying (\ref{pf.thm.SB.2}) to $k-1$ instead of $k$, we
find\footnote{This equality holds even when $k=0$, since both of its sides are $0$
in this case (because $\mathcal{S}_{n,-1}=0$ and because $\mathcal{M}%
_{-1}\left(  \left[  n\right]  \right)  =\varnothing$).}%
\[
\mathcal{S}_{n,k-1}=\sum_{P\in\mathcal{M}_{k-1}\left(  \left[  n\right]
\right)  }\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }P\right]  w.
\]
Multiplying this equality by $\dbinom{n-2\left(  k-1\right)  }{2}$, we obtain%
\begin{align}
&  \binom{n-2\left(  k-1\right)  }{2}\mathcal{S}_{n,k-1}\nonumber\\
&  =\sum_{P\in\mathcal{M}_{k-1}\left(  \left[  n\right]  \right)  }%
\binom{n-2\left(  k-1\right)  }{2}\sum_{w\in S_{n}}\left[  w\text{ increases
on }P\right]  w\nonumber\\
&  =\sum_{Q\in\mathcal{M}_{k-1}\left(  \left[  n\right]  \right)  }%
\binom{n-2\left(  k-1\right)  }{2}\sum_{w\in S_{n}}\left[  w\text{ increases
on }Q\right]  w. \label{pf.thm.SB.4a}%
\end{align}
However, the map%
\begin{align*}
\left\{  \left(  P,e\right)  \ \mid\ P\in\mathcal{M}_{k}\left(  \left[
n\right]  \right)  \text{ and }e\in P\right\}   &  \rightarrow\mathcal{M}%
_{k-1}\left(  \left[  n\right]  \right)  ,\\
\left(  P,e\right)   &  \mapsto P\setminus\left\{  e\right\}
\end{align*}
(in common language: remove an edge from a $k$-matching to obtain a $\left(
k-1\right)  $-matching) is a $\dbinom{n-2\left(  k-1\right)  }{2}$-to-$1$ map
(i.e., each $Q\in\mathcal{M}_{k-1}\left(  \left[  n\right]  \right)  $ has
exactly $\dbinom{n-2\left(  k-1\right)  }{2}$ many preimages under this
map)\footnote{To prove this, we need to check that for each $Q\in
\mathcal{M}_{k-1}\left(  \left[  n\right]  \right)  $, there are exactly
$\dbinom{n-2(k-1)}{2}$ many pairs $\left(  P,e\right)  $ satisfying
$P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  $ and $e\in P$ and
$P\setminus\left\{  e\right\}  =Q$. But this is clear: Any such pair is
obtained by picking an edge $e$ that is disjoint from all the $k-1$ edges of
$Q$ (this can be done in $\dbinom{n-2(k-1)}{2}$ many ways, since there are
$n-2\left(  k-1\right)  $ elements to choose the elements of $e$ from), and
setting $P=Q\cup\left\{  e\right\}  $ (this can be done in only one way).}.
Hence, we have%
\begin{align*}
&  \sum_{Q\in\mathcal{M}_{k-1}\left(  \left[  n\right]  \right)  }%
\binom{n-2\left(  k-1\right)  }{2}\sum_{w\in S_{n}}\left[  w\text{ increases
on }Q\right]  w\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{e\in P}\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }%
P\setminus\left\{  e\right\}  \right]  w.
\end{align*}


Thus, (\ref{pf.thm.SB.4a}) rewrites as
\begin{align}
&  \binom{n-2\left(  k-1\right)  }{2}\mathcal{S}_{n,k-1}\nonumber\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{e\in P}\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }%
P\setminus\left\{  e\right\}  \right]  w. \label{pf.thm.SB.4}%
\end{align}


In view of (\ref{pf.thm.SB.3}) and (\ref{pf.thm.SB.4}), we can rewrite our
goal (\ref{pf.thm.SB.goal*}) as%
\begin{align}
&  \sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }\ \ \sum
_{j\in\left[  n\right]  }\ \ \sum_{w\in S_{n}}\left[  z_{j}w\text{ increases
on }P\right]  w\nonumber\\
&  \ \ \ \ \ \ \ \ \ \ -\left(  n-2k\right)  \sum_{P\in\mathcal{M}_{k}\left(
\left[  n\right]  \right)  }\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on
}P\right]  w\nonumber\\
&  =\sum_{P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  }%
\ \ \sum_{e\in P}\ \ \sum_{w\in S_{n}}\left[  w\text{ increases on }%
P\setminus\left\{  e\right\}  \right]  w. \label{pf.thm.SB.2rewr}%
\end{align}
We shall prove this \textquotedblleft addend by addend\textquotedblright;
i.e., we shall show that every $P\in\mathcal{M}_{k}\left(  \left[  n\right]
\right)  $ and every $w\in S_{n}$ satisfy
\begin{align}
&  \sum_{j\in\left[  n\right]  }\left[  z_{j}w\text{ increases on }P\right]
-\left(  n-2k\right)  \left[  w\text{ increases on }P\right] \nonumber\\
&  =\sum_{e\in P}\left[  w\text{ increases on }P\setminus\left\{  e\right\}
\right]  . \label{pf.thm.SB.5}%
\end{align}
Once this is shown, multiplying this equality by $w$ and summing it over all
$P\in\mathcal{M}_{k}\left(  \left[  n\right]  \right)  $ and $w\in S_{n}$ will
yield the desired (\ref{pf.thm.SB.2rewr}).

Thus, let us now show (\ref{pf.thm.SB.5}). Let $P\in\mathcal{M}_{k}\left(
\left[  n\right]  \right)  $ and $w\in S_{n}$. Then, it is easy to see that%
\[
\left[  w\text{ increases on }P\right]  =\prod_{f\in P}\left[  w\text{
increases on }f\right]
\]
and likewise%
\begin{align*}
\left[  w\text{ increases on }P\setminus\left\{  e\right\}  \right]   &
=\prod_{f\in P\setminus\left\{  e\right\}  }\left[  w\text{ increases on
}f\right] \\
&  \ \ \ \ \ \ \ \ \ \ \text{for every }e\in P,
\end{align*}
and furthermore, each $j\in\left[  n\right]  $ satisfies%
\begin{align}
&  \left[  z_{j}w\text{ increases on }P\right] \nonumber\\
&  =\prod_{f\in P}\left[  z_{j}w\text{ increases on }f\right] \nonumber\\
&  =\prod_{f\in P}%
\begin{cases}
\left[  w\text{ increases on }f\right]  , & \text{if }j\notin w\left(
f\right)  ;\\
1, & \text{if }j=w\left(  \max f\right)  ;\\
0, & \text{if }j=w\left(  \min f\right)
\end{cases}
\label{pf.thm.SB.8}%
\end{align}
(the last equality sign requires a moment of thought\footnote{\textit{Proof:}
We just need to show that%
\[
\left[  z_{j}w\text{ increases on }f\right]  =%
\begin{cases}
\left[  w\text{ increases on }f\right]  , & \text{if }j\notin w\left(
f\right)  ;\\
1, & \text{if }j=w\left(  \max f\right)  ;\\
0, & \text{if }j=w\left(  \min f\right)
\end{cases}
\]
for each $j\in\left[  n\right]  $ and $f\in P$. So let $j\in\left[  n\right]
$ and $f\in P$. Write the edge $f$ as $\left\{  p<q\right\}  $. Hence, the
permutation $w\in S_{n}$ increases on $f$ if and only if $w\left(  p\right)
<w\left(  q\right)  $. Likewise, $z_{j}w$ increases on $f$ if and only if
$\left(  z_{j}w\right)  \left(  p\right)  <\left(  z_{j}w\right)  \left(
q\right)  $, that is, $z_{j}\left(  w\left(  p\right)  \right)  <z_{j}\left(
w\left(  q\right)  \right)  $. Now, we are in one of the following three
cases:
\par
\begin{itemize}
\item \textit{Case 1:} We have $j\notin w\left(  f\right)  $. In this case, we
have $j\notin w\left(  f\right)  =\left\{  w\left(  p\right)  ,w\left(
q\right)  \right\}  $ (since $f=\left\{  p,q\right\}  $), so that $w\left(
p\right)  ,w\left(  q\right)  \in\left[  n\right]  \setminus\left\{
j\right\}  $. Hence, the inequality $z_{j}\left(  w\left(  p\right)  \right)
<z_{j}\left(  w\left(  q\right)  \right)  $ is equivalent to $w\left(
p\right)  <w\left(  q\right)  $ (since the permutation $z_{j}$ is
increasing on the set $\left[  n\right]  \setminus\left\{  j\right\}  $, which
contains both $w\left(  p\right)  $ and $w\left(  q\right)  $). In other
words, the permutation $z_{j}w$ increases on $f$ if and only if the
permutation $w$ does. In other words,%
\[
\left[  z_{j}w\text{ increases on }f\right]  =\left[  w\text{ increases on
}f\right]  .
\]
\par
\item \textit{Case 2:} We have $j=w\left(  \max f\right)  $. But $f=\left\{
p<q\right\}  $ entails $q=\max f$, so that $w\left(  q\right)  =w\left(  \max
f\right)  =j$ and therefore $z_{j}\left(  w\left(  q\right)  \right)
=z_{j}\left(  j\right)  =n$. But $p\neq q$ and thus $z_{j}\left(  w\left(
p\right)  \right)  \neq z_{j}\left(  w\left(  q\right)  \right)  =n$. Hence,
$z_{j}\left(  w\left(  p\right)  \right)  <n=z_{j}\left(  w\left(  q\right)
\right)  $ holds automatically in this case. In other words, $z_{j}w$
increases on $f$ automatically. Hence,%
\[
\left[  z_{j}w\text{ increases on }f\right]  =1.
\]
\par
\item \textit{Case 3:} We have $j=w\left(  \min f\right)  $. In view of
$f=\left\{  p<q\right\}  $, this rewrites as $j=w\left(  p\right)  $. Hence,
$z_{j}\left(  w\left(  p\right)  \right)  =z_{j}\left(  j\right)  =n$ is
always larger than $z_{j}\left(  w\left(  q\right)  \right)  $ (since $p\neq
q$). In other words, $z_{j}w$ does not increase on $f$. Hence,%
\[
\left[  z_{j}w\text{ increases on }f\right]  =0.
\]
\end{itemize}
\par
Combining the results of these three cases, we obtain%
\[
\left[  z_{j}w\text{ increases on }f\right]  =%
\begin{cases}
\left[  w\text{ increases on }f\right]  , & \text{if }j\notin w\left(
f\right)  ;\\
1, & \text{if }j=w\left(  \max f\right)  ;\\
0, & \text{if }j=w\left(  \min f\right)  ,
\end{cases}
\]
qed.}). Note that the product in (\ref{pf.thm.SB.8}) either agrees entirely
with the product $\prod_{f\in P}\left[  w\text{ increases on }f\right]  $ or
differs from the latter product in only one factor, since $j=w\left(  \max
f\right)  $ or $j=w\left(  \min f\right)  $ can happen only for one edge $f\in
P$.

Now, we have three possible cases:

\begin{itemize}
\item \textit{Case 1:} The permutation $w$ increases on fewer than $k-1$ edges
of $P$. In this case, the equality (\ref{pf.thm.SB.5}) is true since all the
truth values in it are $0$ (this includes $\left[  z_{j}w\text{ increases on
}P\right]  $, since the product in (\ref{pf.thm.SB.8}) agrees with the product
$\prod_{f\in P}\left[  w\text{ increases on }f\right]  $ in all but at most
one factors, but at least two factors of the latter product are $0$).

\item \textit{Case 2:} The permutation $w$ increases on exactly $k-1$ edges of
$P$. In this case, let $g$ be the unique edge of $P$ on which $w$ does not
increase. Then, (\ref{pf.thm.SB.8}) shows that $\left[  z_{j}w\text{ increases
on }P\right]  $ equals $1$ for $j=w\left(  \max g\right)  $ and equals $0$ for
all other $j$'s. Hence, the equality (\ref{pf.thm.SB.5}) rewrites as%
\[
1-\left(  n-2k\right)  \cdot0=1,
\]
which is true.

\item \textit{Case 3:} The permutation $w$ increases on all $k$ edges of $P$.
In this case, (\ref{pf.thm.SB.8}) shows that $\left[  z_{j}w\text{ increases
on }P\right]  $ equals $1$ for $\left(  n-2k\right)  +k$ values of $j$
(namely, for all $j\in\left[  n\right]  $ that belong to no edge in $w\left(  P\right)  $, as
well as for the $w\left(  \max f\right)  $ for all $f\in P$). Hence, the
equality (\ref{pf.thm.SB.5}) rewrites as%
\[
\left(  \left(  n-2k\right)  +k\right)  -\left(  n-2k\right)  \cdot1=k,
\]
which is true again.
\end{itemize}

So (\ref{pf.thm.SB.5}) has been proved, and thus (\ref{pf.thm.SB.goal*}) as
well. As we said above, this completes the proof of Theorem \ref{thm.SB}.
\end{proof}

\begin{corollary}
\label{cor.SB=BS}Let $k\in\mathbb{N}$. Then, $\mathcal{S}_{n,k}\mathcal{B}%
_{n}=\mathcal{B}_{n}^{\ast}\mathcal{S}_{n,k}$.
\end{corollary}

\begin{noncompile}
If $k=0$, then this is obvious (because in this case, $\mathcal{S}%
_{n,k}=\mathcal{S}_{n,0}=\sum_{w\in S_{n}}w$ and thus $\mathcal{S}%
_{n,k}u=u\mathcal{S}_{n,k}=\mathcal{S}_{n,k}$ for each $u\in S_{n}$). Thus, we
WLOG assume that $k$ is positive. Hence,
\end{noncompile}

\begin{proof}
Theorem \ref{thm.SB} yields $\mathcal{S}_{n,k}\left(  \mathcal{B}_{n}-\left(
n-2k\right)  \right)  =\left(  \mathcal{B}_{n}^{\ast}-\left(  n-2k\right)
\right)  \mathcal{S}_{n,k}$. Adding $\left(  n-2k\right)  \mathcal{S}_{n,k}$
to both sides of this equality, we obtain $\mathcal{S}_{n,k}\mathcal{B}%
_{n}=\mathcal{B}_{n}^{\ast}\mathcal{S}_{n,k}$. This proves Corollary
\ref{cor.SB=BS}.
\end{proof}

The following corollary was conjectured by Nadia Lafreni\`{e}re:

\begin{corollary}
\label{cor.S-nadia}Let $k\in\mathbb{N}$. Let $V$ be a left $\mathcal{A}%
$-module. Let $v\in V$ be such that $\mathcal{S}_{n,k-1}v=0$ and
$\mathcal{S}_{n,k}v=\mu v$ for some nonzero scalar $\mu\in\mathbf{k}$. Then,
$\mathcal{B}_{n}^{\ast}v=\left(  n-2k\right)  v$.
\end{corollary}

\begin{proof}
Theorem \ref{thm.SB} yields
\[
\left(  \mathcal{B}_{n}^{\ast}-\left(  n-2k\right)  \right)  \mathcal{S}%
_{n,k}=\binom{n-2\left(  k-1\right)  }{2}\mathcal{S}_{n,k-1}.
\]
Hence,%
\[
\left(  \mathcal{B}_{n}^{\ast}-\left(  n-2k\right)  \right)  \mathcal{S}%
_{n,k}v=\binom{n-2\left(  k-1\right)  }{2}\underbrace{\mathcal{S}_{n,k-1}%
v}_{=0}=0.
\]
Since $\mathcal{S}_{n,k}v=\mu v$, we can rewrite this as $\left(
\mathcal{B}_{n}^{\ast}-\left(  n-2k\right)  \right)  \mu v=0$. Dividing this
equality by $\mu$ (since $\mu$ is nonzero), we obtain $\left(  \mathcal{B}%
_{n}^{\ast}-\left(  n-2k\right)  \right)  v=0$, so that $\mathcal{B}_{n}%
^{\ast}v=\left(  n-2k\right)  v$.
\end{proof}

\subsection{Connection with $w_{0}$}

Let $w_{0}\in S_{n}$ be the permutation that sends $1,2,\ldots,n$ to
$n,n-1,\ldots,1$. Clearly, $w_{0}$ is an involution, i.e., we have $w_{0}%
^{-1}=w_{0}$. Moreover, the map $w_{0}$ is strictly decreasing. We claim the following:

\begin{proposition}
\label{prop.S-w0}Let $k\in\mathbb{N}$. Then, $w_{0}\mathcal{S}_{n,k}%
=\mathcal{S}_{n,k}w_{0}$.
\end{proposition}

\begin{proof}
This is equivalent to $w_{0}\mathcal{S}_{n,k}w_{0}^{-1}=\mathcal{S}_{n,k}$.
This, in turn, is equivalent to $\operatorname*{incmat}\nolimits_{k}\left(
w_{0}^{-1}uw_{0}\right)  =\operatorname*{incmat}\nolimits_{k}\left(  u\right)
$ for all $u\in S_{n}$ (by the definition of $\mathcal{S}_{n,k}$). So let us
prove the latter equality.

Fix $u\in S_{n}$. The symmetric group $S_{n}$ acts on the set $\left[
n\right]  $; hence it also acts on the set of edges (a permutation $w\in
S_{n}$ sends an edge $\left\{  i,j\right\}  $ to $w\left(  \left\{
i,j\right\}  \right)  :=\left\{  w\left(  i\right)  ,w\left(  j\right)
\right\}  $) and also acts on the set of $k$-matchings (a permutation $w\in
S_{n}$ sends a $k$-matching $M$ to the $k$-matching $w\left(  M\right)
:=\left\{  w\left(  e\right)  \ \mid\ e\in M\right\}  $).

We claim the following:

\begin{statement}
\textit{Claim 1:} Let $e$ be an edge. Then, $u$ increases on $e$ if and only
if $w_{0}^{-1}uw_{0}$ increases on $w_{0}\left(  e\right)  $.
\end{statement}

\begin{proof}
[Proof of Claim 1.]Write the edge $e$ as $e=\left\{  i<j\right\}  $. Then,
from $i<j$, we obtain $w_{0}\left(  i\right)  >w_{0}\left(  j\right)  $ (since
$w_{0}$ is strictly decreasing). Now, from $e=\left\{  i<j\right\}  $, we
obtain
\[
w_{0}\left(  e\right)  =\left\{  w_{0}\left(  i\right)  ,\ w_{0}\left(
j\right)  \right\}  =\left\{  w_{0}\left(  j\right)  <w_{0}\left(  i\right)
\right\}
\]
(since $w_{0}\left(  i\right)  >w_{0}\left(  j\right)  $). Hence, we have the
following chain of equivalences:%
\begin{align*}
\  &  \ \left(  \underbrace{w_{0}^{-1}}_{=w_{0}}u\underbrace{w_{0}}%
_{=w_{0}^{-1}}\text{ increases on }\underbrace{w_{0}\left(  e\right)
}_{=\left\{  w_{0}\left(  j\right)  <w_{0}\left(  i\right)  \right\}  }\right)
\\
&  \Longleftrightarrow\ \left(  w_{0}uw_{0}^{-1}\text{ increases on }\left\{
w_{0}\left(  j\right)  <w_{0}\left(  i\right)  \right\}  \right) \\
&  \Longleftrightarrow\ \left(  \underbrace{\left(  w_{0}uw_{0}^{-1}\right)
\left(  w_{0}\left(  j\right)  \right)  }_{=w_{0}\left(  u\left(  j\right)
\right)  }<\underbrace{\left(  w_{0}uw_{0}^{-1}\right)  \left(  w_{0}\left(
i\right)  \right)  }_{=w_{0}\left(  u\left(  i\right)  \right)  }\right) \\
&  \Longleftrightarrow\ \left(  w_{0}\left(  u\left(  j\right)  \right)
<w_{0}\left(  u\left(  i\right)  \right)  \right) \\
&  \Longleftrightarrow\ \left(  u\left(  j\right)  >u\left(  i\right)
\right)  \ \ \ \ \ \ \ \ \ \ \left(  \text{since }w_{0}\text{ is strictly
decreasing}\right) \\
&  \Longleftrightarrow\ \left(  u\left(  i\right)  <u\left(  j\right)  \right)
\\
&  \Longleftrightarrow\ \left(  u\text{ increases on }e\right)
\ \ \ \ \ \ \ \ \ \ \left(  \text{since }e=\left\{  i<j\right\}  \right)  .
\end{align*}
In other words, $\left(  u\text{ increases on }e\right)  \Longleftrightarrow
\left(  w_{0}^{-1}uw_{0}\text{ increases on }w_{0}\left(  e\right)  \right)
$. This proves Claim 1.
\end{proof}

\begin{statement}
\textit{Claim 2:} Let $M$ be a $k$-matching. Then, $u$ increases on $M$ if and
only if $w_{0}^{-1}uw_{0}$ increases on $w_{0}\left(  M\right)  $.
\end{statement}

\begin{proof}
[Proof of Claim 2.]We have the following chain of equivalences:%
\begin{align*}
&  \ \left(  u\text{ increases on }M\right) \\
&  \Longleftrightarrow\ \left(  u\text{ increases on }e\text{ for each }e\in
M\right) \\
&  \Longleftrightarrow\ \left(  w_{0}^{-1}uw_{0}\text{ increases on }%
w_{0}\left(  e\right)  \text{ for each }e\in M\right) \\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(  \text{by Claim 1}\right) \\
&  \Longleftrightarrow\ \left(  w_{0}^{-1}uw_{0}\text{ increases on each }f\in
w_{0}\left(  M\right)  \right) \\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since the edges }f\in w_{0}\left(  M\right) \\
\text{are precisely the }w_{0}\left(  e\right)  \text{ for }e\in M
\end{array}
\right) \\
&  \Longleftrightarrow\ \left(  w_{0}^{-1}uw_{0}\text{ increases on }%
w_{0}\left(  M\right)  \right)  .
\end{align*}
This proves Claim 2.
\end{proof}

Now, the definition of $\operatorname*{incmat}\nolimits_{k}\left(  w_{0}%
^{-1}uw_{0}\right)  $ yields%
\begin{align*}
&  \operatorname*{incmat}\nolimits_{k}\left(  w_{0}^{-1}uw_{0}\right) \\
&  =\left(  \text{\# of all }k\text{-matchings }M\text{ such that }w_{0}%
^{-1}uw_{0}\text{ increases on }M\right) \\
&  =\left(  \text{\# of all }k\text{-matchings }M\text{ such that }w_{0}%
^{-1}uw_{0}\text{ increases on }w_{0}\left(  M\right)  \right) \\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we substituted }w_{0}\left(  M\right)  \text{ for }M\text{,
since}\\
\text{the action of }w_{0}\text{ on }\left\{  k\text{-matchings}\right\}
\text{ is bijective}%
\end{array}
\right) \\
&  =\left(  \text{\# of all }k\text{-matchings }M\text{ such that }u\text{
increases on }M\right) \\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(  \text{by Claim 2}\right) \\
&  =\operatorname*{incmat}\nolimits_{k}\left(  u\right)
\ \ \ \ \ \ \ \ \ \ \left(  \text{by the definition of }\operatorname*{incmat}%
\nolimits_{k}\left(  u\right)  \right)  .
\end{align*}
This completes our proof.
\end{proof}

We can actually improve Proposition \ref{prop.S-w0}:

\begin{proposition}
\label{prop.S-w0sum}Let $k\in\mathbb{N}$. Then,
\[
w_{0}\mathcal{S}_{n,k}=\mathcal{S}_{n,k}w_{0}=\sum_{i=0}^{k}\left(  -1\right)
^{i}\dbinom{n-2i}{2k-2i}\dfrac{\left(  2k-2i\right)  !}{2^{k-i}\left(
k-i\right)  !}\mathcal{S}_{n,i}.
\]

\end{proposition}

\begin{proof}
We begin with some simple observations:

If $M$ is a $k$-matching, then any subset $I$ of $M$ is an $i$-matching, where
$i=\left\vert I\right\vert $. Moreover:

\begin{statement}
\textit{Claim 1:} Let $i\in\left\{  0,1,\ldots,k\right\}  $. Let $I$ be an
$i$-matching. Then, the number of all $k$-matchings $M$ satisfying $I\subseteq
M$ is $\dbinom{n-2i}{2k-2i}\dfrac{\left(  2k-2i\right)  !}{2^{k-i}\left(
k-i\right)  !}$.
\end{statement}

\begin{proof}
[Proof of Claim 1.]The $i$ edges of $I$ are disjoint (since $I$ is a
matching), and each of them has size $2$. Let $N_{I}$ be their union. Thus,
$N_{I}$ is a $2i$-element subset of $\left[  n\right]  $. Hence, $\left[
n\right]  \setminus N_{I}$ is an $\left(  n-2i\right)  $-element set.

In order to construct a $k$-matching $M$ satisfying $I\subseteq M$, we must
only choose its remaining $k-i$ edges (after the $i$ edges inherited from
$I$). These $k-i$ edges must be $k-i$ disjoint $2$-element subsets of $\left[
n\right]  \setminus N_{I}$ (since they must be disjoint from the edges in
$I$). In other words, they must form a $\left(  k-i\right)  $-matching of
$\left[  n\right]  \setminus N_{I}$. This requirement is both necessary and
sufficient. Hence,%
\begin{align*}
&  \left(  \text{\# of }k\text{-matchings }M\text{ satisfying }I\subseteq
M\right) \\
&  =\left(  \text{\# of }\left(  k-i\right)  \text{-matchings of }\left[
n\right]  \setminus N_{I}\right) \\
&  =\dbinom{n-2i}{2\left(  k-i\right)  }\dfrac{\left(  2\left(  k-i\right)
\right)  !}{2^{k-i}\left(  k-i\right)  !}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{by Proposition \ref{prop.match-count},}\\
\text{applied to }n-2i\text{, }k-i\text{ and }\left[  n\right]  \setminus
N_{I}\\
\text{instead of }n\text{, }k\text{ and }N
\end{array}
\right) \\
&  =\dbinom{n-2i}{2k-2i}\dfrac{\left(  2k-2i\right)  !}{2^{k-i}\left(
k-i\right)  !}.
\end{align*}
This proves Claim 1.
\end{proof}

\begin{statement}
\textit{Claim 2:} Let $w\in S_{n}$ be a permutation. Let $e$ be an edge. Then,
the permutation $w$ increases on $e$ if and only if the permutation $w_{0}w$
does not increase on $e$.
\end{statement}

\begin{proof}
[Proof of Claim 2.]Write the edge $e$ as $e=\left\{  i<j\right\}  $. Then,
$w_{0}\left(  e\right)  =w_{0}\left(  \left\{  i<j\right\}  \right)  =\left\{
w_{0}\left(  i\right)  >w_{0}\left(  j\right)  \right\}  $ (since $w_{0}$ is
strictly decreasing, so that $i<j$ entails $w_{0}\left(  i\right)
>w_{0}\left(  j\right)  $). Moreover, from $i<j$, we obtain $i\neq j$, so that
$w\left(  i\right)  \neq w\left(  j\right)  $ (since $w$ is a permutation and
thus injective). Now, we have the following chain of equivalences:%
\begin{align*}
&  \ \left(  w\text{ increases on }e\right) \\
&  \Longleftrightarrow\ \left(  w\left(  i\right)  <w\left(  j\right)
\right)  \ \ \ \ \ \ \ \ \ \ \left(  \text{since }e=\left\{  i<j\right\}
\right) \\
&  \Longleftrightarrow\ \left(  w\left(  i\right)  \leq w\left(  j\right)
\right)  \ \ \ \ \ \ \ \ \ \ \left(  \text{since }w\left(  i\right)  \neq
w\left(  j\right)  \right) \\
&  \Longleftrightarrow\ \left(  w_{0}\left(  w\left(  i\right)  \right)  \geq
w_{0}\left(  w\left(  j\right)  \right)  \right)  \ \ \ \ \ \ \ \ \ \ \left(
\text{since }w_{0}\text{ is strictly decreasing}\right) \\
&  \Longleftrightarrow\ \left(  \left(  w_{0}w\right)  \left(  i\right)
\geq\left(  w_{0}w\right)  \left(  j\right)  \right)
\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since }w_{0}\left(  w\left(  i\right)  \right)  =\left(  w_{0}w\right)
\left(  i\right) \\
\text{and }w_{0}\left(  w\left(  j\right)  \right)  =\left(  w_{0}w\right)
\left(  j\right)
\end{array}
\right) \\
&  \Longleftrightarrow\ \left(  \text{we don't have }\left(  w_{0}w\right)
\left(  i\right)  <\left(  w_{0}w\right)  \left(  j\right)  \right) \\
&  \Longleftrightarrow\ \left(  w_{0}w\text{ does not increase on }e\right)
\ \ \ \ \ \ \ \ \ \ \left(  \text{since }e=\left\{  i<j\right\}  \right)  .
\end{align*}
This proves Claim 2.
\end{proof}

However, for each $w\in S_{n}$, we have%
\begin{equation}
\operatorname*{incmat}\nolimits_{k}\left(  w\right)  =\sum_{\substack{M\text{
is a }k\text{-matching;}\\w\text{ increases on }M}}1
\label{pf.prop.S-w0sum.sum1}%
\end{equation}
(since a sum of $1$s equals the number of addends). Hence, the definition of
$\mathcal{S}_{n,k}$ becomes%
\begin{align}
\mathcal{S}_{n,k}  &  =\sum_{w\in S_{n}}\operatorname*{incmat}\nolimits_{k}%
\left(  w\right)  w=\sum_{w\in S_{n}}\ \ \sum_{\substack{M\text{ is a
}k\text{-matching;}\\w\text{ increases on }M}}1w\ \ \ \ \ \ \ \ \ \ \left(
\text{by (\ref{pf.prop.S-w0sum.sum1})}\right) \nonumber\\
&  =\sum_{M\text{ is a }k\text{-matching}}\ \ \sum_{\substack{w\in S_{n}\text{
increases}\\\text{on }M}}w. \label{pf.prop.S-w0sum.4}%
\end{align}
By the same argument, for any $i\in\mathbb{N}$, we have%
\begin{equation}
\mathcal{S}_{n,i}=\sum_{I\text{ is an }i\text{-matching}}\ \ \sum
_{\substack{w\in S_{n}\text{ increases}\\\text{on }I}}w.
\label{pf.prop.S-w0sum.4i}%
\end{equation}


However, multiplying the equality (\ref{pf.prop.S-w0sum.4}) by $w_{0}$ from
the left, we find%
\begin{align}
w_{0}\mathcal{S}_{n,k}  &  =w_{0}\sum_{M\text{ is a }k\text{-matching}%
}\ \ \sum_{\substack{w\in S_{n}\text{ increases}\\\text{on }M}}w\nonumber\\
&  =\sum_{M\text{ is a }k\text{-matching}}\ \ \sum_{\substack{w\in S_{n}\text{
increases}\\\text{on }M}}w_{0}w. \label{pf.prop.S-w0sum.5}%
\end{align}


However, for any $k$-matching $M$ and any $w\in S_{n}$, we have the chain of
equivalences%
\begin{align*}
&  \ \left(  w\text{ increases on }M\right) \\
&  \Longleftrightarrow\ \left(  \left(  w\text{ increases on }e\right)  \text{
for each edge }e\in M\right) \\
&  \Longleftrightarrow\ \left(  \left(  w_{0}w\text{ does not increase on
}e\right)  \text{ for each edge }e\in M\right)  \ \ \ \ \ \ \ \ \ \ \left(
\text{by Claim 2}\right) \\
&  \Longleftrightarrow\ \left(  w_{0}w\text{ increases on no edge }e\in
M\right)  .
\end{align*}
Thus, for each $k$-matching $M$, we have%
\begin{align*}
&  \sum_{\substack{w\in S_{n}\text{ increases}\\\text{on }M}}w_{0}w\\
&  =\sum_{\substack{w\in S_{n};\\w_{0}w\text{ increases on no edge }e\in
M}}w_{0}w\\
&  =\sum_{\substack{w\in S_{n}\text{ increases}\\\text{on no edge }e\in
M}}w\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we have substituted }w\text{ for }w_{0}w\\
\text{in the sum}%
\end{array}
\right) \\
&  =\sum_{I\subseteq M}\left(  -1\right)  ^{\left\vert I\right\vert }%
\sum_{\substack{w\in S_{n}\text{ increases}\\\text{on each edge }e\in
I}}w\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{by the Principle of Inclusion and Exclusion,}\\
\text{specifically \cite[Theorem 6.2.9]{21s}}\\
\text{with }M\text{ instead of }\left[  n\right]
\end{array}
\right) \\
&  =\sum_{I\subseteq M}\left(  -1\right)  ^{\left\vert I\right\vert }%
\sum_{\substack{w\in S_{n}\text{ increases}\\\text{on }I}%
}w\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since \textquotedblleft}w\text{ increases on each edge }e\in
I\text{\textquotedblright}\\
\text{is equivalent to \textquotedblleft}w\text{ increases on }%
I\text{\textquotedblright}%
\end{array}
\right) \\
&  =\sum_{i=0}^{k}\ \ \sum_{\substack{I\subseteq M;\\\left\vert I\right\vert
=i}}\left(  -1\right)  ^{\left\vert I\right\vert }\sum_{\substack{w\in
S_{n}\text{ increases}\\\text{on }I}}w
\end{align*}
(here, we have split up the first sum according to the value $\left\vert
I\right\vert $, since $\left\vert I\right\vert \in\left\{  0,1,\ldots
,k\right\}  $ for each $I\subseteq M$). Plugging this into
(\ref{pf.prop.S-w0sum.5}), we obtain%
\begin{align*}
w_{0}\mathcal{S}_{n,k}  &  =\sum_{M\text{ is a }k\text{-matching}}%
\ \ \sum_{i=0}^{k}\ \ \sum_{\substack{I\subseteq M;\\\left\vert I\right\vert
=i}}\left(  -1\right)  ^{\left\vert I\right\vert }\sum_{\substack{w\in
S_{n}\text{ increases}\\\text{on }I}}w\\
&  =\sum_{i=0}^{k}\ \ \underbrace{\sum_{M\text{ is a }k\text{-matching}%
}\ \ \sum_{\substack{I\subseteq M;\\\left\vert I\right\vert =i}}}%
_{\substack{=\sum_{I\text{ is an }i\text{-matching}}\ \ \sum
_{\substack{M\text{ is a }k\text{-matching;}\\I\subseteq M}}\\\text{(since an
}i\text{-element subset of a }k\text{-matching}\\\text{is always an
}i\text{-matching)}}}\underbrace{\left(  -1\right)  ^{\left\vert I\right\vert
}}_{\substack{=\left(  -1\right)  ^{i}\\\text{(since }\left\vert I\right\vert
=i\text{)}}}\sum_{\substack{w\in S_{n}\text{ increases}\\\text{on }I}}w\\
&  =\sum_{i=0}^{k}\ \ \sum_{I\text{ is an }i\text{-matching}}%
\ \ \underbrace{\sum_{\substack{M\text{ is a }k\text{-matching;}\\I\subseteq
M}}\left(  -1\right)  ^{i}}_{\substack{=\dbinom{n-2i}{2k-2i}\dfrac{\left(
2k-2i\right)  !}{2^{k-i}\left(  k-i\right)  !}\cdot\left(  -1\right)
^{i}\\\text{(since Claim 1 says that this sum has}\\\dbinom{n-2i}{2k-2i}%
\dfrac{\left(  2k-2i\right)  !}{2^{k-i}\left(  k-i\right)  !}\text{
addends,}\\\text{all of which equal }\left(  -1\right)  ^{i}\text{)}}%
}\sum_{\substack{w\in S_{n}\text{ increases}\\\text{on }I}}w\\
&  =\sum_{i=0}^{k}\dbinom{n-2i}{2k-2i}\dfrac{\left(  2k-2i\right)  !}%
{2^{k-i}\left(  k-i\right)  !}\cdot\left(  -1\right)  ^{i}\underbrace{\sum
_{I\text{ is an }i\text{-matching}}\ \ \sum_{\substack{w\in S_{n}\text{
increases}\\\text{on }I}}w}_{\substack{=\mathcal{S}_{n,i}\\\text{(by
(\ref{pf.prop.S-w0sum.4i}))}}}\\
&  =\sum_{i=0}^{k}\left(  -1\right)  ^{i}\dbinom{n-2i}{2k-2i}\dfrac{\left(
2k-2i\right)  !}{2^{k-i}\left(  k-i\right)  !}\mathcal{S}_{n,i}.
\end{align*}
Combining this with the equality $w_{0}\mathcal{S}_{n,k}=\mathcal{S}%
_{n,k}w_{0}$ from Proposition \ref{prop.S-w0}, we obtain the claim of
Proposition \ref{prop.S-w0sum}.
\end{proof}

\begin{thebibliography}{99999999}                                                                                         %


\bibitem[AdPoRo08]{AdPoRo08}%
\href{https://doi.org/10.1016/j.jalgebra.2008.03.030}{Ron M. Adin, Alexander
Postnikov, Yuval Roichman, \textit{Combinatorial Gelfand models}, Journal of
Algebra \textbf{320} (2008), Issue 3, pp. 1311--1325.}

\bibitem[AFBCCL24]{AFBCCL24}\href{https://arxiv.org/abs/2407.08644v2}{Ilani
Axelrod-Freed, Sarah Brauner, Judy Hsin-Hui Chiang, Patricia Commins, Veronica
Lang, \textit{Spectrum of random-to-random shuffling in the Hecke algebra},
arXiv:2407.08644v2.}

\bibitem[AguAra01]{AguAra01}\href{https://doi.org/10.1081/AGB-100002136}{J. L.
Aguado and J. O. Araujo, \textit{A Gel'fand model for the symmetric group},
Communications in Algebra \textbf{29} (4) (2001), pp. 1841--1851.}

\bibitem[BCGS25]{BCGS25}\href{https://arxiv.org/abs/2503.17580v3}{Sarah
Brauner, Patricia Commins, Darij Grinberg, Franco Saliola, \textit{The
q-deformed random-to-random family in the Hecke algebra}, arXiv:2503.17580v3.}

\bibitem[BleLau92]{BleLau92}%
\href{https://emis.dsd.sztaki.hu/journals/SLC/opapers/s29laue.html}{D.
Blessenohl and H. Laue, \textit{Algebraic combinatorics related to the free
Lie algebra}, S\'{e}minaire Lotharingien de Combinatoire, B29e (1992), 24 pp.}

\bibitem[DiaFul23]{DiaFul23}\href{https://doi.org/10.1090/mbk/146}{Persi
Diaconis, Jason Fulman, \textit{The Mathematics of Shuffling Cards}, AMS
2023.}

\bibitem[DieSal18]{DieSal18}%
\href{https://doi.org/10.1016/j.aim.2017.10.034}{Antonius B. Dieker, Franco V.
Saliola, \textit{Spectral analysis of random-to-random Markov chains},
Advances in Mathematics \textbf{323} (2018), pp. 427--485.}

\bibitem[EGHetc11]{EGHetc11}%
\href{http://www-math.mit.edu/~etingof/reprbook.pdf}{Pavel Etingof, Oleg
Golberg, Sebastian Hensel, Tiankai Liu, Alex Schwendner, Dmitry Vaintrob,
Elena Yudovina, \textit{Introduction to Representation Theory}, with
historical interludes by Slava Gerovitch, Student Mathematical Library
\textbf{59}, AMS 2011, updated version 2018.}

\bibitem[Fulton97]{Fulton97}%
\href{https://doi.org/10.1017/CBO9780511626241}{William Fulton, \textit{Young
Tableaux, With Applications to Representation Theory and Geometry}, London
Mathematical Society Student Texts \textbf{35}, Cambridge University Press
1997}.\newline See \url{https://mathoverflow.net/questions/456463} for errata.

\bibitem[GerSch87]{GerSch87}%
\href{https://doi.org/10.1016/0022-4049(87)90112-5}{M. Gerstenhaber, S. D.
Schack, \textit{A Hodge-type decomposition for commutative algebra
cohomology}, Journal of Pure and Applied Algebra \textbf{48} (1987), Issues
1--2, pp. 229--247.}

\bibitem[Grinbe21]{21s}Darij Grinberg, \textit{An Introduction to Algebraic
Combinatorics (Math 531, Winter 2024 lecture notes)}, 8 February 2025.\newline\url{http://www.cip.ifi.lmu.de/~grinberg/t/21s/lecs.pdf}

\bibitem[Grinbe24]{sga}\href{https://arxiv.org/abs/2507.20706v1}{Darij
Grinberg, \textit{An introduction to the symmetric group algebra [Math 701,
Spring 2024 lecture notes]}, 27 July 2025, arXiv:2507.20706v1.}\newline\url{https://www.cip.ifi.lmu.de/~grinberg/t/24s/sga.pdf}

\bibitem[Grinbe25]{s2b3}\href{https://arxiv.org/abs/2508.00752v2}{Darij
Grinberg, \textit{The representation theory of somewhere-to-below shuffles},
arXiv:2508.00752v2.}

\bibitem[Grinbe2X]{Gri-hook-talk}Darij Grinberg, \textit{The hook length
formula [talk slides]}, 2025-01-09.\newline\url{https://www.cip.ifi.lmu.de/~grinberg/t/24s/hooktalk.pdf}

\bibitem[KarPur23]{KarPur23}\href{https://arxiv.org/abs/2309.04645v1}{Steven
N. Karp, Kevin Purbhoo, \textit{Universal Pl\"{u}cker coordinates for the
Wronski map and positivity in real Schubert calculus}, arXiv:2309.04645v1.}

\bibitem[KodVer04]{KodVer04}\href{https://arxiv.org/abs/math/0402216v1}{Vijay
Kodiyalam, D.-N. Verma, \textit{A natural representation model for symmetric
groups}, arXiv:math/0402216v1.}

\bibitem[Lafren19]{Lafren}Nadia Lafreni\`{e}re, \textit{Valeurs propres des
op\'{e}rateurs de m\'{e}langes sym\'{e}tris\'{e}s}, thesis at UQAM, 2019,
\href{https://arxiv.org/abs/1912.07718v1}{1912.07718v1}.

\bibitem[Leeuwe95]{Leeuwe95}\href{https://doi.org/10.37236/1273}{Marc van
Leeuwen, \textit{The Robinson-Schensted and Sch\"{u}tzenberger algorithms, an
elementary approach}, The Electronic Journal of Combinatorics \textbf{3}
(1996), Issue 2, \#R15.}\newline A preprint is found at
\url{http://wwwmathlabo.univ-poitiers.fr/~maavl/pdf/foata-fest.pdf}.

\bibitem[ProSta98]{ProSta98}%
\href{https://doi.org/10.1006/jcta.1999.2967}{James Propp, Richard Stanley,
\textit{Domino tilings with barriers}, J. Combin. Theory Ser. A \textbf{87}
(1999), no. 2, pp. 347--356.}\newline A preprint is found at \url{https://arxiv.org/abs/math/9801067v3}.

\bibitem[ReSaWe11]{RSW}\href{https://arxiv.org/abs/1102.2460v2}{Victor Reiner,
Franco Saliola, Volkmar Welker, \textit{Spectra of Symmetrized Shuffling
Operators}, arXiv:1102.2460v2}. Published in: Memoirs of the AMS \textbf{228} (2014).

\bibitem[Reuten93]{Reuten93}Christophe Reutenauer, \textit{Free Lie Algebras},
Oxford University Press 1993.

\bibitem[Sagan01]{Sagan01}%
\href{https://doi.org/10.1007/978-1-4757-6804-6}{Bruce Sagan, \textit{The
Symmetric Group}, Graduate Texts in Mathematics \textbf{203}, 2nd edition
2001.}\newline See
\url{https://users.math.msu.edu/users/bsagan/Books/Sym/errata.pdf} for errata.

\bibitem[Stanle23]{Stanley-EC2}%
\href{https://doi.org/10.1017/9781009262538}{Richard P. Stanley,
\textit{Enumerative Combinatorics, volume 2}, Second edition, Cambridge
University Press 2023.}\newline See \url{http://math.mit.edu/~rstan/ec/} for errata.

\bibitem[Weyl53]{Weyl53}Hermann Weyl, \textit{The Classical Groups: their
invariants and representations}, 2nd edition, Princeton University Press 1953.

\bibitem[Wildon18]{Wildon18}Mark Wildon, \textit{Representation theory of the
symmetric group}, 5 April 2018.\newline\url{https://www.ma.rhul.ac.uk/~uvah099/Maths/Sym/SymGroup2014.pdf}

\bibitem[Young77]{Young77}\href{https://doi.org/10.3138/9781487575625}{Alfred
Young, \textit{The Collected Papers of Alfred Young 1873--1940}, University of
Toronto Press 1977.}
\end{thebibliography}


\end{document}