\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{framed}
\usepackage{comment}
\usepackage{color}
\usepackage[breaklinks=true]{hyperref}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{needspace}
\usepackage{tabls}
\usepackage{tikz}
\usepackage{ytableau}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Monday, April 06, 2026 20:20:17}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{<META NAME="GraphicsSave" CONTENT="32">}
%TCIDATA{<META NAME="SaveForMode" CONTENT="1">}
%TCIDATA{BibliographyScheme=Manual}
%TCIDATA{Language=American English}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\newcounter{exer}
\newcounter{exera}
\numberwithin{exer}{section}
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{convention}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{question}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{warning}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exam}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exam}[#1]\begin{leftbar}}
{\end{leftbar}\end{exam}}
\newtheorem{exmp}[exer]{Exercise}
\newenvironment{exercise}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\newenvironment{statement}{\begin{quote}}{\end{quote}}
\newenvironment{fineprint}{\begin{small}}{\end{small}}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{convention}[1][Convention]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{question}[1][Question]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{warning}[1][Warning]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{teachingnote}[1][Teaching note]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\NOEXPAND{\symd}{\mathbin{\bigtriangleup}}
\fi
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\let\cupnonlimits\bigcup
\let\capnonlimits\bigcap
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\renewcommand{\bigcup}{\cupnonlimits\limits}
\renewcommand{\bigcap}{\capnonlimits\limits}
\setlength\tablinesep{3pt}
\setlength\arraylinesep{3pt}
\setlength\extrarulesep{3pt}
\setlength\textheight{22.5cm}
\setlength\textwidth{14.8cm}
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\CC}{\mathbb{C}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\KK}{\mathbb{K}}
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\tup}[1]{\left( #1 \right)}
\newcommand{\ive}[1]{\left[ #1 \right]}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\mono}{\hookrightarrow}
\newcommand{\epi}{\twoheadrightarrow}
\newcommand{\iso}{\overset{\cong}{\to}}
\newcommand{\arinj}{\ar@{_{(}->}}
\newcommand{\arinjrev}{\ar@{^{(}->}}
\newcommand{\arsurj}{\ar@{->>}}
\newcommand{\arelem}{\ar@{|->}}
\newcommand{\arback}{\ar@{<-}}
\newcommand{\symd}{\mathbin{\bigtriangleup}}
\newcommand{\Ker}{\operatorname{Ker}}
\newcommand{\Coker}{\operatorname{Coker}}
\iffalse
\NOEXPAND{\enddocument}{\end{document}}
\fi
\usetikzlibrary{arrows.meta}
\usetikzlibrary{calc}
\usetikzlibrary{chains}
\usetikzlibrary{shapes}
\usetikzlibrary{decorations.pathmorphing}
\usetikzlibrary{lindenmayersystems}
\definecolor{darkgreen}{rgb}{0,.5,0}
\newtheoremstyle{plainsl}
{8pt plus 2pt minus 4pt}
{8pt plus 2pt minus 4pt}
{\slshape}
{0pt}
{\bfseries}
{.}
{5pt plus 1pt minus 1pt}
{}
\theoremstyle{plainsl}
\ihead{Note on the Young--Jucys--Murphy elements, version \today}
\ohead{page \thepage}
\cfoot{}
\begin{document}

\title{Note on the Young--Jucys--Murphy elements}
\author{Darij Grinberg}
\date{rough draft, \today}
\maketitle

\begin{abstract}
\textbf{Abstract.} We give a new proof that the $k$-th Young--Jucys--Murphy
element in the group algebra of $S_{n}$ is annihilated by the polynomial
$\prod_{i=-k+1}^{k-1}\left(  t-i\right)  $. This proof is inspired by Igor
Makhlin's proof on \href{https://mathoverflow.net/a/83493/}{MathOverflow
\#83150}, but uses no linear algebra whatsoever. At its core lies an algebraic
computation found with the help of LLMs.

\end{abstract}

This note is meant to give a new answer to
\href{https://mathoverflow.net/questions/83150}{MathOverflow question \#83150}
and an answer to \href{https://mathoverflow.net/questions/420318}{MathOverflow
question \#420318}.

\textbf{Acknowledgments.} The idea of this note is owed to Igor Makhlin's
MathOverflow question and answer \cite{Makhlin-yjm}. The proof of Lemma
\ref{lem.xys} is based upon somewhat rough and inchoate suggestions of two
LLMs: GPT-5.4 and Claude Opus 4.6 (see below).

\section{The Young--Jucys--Murphy elements}

We let $\mathbb{N}$ denote the set $\left\{  0,1,2,\ldots\right\}  $. We fix
an $n\in\mathbb{N}$, and consider the symmetric group $S_{n}$; this group
consists of the permutations of the set $\left[  n\right]  :=\left\{
1,2,\ldots,n\right\}  $. Furthermore, let $\mathbf{k}$ be any commutative
ring, and let $\mathbf{k}\left[  S_{n}\right]  $ be the group algebra of
$S_{n}$ over $\mathbf{k}$.

For any two distinct elements $i,j$ of $\left[  n\right]  $, we let
$t_{i,j}\in S_{n}$ be the transposition that swaps $i$ with $j$. The
\emph{Young--Jucys--Murphy elements} $J_{1},J_{2},\ldots,J_{n}$ of
$\mathbf{k}\left[  S_{n}\right]  $ are defined by%
\[
J_{k}:=\sum_{i=1}^{k-1}t_{i,k}\in\mathbf{k}\left[  S_{n}\right]
\ \ \ \ \ \ \ \ \ \ \text{for each }k\in\left[  n\right]
\]
(so that $J_{1}=\left(  \text{empty sum}\right)  =0$). It is known that these
elements $J_{1},J_{2},\ldots,J_{n}$ commute (see, e.g., \cite[Theorem
3.4.2]{sga}, where they are denoted by $\mathbf{m}_{1},\mathbf{m}_{2}%
,\ldots,\mathbf{m}_{n}$). This fact has a very easy proof. The following fact
is also well-known (\cite[Theorem 3.4.5]{sga}), but less easy:

\begin{theorem}
\label{thm.yjm.poly=0}For each $k\in\left[  n\right]  $, we have
$\prod_{i=-k+1}^{k-1}\left(  J_{k}-i\right)  =0$.
\end{theorem}

When $\mathbf{k}$ is a field of characteristic $0$, this can be reinterpreted
as saying that the linear operator on $\mathbf{k}\left[  S_{n}\right]  $ given
by left multiplication by $J_{k}$ (that is, the operator sending each
$a\in\mathbf{k}\left[  S_{n}\right]  $ to $J_{k}a$) is diagonalizable, and
that all its eigenvalues are integers between $-k+1$ and $k-1$ (inclusive). In
this form, this theorem originates in Murphy's 1981 paper \cite{Murphy81}%
\footnote{He denotes the $J_{k}$ as $L_{k}$.}; indeed, he shows a stronger
result \cite[(3.8)]{Murphy81} which describes an eigenbasis of this operator
and matches the eigenvectors to the eigenvalues (which can be used to compute
their multiplicities). Unsurprisingly, his eigenbasis relies on the
irreducible representations of $S_{n}$ (the Specht modules). In a later paper
\cite[(5.2)]{Murphy92}, he states Theorem \ref{thm.yjm.poly=0} explicitly
(even in the more general setting of Hecke algebras). A more conservative
variant of Murphy's proof is given by Garsia in \cite[Theorem 2.13]{GarEge20}.
A different proof, also using representations, is given by Lascoux in
\cite[Lemma 43]{Lascou02}\footnote{He denotes the $J_{k}$ as $\xi_{k}$. Note
also that he misspells $\left(  x-n+1\right)  \cdots x\cdots\left(
x+n-1\right)  $ as $\left(  x-n\right)  \cdots x\cdots\left(  x+n\right)  $.}.
A different tradition of proofs of Theorem \ref{thm.yjm.poly=0}, more
self-contained but using some linear algebra, goes back to the work of
Okounkov and Vershik \cite[Theorem 5.1]{VerOko05} (from which the theorem
easily follows, even if it is not stated there); a particularly beautiful
example is Igor Makhlin's proof in \cite{Makhlin-yjm}. Yet, even this proof
trades the representation theory for a use of the spectral theorem (every
symmetric matrix over $\mathbb{R}$ is diagonalizable); while fairly basic, it
is a tool rather alien to the combinatorial essence of the theorem.

In this note, we will give a completely elementary proof of Theorem
\ref{thm.yjm.poly=0}, relying only on a tricky inductive computation.

\section{The antipode and the positivity trick}

Before we come to the proof, we state a simple lemma about the group algebra
of $S_{n}$ that will come surprisingly handy.

We say that a commutative ring $\mathbf{k}$ is \emph{formally real} if it has
the following property: If $m$ elements $a_{1},a_{2},\ldots,a_{m}$ of
$\mathbf{k}$ satisfy $a_{1}^{2}+a_{2}^{2}+\cdots+a_{m}^{2}=0$, then
$a_{1}=a_{2}=\cdots=a_{m}=0$. In other words, a commutative ring $\mathbf{k}$
is formally real if and only if a (finite) sum of squares of elements of
$\mathbf{k}$ is never $0$ unless all the elements being squared are $0$. For
instance, any (totally) ordered integral domain is formally real (because a
square $a^{2}$ in such a domain is always positive unless $a=0$, and a
nonempty sum of positive elements cannot be $0$). In particular, $\mathbb{Z}$
is formally real. This is all we need in the following.

\begin{noncompile}
The following is easy to see, but no longer needed:

\begin{lemma}
\label{lem.fr.1}Let $\mathbf{k}$ be a formally real commutative ring. Let $N$
be a positive integer. Let $a\in\mathbf{k}$ be such that $Na=0$. Then, $a=0$.
\end{lemma}

\begin{proof}
We have $\underbrace{a^{2}+a^{2}+\cdots+a^{2}}_{N\text{ times}}=Na^{2}%
=\underbrace{Na}_{=0}a=0$. Since $\mathbf{k}$ is formally real, this entails
that $a=a=\cdots=a=0$. Since $N>0$, this means that $a=0$. This proves Lemma
\ref{lem.fr.1}.
\end{proof}
\end{noncompile}

Next, we define a well-known map on the group algebra $\mathbf{k}\left[
S_{n}\right]  $.

We let $S:\mathbf{k}\left[  S_{n}\right]  \rightarrow\mathbf{k}\left[
S_{n}\right]  $ be the $\mathbf{k}$-linear map that sends each permutation
$w\in S_{n}$ to $w^{-1}$. This is well-defined, since $\left(  w\right)
_{w\in S_{n}}$ is a basis of the $\mathbf{k}$-module $\mathbf{k}\left[
S_{n}\right]  $. This map $S$ is called the \emph{antipode} of $\mathbf{k}%
\left[  S_{n}\right]  $.

The following is well-known and easy to check (\cite[Theorem 3.11.14]{sga}):

\begin{theorem}
\label{thm.S.auto}\textbf{(a)} The map $S:\mathbf{k}\left[  S_{n}\right]
\rightarrow\mathbf{k}\left[  S_{n}\right]  $ is a $\mathbf{k}$-algebra
anti-automorphism. \medskip

\textbf{(b)} It is furthermore an involution (i.e., it satisfies $S\circ
S=\operatorname*{id}$).
\end{theorem}

Furthermore, any transposition $t_{i,k}\in S_{n}$ is fixed under the antipode
$S$ (since the definition of $S$ yields $S\left(  t_{i,k}\right)
=t_{i,k}^{-1}=t_{i,k}$). Hence, any sum of transpositions in $\mathbf{k}%
\left[  S_{n}\right]  $ is also fixed under $S$ (since $S$ is $\mathbf{k}%
$-linear). But each Young--Jucys--Murphy element $J_{k}$ (for any $k\in\left[
n\right]  $) is a sum of transpositions $t_{i,k}$, and thus is fixed under $S$
(according to the previous sentence). In other words, we have proved the following:

\begin{lemma}
\label{lem.S.J}For any $k\in\left[  n\right]  $, we have $S\left(
J_{k}\right)  =J_{k}$.
\end{lemma}

Over a formally real commutative ring, we furthermore have the following:

\begin{lemma}
\label{lem.S.fr}Let $\mathbf{k}$ be a formally real commutative ring. Let
$a\in\mathbf{k}\left[  S_{n}\right]  $ be such that $S\left(  a\right)  \cdot
a=0$. Then, $a=0$.
\end{lemma}

\begin{proof}
Expand $a$ in the basis $\left(  w\right)  _{w\in S_{n}}$ of $\mathbf{k}%
\left[  S_{n}\right]  $. That is, write $a$ as
\[
a=\sum_{w\in S_{n}}a_{w}w\ \ \ \ \ \ \ \ \ \ \text{for some scalars }a_{w}%
\in\mathbf{k}.
\]
Then,
\begin{align*}
S\left(  a\right)   &  =S\left(  \sum_{w\in S_{n}}a_{w}w\right)  =\sum_{w\in
S_{n}}a_{w}\underbrace{S\left(  w\right)  }_{\substack{=w^{-1}\\\text{(by the
definition of }S\text{)}}}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since the map }S\\
\text{is }\mathbf{k}\text{-linear}%
\end{array}
\right) \\
&  =\sum_{w\in S_{n}}a_{w}w^{-1}=\sum_{v\in S_{n}}a_{v}v^{-1}.
\end{align*}
Multiplying this equality with the equality $a=\sum_{w\in S_{n}}a_{w}w$, we
obtain%
\begin{align*}
S\left(  a\right)  \cdot a  &  =\left(  \sum_{v\in S_{n}}a_{v}v^{-1}\right)
\left(  \sum_{w\in S_{n}}a_{w}w\right)  =\sum_{v\in S_{n}}\ \ \sum_{w\in
S_{n}}\underbrace{a_{v}v^{-1}a_{w}w}_{=a_{v}a_{w}v^{-1}w}\\
&  =\sum_{v\in S_{n}}\ \ \sum_{w\in S_{n}}a_{v}a_{w}v^{-1}w=\sum_{v\in S_{n}%
}\ \ \sum_{u\in S_{n}}a_{v}a_{vu}\underbrace{v^{-1}vu}_{=u}\\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we have substituted }vu\text{ for }w\text{ in the inner sum,}\\
\text{since the map }S_{n}\rightarrow S_{n},\ u\mapsto vu\text{ (for given
}v\in S_{n}\text{)}\\
\text{is a bijection (because }S_{n}\text{ is a group)}%
\end{array}
\right) \\
&  =\sum_{v\in S_{n}}\ \ \sum_{u\in S_{n}}a_{v}a_{vu}u=\sum_{u\in S_{n}%
}\left(  \sum_{v\in S_{n}}a_{v}a_{vu}\right)  u.
\end{align*}
Thus,%
\[
\sum_{u\in S_{n}}\left(  \sum_{v\in S_{n}}a_{v}a_{vu}\right)  u=S\left(
a\right)  \cdot a=0
\]
(by assumption). Note that the left hand side of this equality is a
$\mathbf{k}$-linear combination of the family $\left(  u\right)  _{u\in S_{n}%
}$ with coefficients $\sum_{v\in S_{n}}a_{v}a_{vu}$. Since the family $\left(
u\right)  _{u\in S_{n}}$ is $\mathbf{k}$-linearly independent (being a basis
of the group algebra $\mathbf{k}\left[  S_{n}\right]  $), this equality thus
entails that all the coefficients $\sum_{v\in S_{n}}a_{v}a_{vu}$ on the left
hand side must be $0$. In other words,%
\[
\sum_{v\in S_{n}}a_{v}a_{vu}=0\ \ \ \ \ \ \ \ \ \ \text{for each }u\in S_{n}.
\]
Applying this to $u=\operatorname*{id}$, we obtain $\sum_{v\in S_{n}}%
a_{v}a_{v\operatorname*{id}}=0$. In other words, $\sum_{v\in S_{n}}a_{v}%
^{2}=0$ (since $a_{v}a_{v\operatorname*{id}}=a_{v}a_{v}=a_{v}^{2}$ for each
$v\in S_{n}$). Since the ring $\mathbf{k}$ is formally real, this entails
that
\begin{equation}
a_{v}=0\ \ \ \ \ \ \ \ \ \ \text{for all }v\in S_{n} \label{pf.lem.S.fr.av=0}%
\end{equation}
(because a (finite) sum of squares of elements of $\mathbf{k}$ is never $0$
unless all the elements being squared are $0$). Therefore,
\[
a=\sum_{w\in S_{n}}\underbrace{a_{w}}_{\substack{=0\\\text{(by
(\ref{pf.lem.S.fr.av=0}))}}}w=\sum_{w\in S_{n}}0w=0.
\]
This proves Lemma \ref{lem.S.fr}.
\end{proof}

\begin{noncompile}
As a consequence of Lemma \ref{lem.S.fr} and Lemma \ref{lem.fr.1}, we obtain:

\begin{lemma}
\label{lem.S.fr1}Let $\mathbf{k}$ be a formally real commutative ring. Let $N$
be a positive integer. Let $a\in\mathbf{k}\left[  S_{n}\right]  $ be such that
$N\cdot S\left(  a\right)  \cdot a=0$. Then, $a=0$.
\end{lemma}

\begin{proof}
Expand $S\left(  a\right)  \cdot a$ in the basis $\left(  w\right)  _{w\in
S_{n}}$ of $\mathbf{k}\left[  S_{n}\right]  $. That is, write $S\left(
a\right)  \cdot a$ as
\[
S\left(  a\right)  \cdot a=\sum_{w\in S_{n}}c_{w}%
w\ \ \ \ \ \ \ \ \ \ \text{for some scalars }c_{w}\in\mathbf{k}.
\]
Then, $N\cdot S\left(  a\right)  \cdot a=N\cdot\sum_{w\in S_{n}}c_{w}%
w=\sum_{w\in S_{n}}Nc_{w}w$, so that%
\[
\sum_{w\in S_{n}}Nc_{w}w=N\cdot S\left(  a\right)  \cdot a=0.
\]
Since the family $\left(  w\right)  _{w\in S_{n}}$ is $\mathbf{k}$-linearly
independent (being a basis of $\mathbf{k}\left[  S_{n}\right]  $), this
equality shows that $Nc_{w}=0$ for all $w\in S_{n}$. Therefore, $c_{w}=0$ for
all $w\in S_{n}$ (by Lemma \ref{lem.fr.1}, applied to $c_{w}$ instead of $a$).
Hence,%
\[
S\left(  a\right)  \cdot a=\sum_{w\in S_{n}}\underbrace{c_{w}}_{=0}%
w=\sum_{w\in S_{n}}0w=0.
\]
Therefore, Lemma \ref{lem.S.fr} yields $a=0$. This proves Lemma
\ref{lem.S.fr1}.
\end{proof}
\end{noncompile}

\section{The recursion lemma}

Now comes the main tool of the proof of Theorem \ref{thm.yjm.poly=0}: a lemma
that I conjectured in
\href{https://mathoverflow.net/questions/420318}{MathOverflow question
\#420318}. First a piece of notation: For each positive integer $k$, we define
the polynomial%
\[
p_{k}\left(  t\right)  :=\prod_{i=-k+1}^{k-1}\left(  t-i\right)  \in
\mathbb{Z}\left[  t\right]  .
\]
For instance,%
\begin{align*}
p_{1}\left(  t\right)   &  =t;\\
p_{2}\left(  t\right)   &  =\left(  t+1\right)  t\left(  t-1\right)  ;\\
p_{3}\left(  t\right)   &  =\left(  t+2\right)  \left(  t+1\right)  t\left(
t-1\right)  \left(  t-2\right)  ;\\
p_{4}\left(  t\right)   &  =\left(  t+3\right)  \left(  t+2\right)  \left(
t+1\right)  t\left(  t-1\right)  \left(  t-2\right)  \left(  t-3\right)  .
\end{align*}


Before we state our main lemma, we shall show two easy properties of these
polynomials $p_{k}$:

\begin{lemma}
\label{lem.pk.odd}Let $k$ be a positive integer. Then, the polynomial
$p_{k}\in\mathbb{Z}\left[  t\right]  $ is odd, i.e., is a $\mathbb{Z}$-linear
combination of the powers $t^{m}$ for odd $m$.
\end{lemma}

\begin{proof}
By definition of $p_{k}$, we have%
\begin{align}
p_{k}\left(  t\right)   &  =\prod_{i=-k+1}^{k-1}\left(  t-i\right)  =\left(
\prod_{i=-k+1}^{-1}\left(  t-i\right)  \right)  \left(  t-0\right)  \left(
\prod_{i=1}^{k-1}\left(  t-i\right)  \right)  \nonumber\\
&  \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we have broken up the product into}\\
\text{three parts: the product of the factors for }i<0\text{, the}\\
\text{factor for }i=0\text{, and the product of the factors for }i>0
\end{array}
\right)  \nonumber\\
&  =\left(  \prod_{i=1}^{k-1}\underbrace{\left(  t-\left(  -i\right)  \right)
}_{=t+i}\right)  \underbrace{\left(  t-0\right)  }_{=t}\left(  \prod
_{i=1}^{k-1}\left(  t-i\right)  \right)  \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we have substituted }-i\\
\text{for }i\text{ in the first product}%
\end{array}
\right)  \nonumber\\
&  =\left(  \prod_{i=1}^{k-1}\left(  t+i\right)  \right)  t\left(  \prod
_{i=1}^{k-1}\left(  t-i\right)  \right)  =t\underbrace{\left(  \prod
_{i=1}^{k-1}\left(  t+i\right)  \right)  \left(  \prod_{i=1}^{k-1}\left(
t-i\right)  \right)  }_{=\prod_{i=1}^{k-1}\left(  \left(  t+i\right)  \left(
t-i\right)  \right)  }\nonumber\\
&  =t\prod_{i=1}^{k-1}\underbrace{\left(  \left(  t+i\right)  \left(
t-i\right)  \right)  }_{=t^{2}-i^{2}}=t\prod_{i=1}^{k-1}\left(  t^{2}%
-i^{2}\right)  .\label{pf.lem.pk.odd.5}%
\end{align}
Clearly, the product $\prod_{i=1}^{k-1}\left(  t^{2}-i^{2}\right)  $ is a
polynomial in $t^{2}$. In other words, it can be written as
\begin{equation}
\prod_{i=1}^{k-1}\left(  t^{2}-i^{2}\right)  =\sum_{s=0}^{r}a_{s}\left(
t^{2}\right)  ^{s}\label{pf.lem.pk.odd.7}%
\end{equation}
for some $r\in\mathbb{N}$ and some coefficients $a_{0},a_{1},\ldots,a_{r}%
\in\mathbb{Z}$. Consider these $r$ and $a_{0},a_{1},\ldots,a_{r}$. Now,
substituting (\ref{pf.lem.pk.odd.7}) into (\ref{pf.lem.pk.odd.5}), we find%
\[
p_{k}\left(  t\right)  =t\sum_{s=0}^{r}a_{s}\left(  t^{2}\right)  ^{s}%
=\sum_{s=0}^{r}a_{s}\underbrace{t\left(  t^{2}\right)  ^{s}}_{=t^{2s+1}}%
=\sum_{s=0}^{r}a_{s}t^{2s+1}.
\]
This shows that $p_{k}\left(  t\right)  $ is a $\mathbb{Z}$-linear combination
of odd powers of $t$ (since $2s+1$ is always odd when $s\in\mathbb{N}$). In
other words, $p_{k}\left(  t\right)  $ is an odd polynomial in $t$. This
proves Lemma \ref{lem.pk.odd}.
\end{proof}

\begin{lemma}
\label{lem.pk.-1rec}Let $k$ be a positive integer. Then,%
\begin{equation}
p_{k+1}\left(  t\right)  =\left(  t+k\right)  \left(  t-k\right)  p_{k}\left(
t\right)  \label{eq.lem.pk.-1rec.0}%
\end{equation}
and%
\begin{equation}
p_{k+1}\left(  t\right)  =\left(  t+k\right)  \left(  t+k-1\right)
p_{k}\left(  t-1\right)  . \label{eq.lem.pk.-1rec.1}%
\end{equation}

\end{lemma}

\begin{proof}
The definition of $p_{k}$ yields $p_{k}\left(  t\right)  =\prod_{i=-k+1}%
^{k-1}\left(  t-i\right)  $. Substituting $t-1$ for $t$ in this equality, we
obtain%
\begin{align}
p_{k}\left(  t-1\right)   &  =\prod_{i=-k+1}^{k-1}\underbrace{\left(  \left(
t-1\right)  -i\right)  }_{=t-\left(  i+1\right)  }=\prod_{i=-k+1}^{k-1}\left(
t-\left(  i+1\right)  \right) \nonumber\\
&  =\prod_{i=-k+2}^{k}\left(  t-i\right)  \label{pf.lem.pk.-1rec.1}%
\end{align}
(here, we have substituted $i$ for $i+1$ in the product).

The definition of $p_{k+1}$ yields%
\begin{align*}
p_{k+1}\left(  t\right)    & =\prod_{i=-\left(  k+1\right)  +1}^{\left(
k+1\right)  -1}\left(  t-i\right)  =\prod_{i=-k}^{k}\left(  t-i\right)
\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since }-\left(  k+1\right)  +1=-k\\
\text{and }\left(  k+1\right)  -1=k
\end{array}
\right)  \\
& =\left(  t-\left(  -k\right)  \right)  \left(  \prod_{i=-k+1}^{k-1}\left(
t-i\right)  \right)  \left(  t-k\right)
\end{align*}
(here, we have split off the factors for $i=-k$ and for $i=k$ from the
product). Thus,%
\begin{align*}
p_{k+1}\left(  t\right)   &  =\underbrace{\left(  t-\left(  -k\right)
\right)  }_{=t+k}\underbrace{\left(  \prod_{i=-k+1}^{k-1}\left(  t-i\right)
\right)  }_{\substack{=p_{k}\left(  t\right)  \\\text{(by the definition of
}p_{k}\text{)}}}\left(  t-k\right)  \\
&  =\left(  t+k\right)  \cdot p_{k}\left(  t\right)  \cdot\left(  t-k\right)
=\left(  t+k\right)  \left(  t-k\right)  p_{k}\left(  t\right)  ,
\end{align*}
which proves (\ref{eq.lem.pk.-1rec.0}).

Furthermore, as we have already seen,%
\[
p_{k+1}\left(  t\right)  =\prod_{i=-k}^{k}\left(  t-i\right)  =\left(
t-\left(  -k\right)  \right)  \left(  t-\left(  -k+1\right)  \right)
\prod_{i=-k+2}^{k}\left(  t-i\right)
\]
(here, we have split off the factors for $i=-k$ and for $i=-k+1$ from the
product). Thus,%
\[
p_{k+1}\left(  t\right)  =\underbrace{\left(  t-\left(  -k\right)  \right)
}_{=t+k}\underbrace{\left(  t-\left(  -k+1\right)  \right)  }_{=t+k-1}%
\underbrace{\prod_{i=-k+2}^{k}\left(  t-i\right)  }_{\substack{=p_{k}\left(
t-1\right)  \\\text{(by (\ref{pf.lem.pk.-1rec.1}))}}}=\left(  t+k\right)
\left(  t+k-1\right)  p_{k}\left(  t-1\right)  .
\]
This proves (\ref{eq.lem.pk.-1rec.1}). Thus the proof of Lemma
\ref{lem.pk.-1rec} is complete.
\end{proof}

We shall now state the crucial lemma:

\begin{lemma}
\label{lem.xys}Let $k$ be a positive integer. Let $x,y,s$ be three elements of
a ring $R$ (not necessarily commutative) that satisfy the conditions
\begin{align}
xy  &  =yx,\label{eq.lem.xys.xy}\\
s^{2}  &  =1,\label{eq.lem.xys.s}\\
sy  &  =xs+1,\label{eq.lem.xys.sy}\\
p_{k}\left(  x\right)   &  =0. \label{eq.lem.xys.pkx}%
\end{align}
Then,%
\begin{equation}
p_{k}\left(  y\right)  \cdot p_{k+1}\left(  y\right)  =0
\label{eq.lem.xys.2pkpk+1}%
\end{equation}
and
\begin{equation}
p_{k+1}^{2}\left(  y\right)  =0. \label{eq.lem.xys.2pk+12}%
\end{equation}

\end{lemma}

\begin{proof}
The condition (\ref{eq.lem.xys.xy}) shows that the elements $x$ and $y$ of $R$
commute. Thus, the $\mathbb{Z}$-subalgebra of $R$ generated by $x$ and $y$ is
commutative. Let us denote this commutative $\mathbb{Z}$-subalgebra by $C$. Of
course, $x,y\in C$.

From $s^{2}=1$, we obtain $s^{2}ys=1ys=ys$, so that%
\begin{align}
ys  &  =s^{2}ys=s\underbrace{sy}_{\substack{=xs+1\\\text{(by
(\ref{eq.lem.xys.sy}))}}}s=s\left(  xs+1\right)  s=sx\underbrace{ss}%
_{=s^{2}=1}+\underbrace{ss}_{=s^{2}=1}\nonumber\\
&  =sx+1. \label{pf.lem.xys.ys}%
\end{align}


There is a certain symmetry in our situation: If we read all the products in
$R$ backwards (i.e., replace each product $r_{1}r_{2}\cdots r_{k}$ by
$r_{k}r_{k-1}\cdots r_{1}$), then the conditions (\ref{eq.lem.xys.xy}),
(\ref{eq.lem.xys.s}), (\ref{eq.lem.xys.sy}) and (\ref{eq.lem.xys.pkx}) remain
true. Indeed, the equalities (\ref{eq.lem.xys.s}) and (\ref{eq.lem.xys.pkx})
remain unchanged because they only contain polynomials of a single element
($s$ or $x$, respectively); the equality (\ref{eq.lem.xys.xy}) turns into
$yx=xy$, which is equivalent to it; and finally, the equality
(\ref{eq.lem.xys.sy}) turns into (\ref{pf.lem.xys.ys}), which we know to be
true. This symmetry will be useful to us later; we call it the \emph{reversal
symmetry}.

Now we claim the following:

\begin{statement}
\textit{Claim 1:} For any polynomial $f\in\mathbb{Z}\left[  t\right]  $, we
have%
\[
f\left(  y\right)  -f\left(  x\right)  =\left(  sf\left(  y\right)  -f\left(
x\right)  s\right)  \left(  y-x\right)  .
\]

\end{statement}

\begin{proof}
[Proof of Claim 1.]Both sides of this equality are $\mathbb{Z}$-linear in $f$.
Thus, by linearity, we can WLOG assume that $f$ is a monomial $t^{m}$ for some
$m\in\mathbb{N}$ (since any polynomial $f\in\mathbb{Z}\left[  t\right]  $ is a
$\mathbb{Z}$-linear combination of monomials). In other words, it suffices to
prove that%
\begin{equation}
y^{m}-x^{m}=\left(  sy^{m}-x^{m}s\right)  \left(  y-x\right)
\label{pf.lem.xys.c1.m}%
\end{equation}
for each $m\in\mathbb{N}$.

So let us prove (\ref{pf.lem.xys.c1.m}). We induct on $m$:

\textit{Base case:} For $m=0$, the equality (\ref{pf.lem.xys.c1.m}) holds
because both of its sides are $0$.

\textit{Induction step:} Let $m\in\mathbb{N}$. Assume (as the induction
hypothesis) that (\ref{pf.lem.xys.c1.m}) holds for $m$. We must prove that
(\ref{pf.lem.xys.c1.m}) also holds for $m+1$ instead of $m$. That is, we must
prove that $y^{m+1}-x^{m+1}=\left(  sy^{m+1}-x^{m+1}s\right)  \left(
y-x\right)  $. But this follows from%
\begin{align*}
&  \underbrace{\left(  sy^{m+1}-x^{m+1}s\right)  }_{\substack{=sy^{m}%
y-x^{m}xs\\=\left(  sy^{m}-x^{m}s\right)  y+x^{m}\left(  sy-xs\right)
}}\left(  y-x\right) \\
&  =\left(  \left(  sy^{m}-x^{m}s\right)  y+x^{m}\left(  sy-xs\right)
\right)  \left(  y-x\right) \\
&  =\left(  sy^{m}-x^{m}s\right)  \underbrace{y\left(  y-x\right)
}_{\substack{=\left(  y-x\right)  y\\\text{(since }x\text{ and }y\text{ belong
to}\\\text{the commutative ring }C\text{)}}}+\,x^{m}\underbrace{\left(
sy-xs\right)  }_{\substack{=1\\\text{(by (\ref{eq.lem.xys.sy}))}}}\left(
y-x\right) \\
&  =\underbrace{\left(  sy^{m}-x^{m}s\right)  \left(  y-x\right)
}_{\substack{=y^{m}-x^{m}\\\text{(by (\ref{pf.lem.xys.c1.m}),}\\\text{since we
assumed that (\ref{pf.lem.xys.c1.m})}\\\text{holds for }m\text{)}}%
}y+x^{m}\left(  y-x\right) \\
&  =\left(  y^{m}-x^{m}\right)  y+x^{m}\left(  y-x\right)  =y^{m}%
y-x^{m}x=y^{m+1}-x^{m+1}.
\end{align*}
Thus, the induction step is complete, and (\ref{pf.lem.xys.c1.m}) is proved.
Hence, Claim 1 follows.
\end{proof}

\begin{statement}
\textit{Claim 2:} For any polynomial $f\in\mathbb{Z}\left[  t\right]  $, we
have%
\[
f\left(  y\right)  -f\left(  x\right)  =\left(  y-x\right)  \left(  f\left(
y\right)  s-sf\left(  x\right)  \right)  .
\]

\end{statement}

\begin{proof}
[Proof of Claim 2.]Reversal symmetry shows that if we read all the products in
Claim 1 backwards, then we still obtain a true fact. But this fact is
precisely Claim 2.

(To put it differently: A proof of Claim 2 can be obtained from the above
proof of Claim 1 by reading all products backwards, and by using
(\ref{pf.lem.xys.ys}) instead of (\ref{eq.lem.xys.sy}).)
\end{proof}

Now, define two elements%
\[
u:=p_{k}\left(  y\right)  \in C\ \ \ \ \ \ \ \ \ \ \left(  \text{since }y\in
C\right)
\]
and
\[
d:=y-x\in C\ \ \ \ \ \ \ \ \ \ \left(  \text{since }x,y\in C\right)  .
\]
Thus, $du=ud$ (since $d$ and $u$ both lie in the commutative ring $C$).

Now we claim:

\begin{statement}
\textit{Claim 3:} We have%
\[
su=ud=du=us.
\]

\end{statement}

\begin{proof}
[Proof of Claim 3.]Applying Claim 1 to $f=p_{k}$, we obtain%
\[
p_{k}\left(  y\right)  -p_{k}\left(  x\right)  =\left(  sp_{k}\left(
y\right)  -p_{k}\left(  x\right)  s\right)  \left(  y-x\right)  .
\]
Since $p_{k}\left(  x\right)  =0$ (by (\ref{eq.lem.xys.pkx})) and
$p_{k}\left(  y\right)  =u$ (by the definition of $u$) and $y-x=d$ (by the
definition of $d$), we can rewrite this as%
\[
u-0=\left(  su-0s\right)  d.
\]
In other words, $u=sud$. Multiplying this equality by $s$ from the left, we
find%
\begin{equation}
su=\underbrace{ss}_{=s^{2}=1}ud=ud.\label{pf.lem.xys.c3.4}%
\end{equation}
By reversal symmetry, the same must hold if we read all the products
backwards; that is, we have%
\begin{equation}
us=du.\label{pf.lem.xys.c3.5}%
\end{equation}
(Alternatively, this can be obtained from Claim 2 in the same way as
(\ref{pf.lem.xys.c3.4}) was derived from Claim 1.) Recall that $du=ud$, so
that $ud=du$. Altogether,%
\[
su=ud=du=us\ \ \ \ \ \ \ \ \ \ \left(  \text{by (\ref{pf.lem.xys.c3.5}%
)}\right)  .
\]
This proves Claim 3.
\end{proof}

\begin{statement}
\textit{Claim 4:} We have $d^{2}u=u$.
\end{statement}

\begin{proof}
[Proof of Claim 4.]Claim 3 yields $du=us$. Thus,%
\[
d^{2}u=d\underbrace{du}_{=us}=\underbrace{du}_{=us}s=u\underbrace{ss}%
_{=s^{2}=1}=u.
\]
This proves Claim 4.
\end{proof}

\begin{statement}
\textit{Claim 5:} We have $\left(  dy-1\right)  u=dxu$.
\end{statement}

\begin{proof}
[Proof of Claim 5.]We have
\begin{align*}
\left(  dy-1\right)  u-dxu &  =dyu-u-dxu=\underbrace{dyu-dxu}_{=d\left(
y-x\right)  u}-\,u\\
&  =d\underbrace{\left(  y-x\right)  }_{\substack{=d\\\text{(by the definition
of }d\text{)}}}u-u=\underbrace{ddu}_{\substack{=d^{2}u=u\\\text{(by Claim 4)}%
}}-\,u=u-u=0.
\end{align*}
That is, $\left(  dy-1\right)  u=dxu$. This proves Claim 5.
\end{proof}

\begin{statement}
\textit{Claim 6:} For any polynomial $f\in\mathbb{Z}\left[  t\right]  $, we
have%
\[
f\left(  dy-1\right)  \cdot u=f\left(  dx\right)  \cdot u.
\]

\end{statement}

\begin{proof}
[Proof of Claim 6.]Both sides of this equality are $\mathbb{Z}$-linear in $f$.
Thus, it suffices to prove this equality in the case when $f$ is a monomial
$t^{m}$ for some $m\in\mathbb{N}$. In other words, it suffices to prove that%
\begin{equation}
\left(  dy-1\right)  ^{m}\cdot u=\left(  dx\right)  ^{m}\cdot u
\label{pf.lem.xys.c6.m}%
\end{equation}
for each $m\in\mathbb{N}$. We shall prove (\ref{pf.lem.xys.c6.m}) by induction
on $m$:

\textit{Base case:} For $m=0$, the equality (\ref{pf.lem.xys.c6.m}) is simply
saying that $u=u$, which is clearly true.

\textit{Induction step:} Let $m\in\mathbb{N}$. Assume (as the induction
hypothesis) that $\left(  dy-1\right)  ^{m}\cdot u=\left(  dx\right)
^{m}\cdot u$. We must then prove that $\left(  dy-1\right)  ^{m+1}\cdot
u=\left(  dx\right)  ^{m+1}\cdot u$.

The elements $d,x,u$ all lie in the commutative ring $C$, and thus all
commute. Hence, $dxu=udx$. Now,%
\begin{align*}
\underbrace{\left(  dy-1\right)  ^{m+1}}_{=\left(  dy-1\right)  ^{m}%
\cdot\left(  dy-1\right)  }\cdot\,u  &  =\left(  dy-1\right)  ^{m}%
\cdot\underbrace{\left(  dy-1\right)  u}_{\substack{=dxu\\\text{(by Claim 5)}%
}}=\left(  dy-1\right)  ^{m}\cdot\underbrace{dxu}_{=udx}\\
&  =\underbrace{\left(  dy-1\right)  ^{m}\cdot u}_{\substack{=\left(
dx\right)  ^{m}\cdot u\\\text{(by the induction hypothesis)}}}dx=\left(
dx\right)  ^{m}\cdot\underbrace{udx}_{\substack{=dxu\\\text{(since
}dxu=udx\text{)}}}\\
&  =\underbrace{\left(  dx\right)  ^{m}\cdot dx}_{=\left(  dx\right)  ^{m+1}%
}u=\left(  dx\right)  ^{m+1}\cdot u.
\end{align*}
This completes the induction step. Thus, (\ref{pf.lem.xys.c6.m}) is proved by
induction. Thus, the proof of Claim 6 is complete.
\end{proof}

\begin{statement}
\textit{Claim 7:} For any even $m\in\mathbb{N}$, we have $d^{m}u=u$.
\end{statement}

\begin{proof}
[Proof of Claim 7.]In other words, we must show that $d^{2i}u=u$ for each
$i\in\mathbb{N}$ (since any even $m\in\mathbb{N}$ can be written as $2i$ for
some $i\in\mathbb{N}$). But this follows by a simple induction on $i$. (The
\textit{base case} $i=0$ is obvious. The \textit{induction step} from $i$ to
$i+1$ proceeds by assuming that $d^{2i}u=u$, and arguing that
$\underbrace{d^{2\left(  i+1\right)  }}_{=d^{2i+2}=d^{2i}d^{2}}u=d^{2i}%
\underbrace{d^{2}u}_{\substack{=u\\\text{(by Claim 4)}}}=d^{2i}u=u$.) Thus,
Claim 7 follows.
\end{proof}

\begin{statement}
\textit{Claim 8:} For any odd polynomial $f\in\mathbb{Z}\left[  t\right]  $,
we have%
\[
f\left(  y\right)  u=f\left(  dy\right)  du.
\]

\end{statement}

\begin{proof}
[Proof of Claim 8.]Both sides of this equality are $\mathbb{Z}$-linear in $f$.
Thus, it suffices to prove this equality in the case when $f$ is a monomial
$t^{m}$ for some odd $m\in\mathbb{N}$ (since any odd polynomial $f\in
\mathbb{Z}\left[  t\right]  $ is a $\mathbb{Z}$-linear combination of such
monomials). In other words, it suffices to prove that%
\begin{equation}
y^{m}u=\left(  dy\right)  ^{m}du \label{pf.lem.xys.c7.g}%
\end{equation}
for each odd $m\in\mathbb{N}$.

So let us do this. Let $m\in\mathbb{N}$ be odd. Then, $m+1$ is even, so that
Claim 7 (applied to $m+1$ instead of $m$) yields $d^{m+1}u=u$. But the
elements $y,u,d$ all lie in the commutative ring $C$, so all their products
and powers commute. Hence, $\left(  dy\right)  ^{m}=d^{m}y^{m}=y^{m}d^{m}$ and
therefore%
\[
\left(  dy\right)  ^{m}du=y^{m}\underbrace{d^{m}d}_{=d^{m+1}}u=y^{m}%
\underbrace{d^{m+1}u}_{=u}=y^{m}u.
\]
This proves (\ref{pf.lem.xys.c7.g}). Thus, the proof of Claim 8 is complete.
\end{proof}

\begin{statement}
\textit{Claim 9:} For any odd polynomial $f\in\mathbb{Z}\left[  t\right]  $,
we have%
\[
f\left(  x\right)  u=f\left(  dx\right)  du.
\]

\end{statement}

\begin{proof}
[Proof of Claim 9.]Analogous to Claim 8, just using $x$ instead of $y$.
\end{proof}

Now, Lemma \ref{lem.pk.odd} shows that the polynomial $p_{k}$ is odd. Hence,
Claim 9 (applied to $f=p_{k}$) yields $p_{k}\left(  x\right)  u=p_{k}\left(
dx\right)  du$. Thus,%
\begin{equation}
p_{k}\left(  dx\right)  du=\underbrace{p_{k}\left(  x\right)  }%
_{\substack{=0\\\text{(by (\ref{eq.lem.xys.pkx}))}}}u=0.
\label{pf.lem.xys.pkdxdu}%
\end{equation}


But $ud=du$ (since both $d$ and $u$ lie in the commutative ring $C$) and thus
$dud=ddu=d^{2}u=u$ (by Claim 4), so that $u=dud$. Hence,%
\begin{equation}
p_{k}\left(  dx\right)  \cdot\underbrace{u}_{=dud}=\underbrace{p_{k}\left(
dx\right)  du}_{\substack{=0\\\text{(by (\ref{pf.lem.xys.pkdxdu}))}}}d=0.
\label{pf.lem.xys.pkdxu}%
\end{equation}


On the other hand, Lemma \ref{lem.pk.odd} (applied to $k+1$ instead of $k$)
shows that the polynomial $p_{k+1}$ is odd. Hence, Claim 8 (applied to
$f=p_{k+1}$) yields%
\begin{equation}
p_{k+1}\left(  y\right)  u=p_{k+1}\left(  dy\right)  du=p_{k+1}\left(
dy\right)  ud \label{pf.lem.xys.pk+1yu}%
\end{equation}
(since $du=ud$).

Substituting $dy$ for $t$ in the equality (\ref{eq.lem.pk.-1rec.1}), we find%
\[
p_{k+1}\left(  dy\right)  =\left(  dy+k\right)  \left(  dy+k-1\right)
p_{k}\left(  dy-1\right)  .
\]
Multiplying both sides of this equality by $u$, we obtain%
\begin{align*}
p_{k+1}\left(  dy\right)  \cdot u  &  =\left(  dy+k\right)  \left(
dy+k-1\right)  \underbrace{p_{k}\left(  dy-1\right)  \cdot u}%
_{\substack{=p_{k}\left(  dx\right)  \cdot u\\\text{(by Claim 6, applied to
}f=p_{k}\text{)}}}\\
&  =\left(  dy+k\right)  \left(  dy+k-1\right)  \underbrace{p_{k}\left(
dx\right)  \cdot u}_{\substack{=0\\\text{(by (\ref{pf.lem.xys.pkdxu}))}}}=0.
\end{align*}
Hence, (\ref{pf.lem.xys.pk+1yu}) becomes%
\[
p_{k+1}\left(  y\right)  u=p_{k+1}\left(  dy\right)  ud=\underbrace{p_{k+1}%
\left(  dy\right)  \cdot u}_{=0}\cdot\,d=0.
\]
In view of $u=p_{k}\left(  y\right)  $, we can rewrite this as%
\[
p_{k+1}\left(  y\right)  \cdot p_{k}\left(  y\right)  =0.
\]
Since $p_{k}\left(  y\right)  $ and $p_{k+1}\left(  y\right)  $ both lie in
the commutative ring $C$ (since $y\in C$), we have%
\[
p_{k}\left(  y\right)  \cdot p_{k+1}\left(  y\right)  =p_{k+1}\left(
y\right)  \cdot p_{k}\left(  y\right)  =0.
\]
This proves (\ref{eq.lem.xys.2pkpk+1}).

Furthermore, substituting $y$ for $t$ in the equality (\ref{eq.lem.pk.-1rec.0}%
), we obtain%
\[
p_{k+1}\left(  y\right)  =\left(  y+k\right)  \left(  y-k\right)  p_{k}\left(
y\right)  .
\]
Now,%
\[
p_{k+1}^{2}\left(  y\right)  =\underbrace{p_{k+1}\left(  y\right)  }_{=\left(
y+k\right)  \left(  y-k\right)  p_{k}\left(  y\right)  }\cdot\,p_{k+1}\left(
y\right)  =\left(  y+k\right)  \left(  y-k\right)  \cdot\underbrace{p_{k}%
\left(  y\right)  \cdot p_{k+1}\left(  y\right)  }_{\substack{=0\\\text{(by
(\ref{eq.lem.xys.2pkpk+1}))}}}=0.
\]
This proves (\ref{eq.lem.xys.2pk+12}). Thus, Lemma \ref{lem.xys} is proved.
\end{proof}

The above proof has been obtained in collaboration with GPT-5.4 and Claude
Opus 4.6. Neither model came up with a correct proof on its own; both made
repeated mistakes of the \textquotedblleft division by zero\textquotedblright%
\ kind (e.g., dividing by $y-x$ or by $y-x-1$, neither of which is guaranteed
to be invertible), along with occasionally more basic computational mistakes.
Yet, the ideas of Claim 1 and of studying the element $u=p_{k}\left(
y\right)  $ were suggested by the models.

We note that $p_{k+1}\left(  y\right)  $ is not always $0$ in the general
setup of Lemma \ref{lem.xys}; counterexamples can be computed using
Gr\"{o}bner bases (see \url{https://mathoverflow.net/questions/420318}).

\section{Proving Theorem \ref{thm.yjm.poly=0}}

We are now close to proving Theorem \ref{thm.yjm.poly=0} by induction on $k$.
In the induction step from $k$ to $k+1$, we shall apply Lemma \ref{lem.xys} to
$R=\mathbb{Z}\left[  S_{n}\right]  $ and $s=t_{k,k+1}$ and $x=J_{k}$ and
$y=J_{k+1}$. In order to justify this, we need the following:

\begin{lemma}
\label{lem.yjm.sy}Let $k\in\left[  n-1\right]  $. Set $s_{k}:=t_{k,k+1}\in
S_{n}$. Then, in $\mathbf{k}\left[  S_{n}\right]  $, we have%
\[
s_{k}J_{k+1}=J_{k}s_{k}+1.
\]

\end{lemma}

\begin{proof}
It is easy to see that each $i\in\left[  k-1\right]  $ satisfies%
\begin{equation}
s_{k}t_{i,k+1}=t_{i,k}s_{k}\label{pf.lem.yjm.sy.cyc}%
\end{equation}
(in fact, both sides of this equality equal the $3$-cycle $\operatorname*{cyc}%
\nolimits_{i,k,k+1}\in S_{n}$ that sends the elements $i,k,k+1$ to $k,k+1,i$
and leaves all other elements of $\left[  n\right]  $ unchanged).

Furthermore, $s_{k}=t_{k,k+1}$, so that $s_{k}t_{k,k+1}=t_{k,k+1}%
t_{k,k+1}=t_{k,k+1}^{2}=\operatorname*{id}$ (since $t_{k,k+1}$ is a
transposition, and thus squares to the identity).

By the definition of $J_{k}$, we have $J_{k}=\sum_{i=1}^{k-1}t_{i,k}$. Hence,%
\begin{equation}
J_{k}s_{k}=\left(  \sum_{i=1}^{k-1}t_{i,k}\right)  s_{k}=\sum_{i=1}%
^{k-1}t_{i,k}s_{k}. \label{pf.lem.yjm.sy.2}%
\end{equation}


By the definition of $J_{k+1}$, we have%
\[
J_{k+1}=\sum_{i=1}^{\left(  k+1\right)  -1}t_{i,k+1}=\sum_{i=1}^{k}%
t_{i,k+1}=\sum_{i=1}^{k-1}t_{i,k+1}+t_{k,k+1}%
\]
(here, we have split off the addend for $i=k$ from the sum). Hence,%
\begin{align*}
s_{k}J_{k+1}  & =s_{k}\left(  \sum_{i=1}^{k-1}t_{i,k+1}+t_{k,k+1}\right)
=\sum_{i=1}^{k-1}\underbrace{s_{k}t_{i,k+1}}_{\substack{=t_{i,k}%
s_{k}\\\text{(by (\ref{pf.lem.yjm.sy.cyc}))}}}+\underbrace{s_{k}t_{k,k+1}%
}_{=\operatorname*{id}=1}=\underbrace{\sum_{i=1}^{k-1}t_{i,k}s_{k}%
}_{\substack{=J_{k}s_{k}\\\text{(by (\ref{pf.lem.yjm.sy.2}))}}}+\,1\\
& =J_{k}s_{k}+1.
\end{align*}
This proves Lemma \ref{lem.yjm.sy}.
\end{proof}

\begin{proof}
[Proof of Theorem \ref{thm.yjm.poly=0}.]We must prove that $\prod
_{i=-k+1}^{k-1}\left(  J_{k}-i\right)  =0$ for each $k\in\left[  n\right]  $.
Let us first prove this in the case when $\mathbf{k}=\mathbb{Z}$. Thus, we
assume that $\mathbf{k}=\mathbb{Z}$.

We must prove that $\prod_{i=-k+1}^{k-1}\left(  J_{k}-i\right)  =0$ for each
$k\in\left[  n\right]  $. In other words, we must prove that
\begin{equation}
p_{k}\left(  J_{k}\right)  =0\ \ \ \ \ \ \ \ \ \ \text{for each }k\in\left[
n\right]  \label{pf.thm.yjm.poly=0.pk}%
\end{equation}
(since the definition of $p_{k}$ yields $p_{k}\left(  J_{k}\right)
=\prod_{i=-k+1}^{k-1}\left(  J_{k}-i\right)  $).

We shall prove (\ref{pf.thm.yjm.poly=0.pk}) by induction on $k$:

\textit{Base case:} For $k=1$, we have $p_{k}\left(  t\right)  =p_{1}\left(
t\right)  =t$ and thus $p_{k}\left(  J_{k}\right)  =J_{k}=J_{1}=0$. Hence,
(\ref{pf.thm.yjm.poly=0.pk}) is proved for $k=1$.

\textit{Induction step:} Fix $k\in\left[  n-1\right]  $. Assume (as the
induction hypothesis) that (\ref{pf.thm.yjm.poly=0.pk}) holds for $k$. We must
prove that (\ref{pf.thm.yjm.poly=0.pk}) also holds for $k+1$ instead of $k$.
In other words, we must prove that $p_{k+1}\left(  J_{k+1}\right)  =0$.

Our induction hypothesis says that $p_{k}\left(  J_{k}\right)  =0$.

Set $s_{k}:=t_{k,k+1}\in S_{n}$. Then, $s_{k}^{2}=1$ (since $s_{k}=t_{k,k+1}$
is a transposition) and $J_{k}J_{k+1}=J_{k+1}J_{k}$ (since the elements
$J_{1},J_{2},\ldots,J_{n}$ commute) and $s_{k}J_{k+1}=J_{k}s_{k}+1$ (by Lemma
\ref{lem.yjm.sy}). Therefore, Lemma \ref{lem.xys} (applied to $R=\mathbf{k}%
\left[  S_{n}\right]  $ and $x=J_{k}$ and $y=J_{k+1}$ and $s=s_{k}$) yields%
\[
p_{k}\left(  J_{k+1}\right)  \cdot p_{k+1}\left(  J_{k+1}\right)  =0
\]
and
\begin{equation}
p_{k+1}^{2}\left(  J_{k+1}\right)  =0.\label{pf.thm.yjm.poly=0.2pk+12}%
\end{equation}


On the other hand, Lemma \ref{lem.S.J} (applied to $k+1$ instead of $k$)
yields $S\left(  J_{k+1}\right)  =J_{k+1}$. But $S$ is a $\mathbf{k}$-algebra
anti-morphism (by Theorem \ref{thm.S.auto} \textbf{(a)}); thus, each
$a\in\mathbf{k}\left[  S_{n}\right]  $ and each polynomial $f\in
\mathbb{Z}\left[  t\right]  $ satisfy $S\left(  f\left(  a\right)  \right)
=f\left(  S\left(  a\right)  \right)  $ (since $\mathbf{k}$-algebra
anti-morphisms respect polynomials, just like $\mathbf{k}$-algebra morphisms
do). Applying this to $a=J_{k+1}$ and $f=p_{k+1}$, we obtain%
\[
S\left(  p_{k+1}\left(  J_{k+1}\right)  \right)  =p_{k+1}\left(
\underbrace{S\left(  J_{k+1}\right)  }_{\substack{=J_{k+1}}}\right)
=p_{k+1}\left(  J_{k+1}\right)  .
\]
Therefore,%
\begin{align*}
\underbrace{S\left(  p_{k+1}\left(  J_{k+1}\right)  \right)  }_{=p_{k+1}%
\left(  J_{k+1}\right)  }\cdot\,p_{k+1}\left(  J_{k+1}\right)   &
=p_{k+1}\left(  J_{k+1}\right)  \cdot p_{k+1}\left(  J_{k+1}\right)  =\left(
p_{k+1}\left(  J_{k+1}\right)  \right)  ^{2}\\
&  =p_{k+1}^{2}\left(  J_{k+1}\right)  =0\ \ \ \ \ \ \ \ \ \ \left(  \text{by
(\ref{pf.thm.yjm.poly=0.2pk+12})}\right)  .
\end{align*}
Therefore, Lemma \ref{lem.S.fr} (applied to $a=p_{k+1}\left(  J_{k+1}\right)
$) yields
\[
p_{k+1}\left(  J_{k+1}\right)  =0
\]
(since the ring $\mathbf{k}=\mathbb{Z}$ is formally real). This completes the
induction step. Thus, (\ref{pf.thm.yjm.poly=0.pk}) is proved by induction.

As we said above, this completes the proof of Theorem \ref{thm.yjm.poly=0} for
$\mathbf{k}=\mathbb{Z}$. In other words, we have now proved that for each
$k\in\left[  n\right]  $, we have%
\begin{equation}
\prod_{i=-k+1}^{k-1}\left(  J_{k}-i\right)  =0\ \ \ \ \ \ \ \ \ \ \text{in
}\mathbb{Z}\left[  S_{n}\right]  .\label{pf.thm.yjm.poly=0.Z}%
\end{equation}


Now let us return to the general case, in which $\mathbf{k}$ is an arbitrary
commutative ring. There is a canonical ring morphism $g:\mathbb{Z}%
\rightarrow\mathbf{k}$, and this morphism $g$ induces a base change morphism
\[
g_{\ast}:\mathbb{Z}\left[  S_{n}\right]  \rightarrow\mathbf{k}\left[
S_{n}\right]
\]
between the group rings of $S_{n}$ over $\mathbb{Z}$ and $\mathbf{k}$ (see
\cite[Definition 3.12.22]{sga}). The latter morphism $g_{\ast}$ clearly sends
each Young--Jucys--Murphy element $J_{k}$ of $\mathbb{Z}\left[  S_{n}\right]
$ to the corresponding element $J_{k}$ of $\mathbf{k}\left[  S_{n}\right]  $
(since it preserves each transposition $t_{i,k}$). Thus, applying this
morphism $g_{\ast}$ to both sides of the equality (\ref{pf.thm.yjm.poly=0.Z}),
we obtain%
\[
\prod_{i=-k+1}^{k-1}\left(  J_{k}-i\right)  =0\ \ \ \ \ \ \ \ \ \ \text{in
}\mathbf{k}\left[  S_{n}\right]  .
\]
This proves Theorem \ref{thm.yjm.poly=0}, now in the general case.
\end{proof}

\begin{thebibliography}{99999999}                                                                                         %


\bibitem[GarEge20]{GarEge20}%
\href{https://doi.org/10.1007/978-3-030-58373-6}{Adriano M. Garsia, \"{O}mer
E\u{g}ecio\u{g}lu, \textit{Lectures in Algebraic Combinatorics}, Springer
2020.}

\bibitem[Grinbe25]{sga}\href{https://arxiv.org/abs/2507.20706v1}{Darij
Grinberg, \textit{An introduction to the symmetric group algebra},
arXiv:2507.20706v1.}\newline A newer version is available at
\url{https://www.cip.ifi.lmu.de/~grinberg/t/24s/sga.pdf} .

\bibitem[Lascou02]{Lascou02}Alain Lascoux, \textit{The symmetric group}, draft
lecture notes, 11 July 2002.\newline\url{https://libgen.li/edition.php?id=137290539}

\bibitem[Makhli11]{Makhlin-yjm}Igor Makhlin, \textit{Answer to MathOverflow
question \#83150},\newline\url{https://mathoverflow.net/questions/83150/why-are-jucys-murphy-elements-eigenvalues-whole-numbers/83493#83493}

\bibitem[Murphy81]{Murphy81}%
\href{https://doi.org/10.1016/0021-8693(81)90205-2}{G. E. Murphy, \textit{A
New Construction of Young's Seminormal Representation of the Symmetric
Groups}, Journal of Algebra \textbf{69} (1981), pp. 287--297.}

\bibitem[Murphy92]{Murphy92}%
\href{https://doi.org/10.1016/0021-8693(92)90045-N}{G. E. Murphy, \textit{On
the representation theory of the symmetric groups and associated Hecke
algebras}, Journal of Algebra \textbf{152} (1992), issue 2, pp. 492--513.}

\bibitem[VerOko05]{VerOko05}\href{https://arxiv.org/abs/math/0503040v3}{A. M.
Vershik, A. Yu. Okounkov, \textit{A New Approach to the Representation Theory
of the Symmetric Groups. 2}, Journal of Mathematical Sciences \textbf{131}
(2005), no. 2, pp. 5471--5494 (English translation of a paper published in:
Zapiski Nauchnykh Seminarov POMI \textbf{307} (2004), pp. 57--98),
arXiv:math/0503040v3.}
\end{thebibliography}


\end{document}