\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{framed}
\usepackage{amsmath}
\usepackage{comment}
\usepackage{color}
\usepackage{hyperref}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{amsthm}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Wednesday, January 29, 2020 23:27:52}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
% Each user-facing environment below wraps an amsthm theorem environment in a
% framed "leftbar". The single optional argument (default empty) is the
% theorem's name/note, forwarded to the optional argument of the inner
% environment.
% NOTE(review): the argument specifier was the invalid "[]" in the original;
% since each body uses #1, the declaration must be "[1][]" (one argument,
% optional, empty default).
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{algorithm}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exmp}[theo]{Example}
% NOTE(review): the "[]" specifier was invalid; the body uses #1, so the
% environment takes one optional argument with empty default ("[1][]").
\newenvironment{example}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\iffalse
\newenvironment{proof}[Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\kk}{\mathbf{k}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\ev}{\operatorname{ev}}
\newcommand{\Comp}{\operatorname{Comp}}
\newcommand{\bk}{\mathbf{k}}
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\let\bigcapnonlimits\bigcap
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\renewcommand{\bigcap}{\bigcapnonlimits\limits}
\setlength\textheight{22.5cm}
\setlength\textwidth{15cm}
% NOTE(review): the running head was missing its opening quotation marks.
\ihead{Marginalia to ``Structure theory for the group algebra ...''}
\ohead{\today}
\begin{document}
\begin{center}
\textbf{\href{https://eudml.org/doc/287582}{\textbf{Structure theory for the
group algebra of the symmetric group, with applications to polynomial
identities for the octonions}}}
\textit{Murray R. Bremner, Sara Madariaga, Luiz A. Peresi}
Comment. Math. Univ. Carolin. \textbf{57},4 (2016), pp. 413--452.
version of 5 December 2016
\textbf{Errata by Darij Grinberg}
\bigskip
\end{center}
\setcounter{section}{5}
\section*{Marginalia}
The following are my comments on specific places in the paper
\textquotedblleft\href{https://eudml.org/doc/287582}{Structure theory for
the group algebra of the symmetric group, with applications to polynomial
identities for the octonions}\textquotedblright\ by Murray R. Bremner, Sara
Madariaga, Luiz A. Peresi (Comment. Math. Univ. Carolin. 57,4 (2016), pp.
413--452). Very few of them are corrections (there is barely anything wrong
in the paper); most of them are additional details and steps that have been
omitted from the proofs.
\begin{itemize} \item \textbf{page 414:} You write that the matrices obtained by restricting $\phi$ to $S_{n}$ \textquotedblleft have entries in $\left\{ 0,\pm1\right\}$\textquotedblright. If you are talking about the matrices $R^{\lambda}\left( p\right)$ from Definition 1.49, then I don't see why this is true (and I suspect it is not). \item \textbf{page 415, Definition 1.5:} \textquotedblleft A \textbf{Young tableau} $T^{\lambda}$\textquotedblright\ should be \textquotedblleft A \textbf{Young tableau} $T$\textquotedblright. (The \textquotedblleft$\lambda$\textquotedblright\ superscript is unnecessary and confusing; you just call it \textquotedblleft$T$\textquotedblright\ afterwards.) \item \textbf{page 416, Definition 1.9:} It would be good to point out that this action of $S_{n}$ on the set $\left\{ \text{tableaux of shape }% \lambda\right\}$ is free and transitive. (This is being used tacitly further below.) \item \textbf{page 417, Remark 1.14:} This is perhaps a bit out of place: You have yet to use the notation $hvT$ at this point! \item \textbf{page 417, proof of Proposition 1.15:} In the second paragraph of this proof, replace \textquotedblleft obtaining tableaux $T^{\lambda^{\prime}% }\succ T^{\mu^{\prime}}$ where $\lambda^{\prime}$ and $\mu^{\prime}$ are partitions of $n-n_{1}$\textquotedblright\ by \textquotedblleft obtaining tableaux $T^{\lambda^{\prime}}$ and $T^{\mu^{\prime}}$ whose shapes $\lambda^{\prime}$ and $\mu^{\prime}$ are partitions of $n-n_{1}$ satisfying $Y^{\lambda^{\prime}}\succ Y^{\mu^{\prime}}$ \textquotedblright. \item \textbf{page 419, proof of Lemma 1.19:} It would be helpful to point out that the third equality sign in the displayed equation relies on the facts that $\epsilon\left( v\right) ^{-1}=\epsilon\left( v\right)$ (because $\epsilon\left( v\right) \in\left\{ 1,-1\right\}$) and that $\epsilon$ is a group homomorphism. 
\item \textbf{page 419, proof of Proposition 1.20:} Remove \textquotedblleft Since $\epsilon\left( p\right) =\epsilon\left( p^{-1}\right)$\textquotedblright. (You don't use the fact that $\epsilon\left( p\right) =\epsilon\left( p^{-1}\right)$ here.) \item \textbf{page 419, Definition 1.21:} Replace \textquotedblleft in lex order\textquotedblright\ by \textquotedblleft in some fixed order chosen in such a way that the standard tableaux of shape $\lambda$ will be $T_{1}% ,T_{2},\ldots,T_{d_{\lambda}}$ in lex order\textquotedblright. Indeed, you don't need all the $n!$ tableaux $T_{1},T_{2},\ldots,T_{n!}$ to be in lex order; but you will later want the standard tableaux of shape $\lambda$ to be $T_{1},T_{2},\ldots,T_{d_{\lambda}}$ in lex order. If you list all the $n!$ tableaux of shape $\lambda$ in lex order, then (in general) the first $d_{\lambda}$ tableaux in your list will not be the standard tableaux of shape $\lambda$. \item \textbf{page 420, Corollary 1.24:} After \textquotedblleft be the standard tableaux\textquotedblright, add \textquotedblleft of shape $\lambda$\textquotedblright. \item \textbf{page 421, proof of Proposition 1.25:} At the beginning of this proof, add \textquotedblleft Again, set $H_{i}=H_{T_{i}}$ and $V_{i}=V_{T_{i}% }$; thus, $D_{i}=H_{i}V_{i}$.\textquotedblright. \item \textbf{page 421, proof of Proposition 1.25:} After \textquotedblleft% $hD_{i}^{2}v=\left( hH_{i}\right) V_{i}H_{i}\left( V_{i}v\right) =\epsilon\left( v\right) H_{i}V_{i}H_{i}V_{i}=\epsilon\left( v\right) D_{i}^{2}$\textquotedblright, add \textquotedblleft$=\epsilon\left( v\right) \sum_{p\in S_{n}}x_{p}p$\textquotedblright\ (in order to make the step to the next equality clearer). \item \textbf{page 421, proof of Proposition 1.25:} Replace \textquotedblleft On the left side of (7) take $p=\iota$, on the right side take $p=hv$, and compare coefficients\textquotedblright\ by \textquotedblleft Comparing coefficients of $hv$ on both sides of (7), we obtain\textquotedblright. 
\item \textbf{page 421, proof of Proposition 1.25:} Replace \textquotedblleft Setting $p=q$ on both sides, we obtain $x_{q}tqq^{-1}tq=\epsilon\left( q^{-1}tq\right) x_{q}q,$ and this simplifies to $x_{q}q=-x_{q}q$\textquotedblright\ by \textquotedblleft Comparing coefficients of $q$ on both sides of this equation, we obtain $x_{q}=\epsilon\left( q^{-1}tq\right) x_{q}$ (since the only $p\in S_{n}$ satisfying $tpq^{-1}tq=q$ is $q$), and this simplifies to $x_{q}=-x_{q}$\textquotedblright. \item \textbf{page 421, proof of Proposition 1.25:} After \textquotedblleft Combining the results of the two cases,\textquotedblright, add \textquotedblleft we obtain that $D_{i}^{2}=\sum_{h\in G_{H}\left( T_{i}\right) }\sum_{v\in G_{V}\left( T_{i}\right) }x_{\iota}\epsilon\left( v\right) hv$, since Lemma 1.12 shows that any permutation of the form $hv$ can be written in this form in exactly one way. In view of $D_{i}=\sum_{h\in G_{H}\left( T_{i}\right) }\sum_{v\in G_{V}\left( T_{i}\right) }% \epsilon\left( v\right) hv$, this rewrites as\textquotedblright. \item \textbf{page 422, proof of Proposition 1.25:} The equality sign in \textquotedblleft$\sum_{h,v}\epsilon\left( v\right) \operatorname*{trace}% \left( hv\right) =\operatorname*{trace}\left( I_{\mathbb{F}S_{n}}\right)$\textquotedblright, again, relies on Lemma 1.12. (Indeed, Lemma 1.12 shows that the only pair $\left( h,v\right)$ satisfying $hv=\iota$ is $\left( \iota,\iota\right)$.) \item \textbf{page 422, Definition 1.26:} It is worth saying that $E_{i}^{\lambda}$ will be denoted by $E_{i}$ when $\lambda$ is clear from the context. \item \textbf{page 422, \S 1.5:} After Corollary 1.27, I suggest adding another corollary (which is being tacitly used in the proof of Lemma 1.29): \textbf{Corollary 1.27a.} Let $\lambda\vdash n$. Let $i,j\in\left\{ 1,2,\ldots,n!\right\}$. Then, $f_{i}=f_{j}$ and $c_{i}=c_{j}$ and $E_{j}=s_{ji}E_{i}s_{ij}$ and $E_{i}s_{ij}=s_{ij}E_{j}$. 
\textit{Proof of Corollary 1.27a.} From (5), we obtain $D_{j}=s_{ji}% D_{i}s_{ij}$, so that $\mathbb{F}S_{n}D_{j}=\mathbb{F}\underbrace{S_{n}s_{ji}% }_{=S_{n}}D_{i}s_{ij}=\mathbb{F}S_{n}D_{i}s_{ij}\cong\mathbb{F}S_{n}D_{i}$ as vector spaces (since $s_{ij}\in\mathbb{F}S_{n}$ is invertible). Hence, $\dim\left( \mathbb{F}S_{n}D_{j}\right) =\dim\left( \mathbb{F}S_{n}% D_{i}\right)$. In other words, $f_{j}=f_{i}$ (since the numbers $f_{i}$ and $f_{j}$ are defined to be $\dim\left( \mathbb{F}S_{n}D_{i}\right)$ and $\dim\left( \mathbb{F}S_{n}D_{j}\right)$, respectively). In other words, $f_{i}=f_{j}$. This yields $n!/f_{i}=n!/f_{j}$. In other words, $c_{i}=c_{j}$ (since the numbers $c_{i}$ and $c_{j}$ are defined to be $n!/f_{i}$ and $n!/f_{j}$, respectively). Finally, the definition of $c_{i}$ yields $c_{i}=n!/f_{i}$, so that $\dfrac{f_{i}}{n!}=\dfrac{1}{c_{i}}$. But the definition of $E_{i}$ yields $E_{i}=\underbrace{\dfrac{f_{i}}{n!}}_{=\dfrac {1}{c_{i}}}D_{i}=\dfrac{1}{c_{i}}D_{i}$. Likewise, $E_{j}=\dfrac{1}{c_{j}% }D_{j}$. Hence,% $E_{j}=\underbrace{\dfrac{1}{c_{j}}}_{\substack{=\dfrac{1}{c_{i}}\\\text{(since }c_{i}=c_{j}\text{)}}}\underbrace{D_{j}}_{=s_{ji}D_{i}s_{ij}}=\dfrac{1}{c_{i}% }\cdot s_{ji}D_{i}s_{ij}=s_{ji}\underbrace{\left( \dfrac{1}{c_{i}}% D_{i}\right) }_{=E_{i}}s_{ij}=s_{ji}E_{i}s_{ij}.$ Hence,% $\underbrace{s_{ij}}_{=\left( s_{ji}\right) ^{-1}}\underbrace{E_{j}}% _{=s_{ji}E_{i}s_{ij}}=\underbrace{\left( s_{ji}\right) ^{-1}s_{ji}}_{=\iota }E_{i}s_{ij}=E_{i}s_{ij},$ so that $E_{i}s_{ij}=s_{ij}E_{j}$. This completes the proof of Corollary 1.27a. $\blacksquare$ \item \textbf{page 422, \S 1.5:} I would also add another corollary (which is being tacitly used in the proof of Lemma 1.32): \textbf{Corollary 1.27b.} Let $\lambda\vdash n$. Let $i\in\left\{ 1,2,\ldots,n!\right\}$. Then, $D_{i}\neq0$ and $f_{i}\neq0$ and $E_{i}\neq0$. 
\textit{Proof of Corollary 1.27b.} The definition of $D_{i}$ yields \begin{align*} D_{i} & =\underbrace{H_{T_{i}}}_{=\sum_{h\in G_{H}\left( T_{i}\right) }% h}\underbrace{V_{T_{i}}}_{=\sum_{v\in G_{V}\left( T_{i}\right) }% \epsilon\left( v\right) v}=\left( \sum_{h\in G_{H}\left( T_{i}\right) }h\right) \left( \sum_{v\in G_{V}\left( T_{i}\right) }\epsilon\left( v\right) v\right) \\ & =\sum_{h\in G_{H}\left( T_{i}\right) }\sum_{v\in G_{V}\left( T_{i}\right) }\epsilon\left( v\right) hv=\sum_{\left( h,v\right) \in G_{H}\left( T_{i}\right) \times G_{V}\left( T_{i}\right) }\epsilon\left( v\right) hv. \end{align*} The group elements $hv$ on the right hand side of this equality are all distinct (by the second sentence of Lemma 1.12); thus, the sum $\sum_{\left( h,v\right) \in G_{H}\left( T_{i}\right) \times G_{V}\left( T_{i}\right) }\epsilon\left( v\right) hv$ has no cancellations and therefore is nonzero. In other words, $D_{i}\neq0$. Hence, the left ideal $\mathbb{F}S_{n}D_{i}$ is nonzero, and thus $\dim\left( \mathbb{F}S_{n}D_{i}\right) >0$. The definition of $f_{i}$ now yields $f_{i}=\dim\left( \mathbb{F}S_{n}% D_{i}\right) >0$. Hence, $f_{i}\neq0$. Now, the definition of $E_{i}$ yields $E_{i}=\dfrac{f_{i}}{n!}D_{i}\neq0$ (since $f_{i}\neq0$ and $D_{i}\neq0$). This proves Corollary 1.27b. $\blacksquare$ \item \textbf{page 422, proof of Lemma 1.29:} At the beginning of the proof, add the following sentence: \textquotedblleft Again, set $H_{i}=H_{T_{i}}$ and $V_{i}=V_{T_{i}}$; thus, $D_{i}=H_{i}V_{i}$.\textquotedblright. \item \textbf{page 422, proof of Lemma 1.29:} I would replace this proof with the following more detailed version: \textquotedblleft Again, set $H_{i}=H_{T_{i}}$ and $V_{i}=V_{T_{i}}$; thus, $D_{i}=H_{i}V_{i}$. Define $H_{j}$ and $V_{j}$ likewise, so that $D_{j}% =H_{j}V_{j}$. First, assume that $s_{ji}=vh$ for some $h\in G_{H}\left( T_{i}\right)$ and $v\in G_{V}\left( T_{i}\right)$. Then, Lemma 1.19 yields $hH_{i}=H_{i}$. 
Now, from $D_{i}=H_{i}V_{i}$, we obtain $hD_{i}=\underbrace{hH_{i}}_{=H_{i}% }V_{i}=H_{i}V_{i}=D_{i}$. Multiplying this by $\dfrac{f_{i}}{n!}$, we obtain $hE_{i}=E_{i}$ (since $E_{i}=\dfrac{f_{i}}{n!}D_{i}$). Also, Lemma 1.19 yields $V_{i}v=\epsilon\left( v\right) V_{i}$. Now, from $D_{i}=H_{i}V_{i}$, we obtain $D_{i}v=H_{i}\underbrace{V_{i}v}_{=\epsilon\left( v\right) V_{i}% }=\epsilon\left( v\right) \underbrace{H_{i}V_{i}}_{=D_{i}}=\epsilon\left( v\right) D_{i}$. Multiplying this by $\dfrac{f_{i}}{n!}$, we obtain $E_{i}v=\epsilon\left( v\right) E_{i}$ (since $E_{i}=\dfrac{f_{i}}{n!}D_{i}$). Now, Corollary 1.27a yields $E_{j}=s_{ji}E_{i}s_{ij}$, so that% \begin{align*} E_{i}E_{j} & =E_{i}\left( \underbrace{s_{ji}}_{=vh}E_{i}s_{ij}\right) =\underbrace{E_{i}v}_{=\epsilon\left( v\right) E_{i}}\underbrace{hE_{i}% }_{=E_{i}}s_{ij}=\epsilon\left( v\right) \underbrace{E_{i}E_{i}% }_{\substack{=\left( E_{i}\right) ^{2}=E_{i}\\\text{(by Corollary 1.27)}% }}s_{ij}=\epsilon\left( v\right) E_{i}s_{ij}\\ & =\xi_{ij}E_{i}s_{ij}\ \ \ \ \ \ \ \ \ \ \left( \text{since }\xi _{ij}=\epsilon\left( v\right) \right) . \end{align*} Second, assume that $s_{ji}\neq vh$ for any $h\in G_{H}\left( T_{i}\right)$ and $v\in G_{V}\left( T_{i}\right)$. Thus, $\left( s_{ji}\right) ^{-1}\neq\left( vh\right) ^{-1}$ for any $h\in G_{H}\left( T_{i}\right)$ and $v\in G_{V}\left( T_{i}\right)$. In other words, $s_{ij}\neq h^{-1}v^{-1}$ for any $h\in G_{H}\left( T_{i}\right)$ and $v\in G_{V}\left( T_{i}\right)$ (since $\left( s_{ji}\right) ^{-1}=s_{ij}$ and $\left( vh\right) ^{-1}=h^{-1}v^{-1}$). Equivalently, $s_{ij}\neq hv$ for any $h\in G_{H}\left( T_{i}\right)$ and $v\in G_{V}\left( T_{i}\right)$ (since $G_{H}\left( T_{i}\right)$ and $G_{V}\left( T_{i}\right)$ are subgroups of $S_{n}$ and thus invariant under inversion). 
Hence, Lemma 1.16 (applied to $T=T_{j}$ and $p=s_{ij}$) shows that there exist two distinct numbers $k,\ell$ that lie in the same row of $T_{j}$ and in the same column of $s_{ij}T_{j}$. In view of $s_{ij}T_{j}=T_{i}$, this shows that $k$ and $\ell$ lie in the same column of $T_{i}$; therefore, the transposition $t=\left( k,\ell\right)$ satisfies $t\in G_{V}\left( T_{i}\right)$. Hence, $V_{i}t=-V_{i}$. But $k$ and $\ell$ lie in the same row of $T_{j}$; thus, $t\in G_{H}\left( T_{j}\right)$ and therefore $tH_{j}=H_{j}$. Now,% $\underbrace{D_{i}}_{=H_{i}V_{i}}\underbrace{D_{j}}_{=H_{j}V_{j}}=H_{i}% V_{i}H_{j}V_{j}=H_{i}\underbrace{V_{i}t}_{=-V_{i}}\underbrace{tH_{j}}_{=H_{j}% }V_{j}=-\underbrace{H_{i}V_{i}}_{=D_{i}}\underbrace{H_{j}V_{j}}_{=D_{j}% }=-D_{i}D_{j},$ so that $D_{i}D_{j}=0$. Since $E_{i}=\dfrac{f_{i}}{n!}D_{i}$ and $E_{j}% =\dfrac{f_{j}}{n!}D_{j}$, this entails $E_{i}E_{j}=0=\xi_{ij}E_{i}s_{ij}$ (since $\xi_{ij}=0$). This completes the proof of Lemma 1.29.\textquotedblright \item \textbf{page 423, proof of Lemma 1.32:} After \textquotedblleft and so $\xi_{ij}=0$\textquotedblright, I would add \textquotedblleft(since Corollary 1.27b yields $E_{i}\neq0$, and thus $E_{i}s_{ij}\neq0$)\textquotedblright. \item \textbf{page 423, proof of Lemma 1.32:} Replace \textquotedblleft and so Lemma 1.29 gives $E_{i}=\xi_{ii}E_{i}$, hence $\xi_{ii}=1$\textquotedblright% \ by the simpler argument \textquotedblleft and so the definition of $\xi _{ii}$ yields $\xi_{ii}=\epsilon\left( \iota\right) =1$\textquotedblright. \item \textbf{page 423, proof of Proposition 1.33:} \textquotedblleft Using Proposition 1.22\textquotedblright\ $\rightarrow$ \textquotedblleft Using Corollary 1.27a (specifically, the $E_{i}s_{ij}=s_{ij}E_{j}$ part)\textquotedblright. \item \textbf{page 423, Corollary 1.36:} Please say that your definition of \textquotedblleft subalgebra\textquotedblright\ does not require that the unity of the subalgebra equals the unity of the algebra! 
(This is far from standard.) \item \textbf{page 423:} After Corollary 1.36,
I would add another corollary for later use: \textbf{Corollary 1.36a.} Let
$\lambda,\mu\vdash n$ be distinct. Then, $N^{\lambda}N^{\mu}=0$.
\textit{Proof.} It suffices to show that
$E_{i}^{\lambda}s_{ij}^{\lambda}E_{k}^{\mu}s_{k\ell}^{\mu}=0$ for any
$i,j\in\left\{ 1,2,\ldots,d_{\lambda}\right\}$ and $k,\ell\in\left\{
1,2,\ldots,d_{\mu}\right\}$. So let us consider such $i,j$ and $k,\ell$.
Proposition 1.23 yields $D_{j}^{\lambda}D_{k}^{\mu}=0$ (since
$\lambda\neq\mu$). But the definitions of $E_{j}^{\lambda}$ and
$E_{k}^{\mu}$ yield $E_{j}^{\lambda}=\dfrac{f_{j}}{n!}D_{j}^{\lambda}$ and
$E_{k}^{\mu}=\dfrac{f_{k}}{n!}D_{k}^{\mu}$. But Corollary 1.27a yields
$E_{i}^{\lambda}s_{ij}^{\lambda}=s_{ij}^{\lambda}E_{j}^{\lambda}$ and
therefore
$\underbrace{E_{i}^{\lambda}s_{ij}^{\lambda}}_{=s_{ij}^{\lambda}E_{j}^{\lambda}}E_{k}^{\mu}s_{k\ell}^{\mu}=s_{ij}^{\lambda}\underbrace{E_{j}^{\lambda}}_{=\dfrac{f_{j}}{n!}D_{j}^{\lambda}}\underbrace{E_{k}^{\mu}}_{=\dfrac{f_{k}}{n!}D_{k}^{\mu}}s_{k\ell}^{\mu}=\dfrac{f_{j}}{n!}\cdot\dfrac{f_{k}}{n!}s_{ij}^{\lambda}\underbrace{D_{j}^{\lambda}D_{k}^{\mu}}_{=0}s_{k\ell}^{\mu}=0.$
This proves Corollary 1.36a. $\blacksquare$ \item \textbf{page 423:} After
Lemma 1.37, I would add another corollary for later use:
\textbf{Corollary 1.37a.} For any partition $\lambda\vdash n$ and any two
$d_{\lambda}\times d_{\lambda}$-matrices $B$ and $C$, we have
$\alpha^{\lambda}\left( B\right) \alpha^{\lambda}\left( C\right)
=\alpha^{\lambda}\left( B\mathcal{E}^{\lambda}C\right) .$
\textit{Proof.} Let $\lambda\vdash n$, and let $B$ and $C$ be two
$d_{\lambda}\times d_{\lambda}$-matrices. Then, we can write $B$ and $C$ in
the forms $B=\left( b_{ij}\right)$ and $C=\left( c_{ij}\right)$. Hence,
$B=\left( b_{ij}\right) =\sum_{i,j}b_{ij}E_{ij}$ and $C=\left(
c_{ij}\right) =\left( c_{k\ell}\right) =\sum_{k,\ell}c_{k\ell}E_{k\ell}$.
Hence,% \begin{align*} & \alpha^{\lambda}\left( \underbrace{B}_{=\sum_{i,j}b_{ij}E_{ij}}\right) \alpha^{\lambda}\left( \underbrace{C}_{=\sum_{k,\ell}c_{k\ell}E_{k\ell}% }\right) \\ & =\alpha^{\lambda}\left( \sum_{i,j}b_{ij}E_{ij}\right) \alpha^{\lambda }\left( \sum_{k,\ell}c_{k\ell}E_{k\ell}\right) =\sum_{i,j}b_{ij}\sum _{k,\ell}c_{k\ell}\underbrace{\alpha^{\lambda}\left( E_{ij}\right) \alpha^{\lambda}\left( E_{k\ell}\right) }_{\substack{=\alpha^{\lambda }\left( E_{ij}\mathcal{E}^{\lambda}E_{k\ell}\right) \\\text{(by Lemma 1.37)}}}\\ & =\sum_{i,j}b_{ij}\sum_{k,\ell}c_{k\ell}\alpha^{\lambda}\left( E_{ij}\mathcal{E}^{\lambda}E_{k\ell}\right) =\alpha^{\lambda}\left( \underbrace{\left( \sum_{i,j}b_{ij}E_{ij}\right) }_{=B}\mathcal{E}^{\lambda }\underbrace{\left( \sum_{k,\ell}c_{k\ell}E_{k\ell}\right) }_{=C}\right) \\ & =\alpha^{\lambda}\left( B\mathcal{E}^{\lambda}C\right) . \end{align*} This proves Corollary 1.37a. $\blacksquare$ \item \textbf{page 423, proof of Proposition 1.38:} After \textquotedblleft% $\sum_{\mu\vdash n}\alpha^{\mu}\left( A^{\mu}\right) =0$\textquotedblright, add \textquotedblleft for some matrices $A^{\mu}=\left( a_{ij}^{\mu}\right) _{i,j=1,2,\ldots,d_{\mu}}$\textquotedblright. \item \textbf{pages 423--424, proof of Proposition 1.38:} I think this whole proof would become clearer if rewritten as follows: \textquotedblleft Assume that $\sum_{\mu\vdash n}\sum_{i,j=1}^{d_{\mu}}% a_{ij}^{\mu}E_{i}^{\mu}s_{ij}^{\mu}=0$ for some family of scalars $a_{ij}% ^{\mu}\in\mathbb{F}$. We shall show that $a_{ij}^{\mu}=0$ for all $\mu$ and $i,j$. Fix a partition $\lambda$. Let $A$ be the $d_{\lambda}\times d_{\lambda}%$-matrix $\left( a_{ij}^{\lambda}\right) _{i,j=1,2,\ldots,d_{\lambda}}$. We shall show that $A=0$. Fix $u,v\in\left\{ 1,2,\ldots,d_{\lambda}\right\}$. 
We have $E_{u}% ^{\lambda}\underbrace{\sum_{\mu\vdash n}\sum_{i,j=1}^{d_{\mu}}a_{ij}^{\mu }E_{i}^{\mu}s_{ij}^{\mu}}_{=0}=0$, so that% \begin{align*} 0 & =E_{u}^{\lambda}\sum_{\mu\vdash n}\sum_{i,j=1}^{d_{\mu}}a_{ij}^{\mu }E_{i}^{\mu}s_{ij}^{\mu}=\sum_{\mu\vdash n}\sum_{i,j=1}^{d_{\mu}}a_{ij}^{\mu }E_{u}^{\lambda}E_{i}^{\mu}s_{ij}^{\mu}=\sum_{i,j=1}^{d_{\lambda}}% a_{ij}^{\lambda}E_{u}^{\lambda}E_{i}^{\lambda}s_{ij}^{\lambda}\\ & \ \ \ \ \ \ \ \ \ \ \left( \begin{array} [c]{c}% \text{since Proposition 1.23 yields that }E_{u}^{\lambda}E_{i}^{\mu}=0\text{ whenever }\lambda\neq\mu\text{,}\\ \text{and this entails that all addends }a_{ij}^{\mu}E_{u}^{\lambda}E_{i}% ^{\mu}s_{ij}^{\mu}\text{ with }\lambda\neq\mu\text{ vanish}% \end{array} \right) \\ & =\sum_{i,j=1}^{d_{\lambda}}a_{ij}\underbrace{E_{u}E_{i}}_{\substack{=\xi _{ui}E_{u}s_{ui}\\\text{(by Lemma 1.29)}}}s_{ij}\\ & \ \ \ \ \ \ \ \ \ \ \left( \text{from now on, we are omitting the superscripts }\lambda\right) \\ & =\sum_{i,j=1}^{d_{\lambda}}a_{ij}\xi_{ui}E_{u}\underbrace{s_{ui}s_{ij}% }_{=s_{uj}}=\sum_{i,j=1}^{d_{\lambda}}a_{ij}\xi_{ui}E_{u}s_{uj}. \end{align*} Multiplying both sides of this equality with $E_{v}$ on the right, we obtain% \begin{align*} 0 & =\left( \sum_{i,j=1}^{d_{\lambda}}a_{ij}\xi_{ui}E_{u}s_{uj}\right) E_{v}=\sum_{i,j=1}^{d_{\lambda}}a_{ij}\xi_{ui}E_{u}s_{uj}\underbrace{E_{v}% }_{\substack{=E_{v}s_{vv}\\\text{(since }s_{vv}=\iota\text{)}}}\\ & =\sum_{i,j=1}^{d_{\lambda}}a_{ij}\xi_{ui}\underbrace{\left( E_{u}% s_{uj}\right) \left( E_{v}s_{vv}\right) }_{\substack{=\xi_{jv}E_{u}% s_{uv}\\\text{(by Proposition 1.33)}}}=\sum_{i,j=1}^{d_{\lambda}}a_{ij}% \xi_{ui}\xi_{jv}E_{u}s_{uv}=\left( \sum_{i,j=1}^{d_{\lambda}}\xi_{ui}% a_{ij}\xi_{jv}\right) E_{u}s_{uv}. 
\end{align*} Since $s_{uv}\in S_{n}$ is invertible, we can cancel $s_{uv}$ from this equality and obtain% $0=\left( \sum_{i,j=1}^{d_{\lambda}}\xi_{ui}a_{ij}\xi_{jv}\right) E_{u}.$ Since $E_{u}\neq0$ (by Corollary 1.27b), we thus obtain $\sum_{i,j=1}^{d_{\lambda}}\xi_{ui}a_{ij}\xi_{jv}=0$ (since $\sum_{i,j=1}^{d_{\lambda}}\xi_{ui}a_{ij}\xi_{jv}$ is a scalar). But $A=\left( a_{ij}^{\lambda}\right) _{i,j=1,2,\ldots,d_{\lambda}}=\left( a_{ij}\right) _{i,j=1,2,\ldots,d_{\lambda}}$ (since we are omitting the superscript $\lambda$) and $\mathcal{E}^{\lambda}=\left( \xi_{ij}\right) _{i,j=1,2,\ldots,d_{\lambda}}$ (by the definition of $\mathcal{E}^{\lambda}$). Hence, $\sum_{i,j=1}^{d_{\lambda}}\xi_{ui}a_{ij}\xi_{jv}$ is the $\left( u,v\right)$-th entry of the matrix $\mathcal{E}^{\lambda}A\mathcal{E}% ^{\lambda}$. Thus, we have showed that the $\left( u,v\right)$-th entry of the matrix $\mathcal{E}^{\lambda}A\mathcal{E}^{\lambda}$ is $0$ (since we have showed that $\sum_{i,j=1}^{d_{\lambda}}\xi_{ui}a_{ij}\xi_{jv}=0$). Forget that we fixed $u,v$. We thus have proved that the $\left( u,v\right)$-th entry of the matrix $\mathcal{E}^{\lambda}A\mathcal{E}^{\lambda}$ is $0$ for each $u,v\in\left\{ 1,2,\ldots,d_{\lambda}\right\}$. In other words, all entries of the matrix $\mathcal{E}^{\lambda}A\mathcal{E}^{\lambda}$ are $0$. In other words, $\mathcal{E}^{\lambda}A\mathcal{E}^{\lambda}=0$. Since $\mathcal{E}^{\lambda}$ is invertible (by Lemma 1.32), we thus obtain $A=0$. Thus, all entries of $A$ are $0$. In other words, $a_{ij}^{\lambda}=0$ for any $i,j\in\left\{ 1,2,\ldots,d_{\lambda}\right\}$ (since the entries of $A$ are $a_{ij}^{\lambda}$). Since we have proved this for any $\lambda\vdash n$, we thus conclude that all our scalars $a_{ij}^{\mu}$ are $0$. This proves Proposition 1.38. 
$\blacksquare$\textquotedblright
\item \textbf{page 424, proof of Corollary 1.40:} \textquotedblleft by
Proposition 1.23\textquotedblright\ $\rightarrow$ \textquotedblleft by
Proposition 1.38\textquotedblright.
\item \textbf{page 424, Remark at the end of \S 1.6:} You give the reference
[35, \S 5.1.4, Theorem A]. Here are a few alternative references for proofs
of the equality $\sum_{\lambda}d_{\lambda}^{2}=n!$:
\begin{itemize}
\item Proposition 1.3.3 in Marc A. A. van Leeuwen, \textit{The
Robinson-Schensted and Sch\"{u}tzenberger algorithms, an elementary
approach}, version 25 Nov
2011.\newline\url{http://www-math.univ-poitiers.fr/~maavl/}
\item Corollary 8.5 in Richard P. Stanley, \textit{Algebraic Combinatorics:
Walks, Trees, Tableaux, and More}, Undergraduate Texts in Mathematics,
Springer 2013.\newline\url{http://www-math.mit.edu/~rstan/algcomb/index.html}
(This book also has a second edition; the equality still is Corollary 8.5 in
it.)
\item Theorem 2.6.5 part 3 in Bruce E. Sagan, \textit{The Symmetric Group:
Representations, Combinatorial Algorithms, and Symmetric Functions}, 2nd
edition, Springer 2001.
\end{itemize}
\item \textbf{page 424, \S 1.7:} \textquotedblleft We prove that the map
$\psi$ in (1) is\textquotedblright\ $\rightarrow$ \textquotedblleft We shall
now construct the map $\psi$ in (1), and prove that it
is\textquotedblright.
\item \textbf{page 425, Proposition 1.43:} It is worth saying that the
expression
\textquotedblleft$\delta_{\lambda\mu}\delta_{jk}U_{i\ell}^{\lambda}$\textquotedblright\ is
understood to be $0$ if $\lambda\neq\mu$ (even if $U_{i\ell}^{\lambda}$ is
undefined in this case).
\item \textbf{page 425, proof of Proposition 1.43:} After \textquotedblleft
If $\lambda=\mu$ then\textquotedblright, add \textquotedblleft the
definitions of $U_{ij}$ and $U_{k\ell}$ and Corollary 1.37a yield (with the
notation $\mathcal{E}$ being used for
$\mathcal{E}^{\lambda}$)\textquotedblright.
\item \textbf{page 425, proof of Proposition 1.43:} Replace \textquotedblleft orthogonality of Proposition 1.23\textquotedblright\ by \textquotedblleft orthogonality of Corollary 1.36a\textquotedblright. \item \textbf{page 425, proof of Theorem 1.45:} Here is a more detailed version of this proof: \textquotedblleft Proposition 1.43 shows that the map $\psi$ is an $\mathbb{F}$-algebra homomorphism. It remains to prove that $\psi$ is bijective. The Remark at the end of \S 1.6 shows that $\sum_{\lambda }d_{\lambda}^{2}=n!$; in other words, $\dim M=\dim\left( \mathbb{F}% S_{n}\right)$. Hence, $\psi$ is an $\mathbb{F}$-linear map between two $\mathbb{F}$-vector spaces of the same (finite) dimension. Thus, if $\psi$ is injective, then $\psi$ is bijective. Therefore, it will suffice to show that $\psi$ is injective. Recall that $M=\bigoplus\limits_{i=1}^{r}M_{d_{i}}\left( \mathbb{F}\right)$. Let $\gamma:M\rightarrow M$ be the $\mathbb{F}$-linear map that sends each $\left( A_{1},A_{2},\ldots,A_{r}\right) \in M$ to $\left( A_{1}\left( \mathcal{E}^{\lambda_{1}}\right) ^{-1},A_{2}\left( \mathcal{E}^{\lambda_{2}% }\right) ^{-1},\ldots,A_{r}\left( \mathcal{E}^{\lambda_{r}}\right) ^{-1}\right) \in M$. This map $\gamma$ is well-defined (since the $r$ matrices $\mathcal{E}^{\lambda_{1}},\mathcal{E}^{\lambda_{2}},\ldots ,\mathcal{E}^{\lambda_{r}}$ are all invertible) and injective (since the $r$ matrices $\left( \mathcal{E}^{\lambda_{1}}\right) ^{-1},\left( \mathcal{E}^{\lambda_{2}}\right) ^{-1},\ldots,\left( \mathcal{E}% ^{\lambda_{r}}\right) ^{-1}$ are all invertible). Moreover, it is easy to see that $\psi=\alpha\circ\gamma$. (Indeed, by linearity, it suffices to show that $\psi\left( E_{ij}^{\lambda}\right) =\left( \alpha\circ\gamma\right) \left( E_{ij}^{\lambda}\right)$ for all $\lambda\vdash n$ and $i,j\in\left\{ 1,2,\ldots,d_{\lambda}\right\}$. 
But this is easy to check, since the definition of $\psi$ yields% \begin{align*} \psi\left( E_{ij}^{\lambda}\right) & =U_{ij}^{\lambda}=\alpha^{\lambda }\left( E_{ij}^{\lambda}\left( \mathcal{E}^{\lambda}\right) ^{-1}\right) =\alpha\underbrace{\left( 0,0,\ldots,0,E_{ij}^{\lambda}\left( \mathcal{E}% ^{\lambda}\right) ^{-1},0,0,\ldots,0\right) }_{=\gamma\left( 0,0,\ldots ,0,E_{ij}^{\lambda},0,0,\ldots,0\right) =\gamma\left( E_{ij}^{\lambda }\right) }\\ & =\alpha\left( \gamma\left( E_{ij}^{\lambda}\right) \right) =\left( \alpha\circ\gamma\right) \left( E_{ij}^{\lambda}\right) . \end{align*} Thus, $\psi=\alpha\circ\gamma$ is proven.) Now, the maps $\alpha$ and $\gamma$ are both injective (indeed, the map $\alpha$ is injective by Corollary 1.40). Hence, their composition $\alpha\circ\gamma$ is injective. In other words, the map $\psi$ is injective (since $\psi=\alpha\circ\gamma$). As we have seen above, this completes the proof of Theorem 1.45. $\blacksquare$ \item \textbf{page 427:} After \textquotedblleft Our next goal is to compute explicitly the algebra homomorphism $\phi$\textquotedblright, add \textquotedblleft inverse to $\psi$\textquotedblright. \item \textbf{page 427:} Replace \textquotedblleft Proposition 1.22 and Lemma 1.29\textquotedblright\ by \textquotedblleft Lemma 1.29 and Corollary 1.27a (specifically, the $E_{i}s_{ij}=s_{ij}E_{j}$ part of it)\textquotedblright. \item \textbf{page 428:} I'd replace \textquotedblleft The Wedderburn decomposition of $\mathbb{F}S_{n}$ shows that\textquotedblright\ by \textquotedblleft The surjectivity of $\psi$ in Theorem 1.45 shows that\textquotedblright\ (this is more concrete). \item \textbf{page 428, Definition 1.49:} This definition tacitly uses the fact that the $r_{ij}^{\lambda}\left( p\right)$ are uniquely determined by $p$, $\lambda$, $i$ and $j$. 
This follows from the fact that the family $\left( U_{ij}^{\lambda}\right)$ (with $\lambda$ ranging over all partitions of $n$ and with $i,j$ ranging over $\left\{ 1,2,\ldots,d_{\lambda }\right\}$ each) is a basis of the $\mathbb{F}$-vector space $\mathbb{F}% S_{n}$. (And this fact follows from Theorem 1.45, since the family $\left( E_{ij}^{\lambda}\right)$ forms a basis of $M$ and is sent to the family $\left( U_{ij}^{\lambda}\right)$ by the map $\psi$.) \item \textbf{page 428, Lemma 1.50:} It is worth saying that Lemma 1.50 is a consequence of Proposition 1.43. \item \textbf{page 429, proof of Proposition 1.51:} Replace both \textquotedblleft$\alpha$\textquotedblright s in this proof by \textquotedblleft$\alpha^{\lambda}$\textquotedblright. (It is dangerous to omit the superscript on an $\alpha$, since $\alpha$ already has a different meaning given to it in Definition 1.39.) \item \textbf{page 429, proof of Proposition 1.51:} Remove the \textquotedblleft write $\mathcal{E}=A_{\iota}^{\lambda}$ and\textquotedblright\ part of the first sentence of the proof. Instead, at the beginning of the proof, I'd add \textquotedblleft The matrix $A_{\iota }^{\lambda}$ is the matrix $\mathcal{E}^{\lambda}$ from Definition 1.31, and thus is invertible (by Lemma 1.32). We shall omit the superscripts $\lambda$, so we write $A_{p}$ for $A_{p}^{\lambda}$, and we write $\mathcal{E}$ for $\mathcal{E}^{\lambda}=A_{\iota}^{\lambda}=A_{\iota}$\textquotedblright. \item \textbf{page 429, proof of Proposition 1.51:} Replace \textquotedblleft We have\textquotedblright\ by \textquotedblleft Thus, $E_{ii}\mathcal{E}^{-1}$ is the $d_{\lambda}\times d_{\lambda}$-matrix whose $i$-th row has entries $\eta_{i1},\eta_{i2},\ldots,\eta_{id_{\lambda}}$ while all other rows are $0$. 
Therefore, the definition of $\alpha^{\lambda}$ yields% $\alpha^{\lambda}\left( E_{ii}\mathcal{E}^{-1}\right) =\sum_{k=1}% ^{d_{\lambda}}\eta_{ik}E_{i}s_{ik}.$ Similarly,% $\alpha^{\lambda}\left( E_{jj}\mathcal{E}^{-1}\right) =\sum_{\ell =1}^{d_{\lambda}}\eta_{j\ell}E_{j}s_{j\ell}.$ Hence,\textquotedblright. \item \textbf{page 429, proof of Proposition 1.51:} The second-to-last equality sign in the long (displayed) computation relies on the equality% $\sum_{\ell=1}^{d_{\lambda}}\eta_{j\ell}E_{i}s_{i\ell}=U_{ij},$ which is not completely obvious. Here is how it can be proved: The matrix $\left( \mathcal{E}^{\lambda}\right) ^{-1}=\mathcal{E}^{-1}$ has entries $\eta_{ij}$. Thus, $E_{ij}^{\lambda}\left( \mathcal{E}^{\lambda}\right) ^{-1}$ is the $d_{\lambda}\times d_{\lambda}$-matrix whose $i$-th row has entries $\eta_{j1},\eta_{j2},\ldots,\eta_{jd_{\lambda}}$ while all other rows are $0$. Therefore, the definition of $\alpha^{\lambda}$ yields% $\alpha^{\lambda}\left( E_{ij}^{\lambda}\left( \mathcal{E}^{\lambda}\right) ^{-1}\right) =\sum_{\ell=1}^{d_{\lambda}}\eta_{j\ell}E_{i}s_{i\ell}.$ Now, the definition of $U_{ij}$ yields% $U_{ij}=\alpha^{\lambda}\left( E_{ij}^{\lambda}\left( \mathcal{E}^{\lambda }\right) ^{-1}\right) =\sum_{\ell=1}^{d_{\lambda}}\eta_{j\ell}E_{i}s_{i\ell }.$ Thus, $\sum_{\ell=1}^{d_{\lambda}}\eta_{j\ell}E_{i}s_{i\ell}=U_{ij}$ is proven. \item \textbf{page 429, proof of Proposition 1.51:} The last equality sign in the long (displayed) computation relies on the equality% $\sum_{k=1}^{d_{\lambda}}\eta_{ik}\xi_{kj}^{p}=\left( A_{\iota}^{-1}% A_{p}\right) _{ij},$ which is not completely obvious. Here is how it can be proved: We have $A_{\iota}=\mathcal{E}$, so that $A_{\iota}^{-1}=\mathcal{E}^{-1}$. Thus, the entries of the matrix $A_{\iota}^{-1}$ are the entries of the matrix $\mathcal{E}^{-1}$, which are the scalars $\eta_{ij}$ (by the definition of $\eta_{ij}$). 
On the other hand, the entries of the matrix $A_{p}% =A_{p}^{\lambda}$ are $\xi_{ij}^{p}$ (by the definition of $A_{p}^{\lambda}$). Hence, the $\left( i,j\right)$-th entry of the matrix $A_{\iota}^{-1}A_{p}$ is $\sum_{k=1}^{d_{\lambda}}\eta_{ik}\xi_{kj}^{p}$ (by the definition of the product of two matrices). In other words, $\left( A_{\iota}^{-1}A_{p}\right) _{ij}=\sum_{k=1}^{d_{\lambda}}\eta_{ik}\xi_{kj}^{p}$. Thus, $\sum _{k=1}^{d_{\lambda}}\eta_{ik}\xi_{kj}^{p}=\left( A_{\iota}^{-1}A_{p}\right) _{ij}$ is proven. \item \textbf{page 429, proof of Proposition 1.51:} \textquotedblleft Therefore $r_{ij}^{\lambda}\left( p\right)$\textquotedblright% \ $\rightarrow$ \textquotedblleft Therefore, by Lemma 1.50 (and because $U_{ij}\neq0$), we obtain $r_{ij}^{\lambda}\left( p\right)$% \textquotedblright. \item \textbf{page 429:} It is worth explaining why exactly Proposition 1.51 provides an explicit way of computing the homomorphism $\phi$ in (11). Indeed, each $p\in S_{n}$ satisfies% \begin{align*} & \psi\left( R^{\lambda_{1}}\left( p\right) ,R^{\lambda_{2}}\left( p\right) ,\ldots,R^{\lambda_{r}}\left( p\right) \right) \\ & =\sum_{\lambda\vdash n}\psi\left( \underbrace{R^{\lambda}\left( p\right) }_{\substack{=\sum_{i=1}^{d_{\lambda}}\sum_{j=1}^{d_{\lambda}}r_{ij}^{\lambda }\left( p\right) E_{ij}^{\lambda}\\\text{(by the definition of }R^{\lambda }\left( p\right) \text{)}}}\right) =\sum_{\lambda\vdash n}\underbrace{\psi \left( \sum_{i=1}^{d_{\lambda}}\sum_{j=1}^{d_{\lambda}}r_{ij}^{\lambda }\left( p\right) E_{ij}^{\lambda}\right) }_{\substack{=\sum_{i=1}% ^{d_{\lambda}}\sum_{j=1}^{d_{\lambda}}r_{ij}^{\lambda}\left( p\right) U_{ij}^{\lambda}\\\text{(by the definition of }\psi\text{)}}}\\ & =\sum_{\lambda\vdash n}\sum_{i=1}^{d_{\lambda}}\sum_{j=1}^{d_{\lambda}% }r_{ij}^{\lambda}\left( p\right) U_{ij}^{\lambda}% =p\ \ \ \ \ \ \ \ \ \ \left( \text{by (13)}\right) \end{align*} and therefore $\left( R^{\lambda_{1}}\left( p\right) ,R^{\lambda_{2}% }\left( p\right) 
,\ldots,R^{\lambda_{r}}\left( p\right) \right) =\psi^{-1}\left( p\right) =\phi\left( p\right)$ (since $\psi^{-1}=\phi$). Hence, by computing the matrices $R^{\lambda}\left( p\right)$ for all $\lambda\vdash n$, we can obtain an explicit formula for $\phi\left( p\right)$. \end{itemize} \end{document}