% Preamble (reflowed to one declaration per line; in the collapsed source everything
% after the first "%" was accidentally commented out).
% Each theorem-like wrapper environment takes ONE optional argument (the title),
% defaulting to empty — hence "[1][]"; the bare "[]" in the source is invalid
% \newenvironment syntax and would not compile, since the bodies use #1.
\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{framed}
\usepackage{amsmath}
\usepackage{comment}
\usepackage{color}
\usepackage{hyperref}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{amsthm}
% Scientific WorkPlace / TCI metadata lines (kept verbatim; the empty braces
% presumably held <META ...> tags that were stripped by the extraction).
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Monday, January 09, 2017 17:53:42}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{algorithm}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exmp}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\kk}{\mathbf{k}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\ev}{\operatorname{ev}}
\newcommand{\Comp}{\operatorname{Comp}}
\newcommand{\bk}{\mathbf{k}}
\newcommand{\Nplus}{\mathbb{N}_{+}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\Znp}{\mathbb{Z}_{\left(p\right)}}
\newcommand{\Fpn}{\mathbb{F}_p^n}
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\setlength\textheight{22.5cm}
\setlength\textwidth{15cm}
\ihead{Errata to ``The Steinberg module and the Hecke algebra''}
\ohead{\today}
\begin{document}

\begin{center}
\textbf{The Steinberg module and the Hecke algebra}

\textit{Neil P. Strickland}

\url{https://neil-strickland.staff.shef.ac.uk/research/jordan.pdf}

version of 2 May 2012

\textbf{Errata and addenda by Darij Grinberg}

\bigskip
\end{center}

The list below contains corrections and comments to the preprint
\textquotedblleft The Steinberg module and the Hecke algebra\textquotedblright%
\ by Neil P. Strickland. The comments include alternative proofs and
additional details (indeed, most of the comments below are of the latter
kind, and they are the main reason why this list is so large). I have written
this list while I was reading the preprint (over the course of several
months\footnote{The preprint packs a whole lot of material into just 15 pages.
Partly, I wish it would proceed more slowly and leave less work to the reader; the below comments fill in lots of details that are omitted.}); since I am not myself an expert in the subject, my comments are not always particularly learned (I suspect that many of the proofs I am giving below can be drastically simplified), and they are probably full of mistakes of their own. (I have tried to be detailed, partly in order to avoid mistakes.) I will refer to the results appearing in Strickland's preprint by the numbers under which they appear in it (specifically, in its version of 2 May 2012, available from \url{https://neil-strickland.staff.shef.ac.uk/research/jordan.pdf}). \setcounter{section}{10} \section*{Errata and addenda} \begin{itemize} \item \textbf{\S 2:} I think it would be better if you spent a bit of time defining some of your notations: \begin{itemize} \item For any nonnegative integer $n$, you let $\Sigma_{n}$ denote the symmetric group of the set $\left\{ 1,2,\ldots,n\right\}$. (This is not a notation I have seen very often. Most combinatorialists call it either $S_{n}$ or $\mathfrak{S}_{n}$ or $\mathcal{S}_{n}$.) \item The composition $\alpha\beta$ of two maps $\alpha:Y\rightarrow Z$ and $\beta:X\rightarrow Y$ is defined as the map $X\rightarrow Z$ that sends each $x\in X$ to $\alpha\left( \beta\left( x\right) \right)$. (This might sound obvious, but irritatingly, a lot of people use the opposite convention for the order of multiplication, particularly when permutations are concerned.) \item If $\alpha:X\rightarrow Y$ is a map, then $\alpha_{\ast}$ means the map $\mathcal{P}\left( X\right) \rightarrow\mathcal{P}\left( Y\right)$ canonically induced by $\alpha$ (where $\mathcal{P}\left( Z\right)$ denotes the powerset of a set $Z$). This is the map that sends every subset $T$ of $X$ to the subset $\alpha\left( T\right)$ of $Y$. 
\end{itemize} \item \textbf{\S 2:} Do you ever use the notation $L^{+}\left( \sigma\right)$ that you define in the beginning of \S 2? (I don't know for sure; just asking.) \item \textbf{Lemma 2.6:} Replace \textquotedblleft$\Sigma$\textquotedblright% \ by \textquotedblleft$\Sigma_{n}$\textquotedblright. \item \textbf{Lemma 2.6:} The period at the end of the sentence should be outside of the parentheses. \item \textbf{Proof of Proposition 2.11:} After \textquotedblleft are disjoint, and\textquotedblright, add \textquotedblleft we have $\sigma =t_{m_{n}}^{n}\tau$; thus, Lemma 2.6 (applied to $t_{m_{n}}^{n}$ instead of $\sigma$) yields% $\overline{L}\left( \sigma\right) =\overline{L}\left( \tau\right) \Delta\tau_{\ast}^{-1}\overline{L}\left( t_{m_{n}}^{n}\right) =\overline {L}\left( \tau\right) \sqcup\tau_{\ast}^{-1}\overline{L}\left( t_{m_{n}% }^{n}\right)$ and therefore\textquotedblright. \item \textbf{Proof of Proposition 2.11:} You have not proven the uniqueness that is claimed in Proposition 2.11. This is not a large gap to fill, and becomes obvious later on\footnote{Namely: In the proof of Proposition 2.13, you show that the map $\epsilon:X_{n}\rightarrow\Sigma_{n}$ is surjective. Since $\left\vert X_{n}\right\vert \leq n!=\left\vert \Sigma_{n}\right\vert$, this entails that the map $\epsilon$ also is injective. But this means precisely that no two distinct sequences $\left( m_{1},m_{2},\ldots ,m_{n}\right)$ with $1\leq m_{k}\leq k$ give rise to one and the same permutation $t_{m_{n}}^{n}t_{m_{n-1}}^{n-1}\cdots t_{m_{2}}^{2}t_{m_{1}}^{1}$. And this is exactly the uniqueness claim of Proposition 2.11.}; but I think it is worth at least briefly mentioning how it is proven. \item \textbf{Definition 2.12:} Replace \textquotedblleft there is a canonical map\textquotedblright\ by \textquotedblleft there is a canonical homomorphism\textquotedblright. 
\item \textbf{Proof of Proposition 2.13:} You are slightly abusing notation here: When you write \textquotedblleft$X_{n}=\bigcup_{m=1}^{n}t_{m}^{n}% X_{n-1}$\textquotedblright, you are implicitly suggesting that $\widetilde{\Sigma}_{n-1}$ can be embedded into $\widetilde{\Sigma}_{n}$. This is correct, but is not obvious until Proposition 2.13 is already proven (at which point it is not useful anymore). A-priori, it is plausible that some nontrivial elements of $\widetilde{\Sigma}_{n-1}$ would collapse to the identity upon adding the extra generator $s_{n-1}$ of $\widetilde{\Sigma}_{n}$ and the extra relations that come with it. Fortunately, the proof is easy to fix, by introducing a group homomorphism $\widetilde{\Sigma}_{n-1}\rightarrow\widetilde{\Sigma}_{n}$: Namely, observe that all the generators and the relations appearing in the definition of $\widetilde{\Sigma}_{n-1}$ also appear in the definition of $\widetilde{\Sigma }_{n}$ (along with one new generator $s_{n-1}$ and some new relation). Thus, there is a group homomorphism $\eta:\widetilde{\Sigma}_{n-1}\rightarrow \widetilde{\Sigma}_{n}$ sending $s_{i}\mapsto s_{i}$ for each $i\in\left\{ 1,2,\ldots,n-2\right\}$. Consider this $\eta$. Regard $\widetilde{\Sigma }_{n}$ as a right $\widetilde{\Sigma}_{n-1}$-set by having $\widetilde{\Sigma }_{n-1}$ act through $\eta$ (that is, set $xy=x\eta\left( y\right)$ for all $x\in\widetilde{\Sigma}_{n}$ and $y\in\widetilde{\Sigma}_{n-1}$). Then, $X_{n}=\bigcup_{m=1}^{n}t_{m}^{n}X_{n-1}$ is still correct (where the implied multiplication in $t_{m}^{n}X_{n-1}=\left\{ t_{m}^{n}x\ \mid\ x\in X_{n-1}\right\}$ is now to be understood as the $\widetilde{\Sigma}_{n-1}%$-action on $\widetilde{\Sigma}_{n}$). 
All the rest of the proof goes through unchanged, except for one simple
modification (namely, \textquotedblleft%
$\widetilde{\Sigma}_{n}$ is generated by $\widetilde{\Sigma}_{n-1}$ and
$s_{n-1}$\textquotedblright\ must become \textquotedblleft$\widetilde{\Sigma}_{n}$
is generated by $\eta\left( \widetilde{\Sigma}_{n-1}\right)$ and
$s_{n-1}$\textquotedblright).

\item \textbf{Proof of Lemma 2.14:} Replace \textquotedblleft identity
permutation\textquotedblright\ by \textquotedblleft identity of
$\widetilde{\Sigma}_{n}$\textquotedblright. (The identification between
permutations and elements of $\widetilde{\Sigma}_{n}$ cannot yet be used at
this point.)

\item \textbf{Definition 2.15:} Replace \textquotedblleft$s_{1},\ldots,s_{n}%
$\textquotedblright\ by \textquotedblleft$s_{1},\ldots,s_{n-1}$%
\textquotedblright\ twice in this definition.

\item \textbf{Definition 2.15:} Replace \textquotedblleft$us_{i}s_{j}%
s_{i}v=us_{j}s_{i}s_{j}v$\textquotedblright\ by \textquotedblleft$us_{i}%
s_{j}s_{i}v\sim us_{j}s_{i}s_{j}v$\textquotedblright.

\item \textbf{Definition 2.15:} Replace \textquotedblleft$\Sigma$\textquotedblright\ by
\textquotedblleft$\Sigma_{n}$\textquotedblright\ twice in this definition (the
second time is inside the commutative diagram). Or just define $\Sigma$ to be
an abbreviation for $\Sigma_{n}$?

\item \textbf{Definition 2.15:} Please explain that $\sim$ is defined to be
the disjoint union of the relations $\sim_{r}$ over all $r\in\mathbb{N}$.
(This is a relation on $\coprod_{r}W_{r}=W$.)

\item \textbf{Definition 2.17:} At the end of condition (c), add
\textquotedblleft and the word $uv$ is reduced\textquotedblright. Otherwise,
condition (c) would always hold!
\item \textbf{Definition 2.17:} I think the justification for the equivalence of the four conditions would be clearer if you replaced \textquotedblleft and it follows from Lemma 2.6 that (a) is equivalent to (d)\textquotedblright\ by \textquotedblleft and it follows from Lemma 2.6 (applied to $\sigma\tau^{-1}$ instead of $\sigma$) that (b) is equivalent to (d)\textquotedblright. \item \textbf{Proof of Lemma 2.18:} Replace \textquotedblleft the $3$-cycle $\left( i,i+1,i+2\right)$\textquotedblright\ by \textquotedblleft the transposition $\left( i,i+2\right)$\textquotedblright. \item \textbf{Proof of Theorem 2.16:} I suspect the LaTeX here is slightly broken: You want to start the proof by \textquotedblleft\textit{Proof of Theorem 2.16.}\textquotedblright\ and not by \textquotedblleft\textit{Proof.} Proof of Theorem 2.16.\textquotedblright. \item \textbf{Proof of Theorem 2.16:} Replace \textquotedblleft so $u=v$\textquotedblright\ by \textquotedblleft so $u\sim v$\textquotedblright. \item \textbf{\S 2:} I suggest adding the following fact to \S 2 (which is used later, in \S 9): \textbf{Corollary 2.19.} \textbf{(a)} We have $l\left( \sigma^{-1}% \rho\right) =n\left( n-1\right) /2-l\left( \sigma\right)$ for each $\sigma\in\Sigma_{n}$. \textbf{(b)} We have $l\left( \sigma\rho\right) =n\left( n-1\right) /2-l\left( \sigma\right)$ for each $\sigma\in\Sigma_{n}$. \textbf{(c)} We have $l\left( \rho\sigma^{-1}\right) =n\left( n-1\right) /2-l\left( \sigma\right)$ for each $\sigma\in\Sigma_{n}$. \textbf{(d)} We have $l\left( \rho\sigma\right) =n\left( n-1\right) /2-l\left( \sigma\right)$ for each $\sigma\in\Sigma_{n}$. [\textit{Proof of Corollary 2.19.} \textbf{(c)} Recall the four equivalent conditions (a), (b), (c) and (d) in Definition 2.17. In particular, the two conditions (b) and (d) are equivalent for each $\sigma\in\Sigma_{n}$ and $\tau\in\Sigma_{n}$. 
In other words, for each $\sigma\in\Sigma_{n}$ and $\tau\in\Sigma_{n}$, we
have the following equivalence:%
\begin{equation}
\left( l\left( \sigma\tau^{-1}\right) =l\left( \sigma\right) -l\left(
\tau\right) \right) \ \Longleftrightarrow\ \left( \overline{L}\left(
\tau\right) \subseteq\overline{L}\left( \sigma\right) \right) .
\label{pf.c2.19.1}%
\end{equation}
Now, fix $\sigma\in\Sigma_{n}$. Then, $\overline{L}\left( \sigma\right)
\subseteq\overline{L}\left( \rho\right)$\ \ \ \ \footnote{\textit{Proof.}
Let $U\in\overline{L}\left( \sigma\right)$. Then, $U\in\overline{L}\left(
\sigma\right) =\left\{ \left\{ i,j\right\} \ \mid\ \left( i,j\right)
\in L\left( \sigma\right) \right\} =\left\{ \left\{ u,v\right\}
\ \mid\ \left( u,v\right) \in L\left( \sigma\right) \right\}$. In other
words, $U=\left\{ u,v\right\}$ for some $\left( u,v\right) \in L\left(
\sigma\right)$. Fix this $\left( u,v\right)$.
% NOTE(review): the next few sentences were garbled in the source (text
% between "<" and ">" signs was lost); they have been reconstructed from the
% surrounding argument.
We have $0<u<v\leq n$, so that $\rho\left( u\right) =n+1-u>n+1-v=\rho\left(
v\right)$. Thus, $0<u<v\leq n$ and $\rho\left( u\right) >\rho\left(
v\right)$. In other words, $\left( u,v\right)$ is an element $\left(
i,j\right)$ such that $0<i<j\leq n$ and $\rho\left( i\right) >\rho\left(
j\right)$. In other words, $\left( u,v\right) \in\left\{ \left(
i,j\right) \ \mid\ 0<i<j\leq n\text{ and }\rho\left( i\right) >\rho\left(
j\right) \right\}$. This rewrites as $\left( u,v\right) \in L\left(
\rho\right)$ (since $L\left( \rho\right) =\left\{ \left( i,j\right)
\ \mid\ 0<i<j\leq n\text{ and }\rho\left( i\right) >\rho\left( j\right)
\right\}$ (by the definition of $L\left( \rho\right)$)). Now, recall that
$U=\left\{ u,v\right\}$. Hence, $U=\left\{ i,j\right\}$ for some
$\left( i,j\right) \in L\left( \rho\right)$ (namely, for $\left(
i,j\right) =\left( u,v\right)$). In other words, $U\in\left\{ \left\{
i,j\right\} \ \mid\ \left( i,j\right) \in L\left( \rho\right)
\right\}$. This rewrites as $U\in\overline{L}\left( \rho\right)$ (since
$\overline{L}\left( \rho\right) =\left\{ \left\{ i,j\right\}
\ \mid\ \left( i,j\right) \in L\left( \rho\right) \right\}$ (by the
definition of $\overline{L}\left( \rho\right)$)).
\par Now, forget that we fixed $U$.
We thus have shown that $U\in\overline {L}\left( \rho\right)$ for each $U\in\overline{L}\left( \sigma\right)$. In other words, $\overline{L}\left( \sigma\right) \subseteq\overline {L}\left( \rho\right)$, qed.}. But the equivalence (\ref{pf.c2.19.1}) (applied to $\rho$ and $\sigma$ instead of $\sigma$ and $\rho$) shows that% $\left( l\left( \rho\sigma^{-1}\right) =l\left( \rho\right) -l\left( \sigma\right) \right) \ \Longleftrightarrow\ \left( \overline{L}\left( \sigma\right) \subseteq\overline{L}\left( \rho\right) \right) .$ Therefore, we have $l\left( \rho\sigma^{-1}\right) =l\left( \rho\right) -l\left( \sigma\right)$ (since we have $\overline{L}\left( \sigma\right) \subseteq\overline{L}\left( \rho\right)$). Thus, $l\left( \rho\sigma ^{-1}\right) =\underbrace{l\left( \rho\right) }_{=n\left( n-1\right) /2}-l\left( \sigma\right) =n\left( n-1\right) /2-l\left( \sigma\right)$. This proves Corollary 2.19 \textbf{(c)}. \textbf{(d)} Let $\sigma\in\Sigma_{n}$. Corollary 2.19 \textbf{(c)} (applied to $\sigma^{-1}$ instead of $\sigma$) shows that $l\left( \rho\left( \sigma^{-1}\right) ^{-1}\right) =n\left( n-1\right) /2-l\left( \sigma^{-1}\right)$. Since $\left( \sigma^{-1}\right) ^{-1}=\sigma$, this rewrites as% $l\left( \rho\sigma\right) =n\left( n-1\right) /2-\underbrace{l\left( \sigma^{-1}\right) }_{\substack{=l\left( \sigma\right) \\\text{(by Lemma 2.3)}}}=n\left( n-1\right) /2-l\left( \sigma\right) .$ This proves Corollary 2.19 \textbf{(d)}. \textbf{(a)} Let $\sigma\in\Sigma_{n}$. Then, Lemma 2.3 (applied to $\rho\sigma$ instead of $\sigma$) shows that $l\left( \left( \rho \sigma\right) ^{-1}\right) =l\left( \rho\sigma\right) =n\left( n-1\right) /2-l\left( \sigma\right)$ (by Corollary 2.19 \textbf{(d)}). Since $\left( \rho\sigma\right) ^{-1}=\sigma^{-1}\underbrace{\rho^{-1}% }_{=\rho}=\sigma^{-1}\rho$, this rewrites as $l\left( \sigma^{-1}\rho\right) =n\left( n-1\right) /2-l\left( \sigma\right)$. This proves Corollary 2.19 \textbf{(a)}. 
\textbf{(b)} Let $\sigma\in\Sigma_{n}$. Then, Lemma 2.3 (applied to $\sigma\rho$ instead of $\sigma$) shows that $l\left( \left( \sigma \rho\right) ^{-1}\right) =l\left( \sigma\rho\right)$. Since $\left( \sigma\rho\right) ^{-1}=\underbrace{\rho^{-1}}_{=\rho}\sigma^{-1}=\rho \sigma^{-1}$, this rewrites as $l\left( \rho\sigma^{-1}\right) =l\left( \sigma\rho\right)$. Hence, $l\left( \sigma\rho\right) =l\left( \rho \sigma^{-1}\right) =n\left( n-1\right) /2-l\left( \sigma\right)$ (by Corollary 2.19 \textbf{(c)}). This proves Corollary 2.19 \textbf{(b)}. $\square$ ] \item \textbf{\S 3:} In the definition of $T$, a whitespace is missing between \textquotedblleft$ge_{i}\in\mathbb{F}_{p}e_{i}$\textquotedblright\ and \textquotedblleft for all $i$\textquotedblright. \item \textbf{\S 3:} Add a period before \textquotedblleft With this convention\textquotedblright. \item \textbf{\S 3:} After \textquotedblleft and $\left( g.x\right) _{i}=\sum_{j}g_{ij}x_{j}$\textquotedblright, add \textquotedblleft for $x=\left( x_{1},x_{2},\ldots,x_{n}\right) ^{T}$\textquotedblright. \item \textbf{\S 4:} Before Example 4.1, it would be good to say the following: \textquotedblleft We shall call $\delta\left( \underline{U}% ,\underline{V}\right)$ the \textit{Jordan permutation} of the flags $\underline{U}$ and $\underline{V}$.\textquotedblright. This way, the words \textquotedblleft Jordan permutation\textquotedblright\ (which are used in Definition 8.2) are actually defined. \item \textbf{\S 4:} It would also be useful to point out explicitly that $\delta\left( \underline{U},\underline{U}\right) =1$ for each $\underline{U}% \in\operatorname*{Flag}\left( W\right)$. This is very easy to prove (it is a corollary of Lemma 4.5, but it is also pretty easy to check using just the definition); but I think it's worth explicitly stating. 
\item \textbf{Example 4.1:} At the very beginning of this example, add the
following sentence: \textquotedblleft Set $\underline{E}=\left(
E_{0}<E_{1}<\cdots<E_{n}\right)$.\textquotedblright

% NOTE(review): a chunk of source text was lost here by the extraction
% (everything between the "<" in the preceding formula and a later ">").
% It presumably contained further items and the definition of the leading
% index lind(x); the statement of the following lemma has been reconstructed
% from its surviving proof — verify against the original errata list.
\item \textbf{Lemma 5.2b.} Let $x$ be a nonzero vector in $\mathbb{F}_{p}^{n}$.

\textbf{(a)} If $i\in\left\{ 1,2,\ldots,n\right\}$ is such that
$i>\operatorname*{lind}\left( x\right)$, then $\epsilon_{i}\left( x\right) =0$.

\textbf{(b)} We have $\epsilon_{\operatorname*{lind}\left( x\right) }\left(
x\right) \neq0$.

[\textit{Proof of Lemma 5.2b:} \textbf{(a)} Let $i\in\left\{ 1,2,\ldots
,n\right\}$ be such that $i>\operatorname*{lind}\left( x\right)$. But
$\operatorname*{lind}\left( x\right)$ is the highest $k\in\left\{
1,2,\ldots,n\right\}$ satisfying $\epsilon_{k}\left( x\right) \neq0$ (by
the definition of $\operatorname*{lind}\left( x\right)$). Hence, every
$k\in\left\{ 1,2,\ldots,n\right\}$ satisfying $\epsilon_{k}\left(
x\right) \neq0$ must satisfy $k\leq\operatorname*{lind}\left( x\right)$.
Applying this to $k=i$, we conclude that if $\epsilon_{i}\left( x\right)
\neq0$, then $i\leq\operatorname*{lind}\left( x\right)$. Hence, we cannot
have $\epsilon_{i}\left( x\right) \neq0$ (since we cannot have
$i\leq\operatorname*{lind}\left( x\right)$ (since we have
$i>\operatorname*{lind}\left( x\right)$)). In other words, we have
$\epsilon_{i}\left( x\right) =0$. This proves Lemma 5.2b \textbf{(a)}.

\textbf{(b)} We know that $\operatorname*{lind}\left( x\right)$ is the
highest $k\in\left\{ 1,2,\ldots,n\right\}$ satisfying $\epsilon_{k}\left(
x\right) \neq0$ (by the definition of $\operatorname*{lind}\left( x\right)$).
Hence, $\operatorname*{lind}\left( x\right)$ is a $k\in\left\{
1,2,\ldots,n\right\}$ satisfying $\epsilon_{k}\left( x\right) \neq0$.
Thus, $\epsilon_{\operatorname*{lind}\left( x\right) }\left( x\right)
\neq0$. This proves Lemma 5.2b \textbf{(b)}. $\square$]

\item \textbf{Lemma 5.2c.} Let $\left( w_{s}\right) _{s\in S}$ be a finite
family of nonzero vectors in $\mathbb{F}_{p}^{n}$. Assume that the leading
indices of the $w_{s}$ (for $s\in S$) are pairwise distinct.
\textbf{(a)} Each nonzero vector $x\in\mathbb{F}_{p}\left\{ w_{s}\ \mid\ s\in S\right\}$ satisfies $\operatorname*{lind}\left( x\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\} .$ \textbf{(b)} The family $\left( w_{s}\right) _{s\in S}$ is $\mathbb{F}_{p}%$-linearly independent. \textbf{(c)} Let $\left( \lambda_{s}\right) _{s\in S}\in\mathbb{F}_{p}^{S}$ be a family of elements of $\mathbb{F}_{p}$. Assume that there exists at least one $s\in S$ satisfying $\lambda_{s}\neq0$. Set $x=\sum_{s\in S}\lambda _{s}w_{s}$. Then, $x\neq0$ and $\operatorname*{lind}\left( x\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\}$. [\textit{Proof of Lemma 5.2c:} \textbf{(c)} There exists at least one $s\in S$ satisfying $\lambda_{s}\neq0$. Among all these $s\in S$ satisfying $\lambda_{s}\neq0$, pick one for which $\operatorname*{lind}\left( w_{s}\right)$ is maximum, and denote this $s$ by $t$. Thus, $\lambda_{t}% \neq0$, and $\operatorname*{lind}\left( w_{t}\right)$ is the highest among the $\operatorname*{lind}\left( w_{s}\right)$ for all $s\in S$ satisfying $\lambda_{s}\neq0$. As a consequence,% \begin{equation} \text{every }s\in S\text{ satisfying }\lambda_{s}\neq0\text{ satisfies }\operatorname*{lind}\left( w_{s}\right) \leq\operatorname*{lind}\left( w_{t}\right) \label{pf.prop.5.2.lt-in-set.pf.leq}% \end{equation} (since $\operatorname*{lind}\left( w_{t}\right)$ is the highest among the $\operatorname*{lind}\left( w_{s}\right)$ for all $s\in S$ satisfying $\lambda_{s}\neq0$). Moreover, recall that the leading indices of the $w_{s}$ (for $s\in S$) are pairwise distinct. In other words,% \begin{equation} \text{every two distinct elements }p\text{ and }q\text{ of }S\text{ satisfy }\operatorname*{lind}\left( w_{p}\right) \neq\operatorname*{lind}\left( w_{q}\right) . 
\label{pf.prop.5.2.lt-in-set.pf.neq}%
\end{equation}
Now,%
\begin{equation}
\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( \lambda_{s}%
w_{s}\right) =0\ \ \ \ \ \ \ \ \ \ \text{for every }s\in S\text{ satisfying
}s\neq t \label{pf.prop.5.2.lt-in-set.pf.1}%
\end{equation}
\footnote{\textit{Proof of (\ref{pf.prop.5.2.lt-in-set.pf.1}):} Let $s\in S$
be such that $s\neq t$. Then, (\ref{pf.prop.5.2.lt-in-set.pf.neq}) (applied
to $p=s$ and $q=t$) yields $\operatorname*{lind}\left( w_{s}\right)
\neq\operatorname*{lind}\left( w_{t}\right)$.
\par If $\lambda_{s}=0$, then $\epsilon_{\operatorname*{lind}\left(
w_{t}\right) }\left( \underbrace{\lambda_{s}}_{=0}w_{s}\right) =\epsilon
_{\operatorname*{lind}\left( w_{t}\right) }\left( 0w_{s}\right) =0$.
Hence, if $\lambda_{s}=0$, then (\ref{pf.prop.5.2.lt-in-set.pf.1}) holds.
Thus, for the rest of this proof of (\ref{pf.prop.5.2.lt-in-set.pf.1}), we
WLOG assume that $\lambda_{s}\neq0$. Hence, (\ref{pf.prop.5.2.lt-in-set.pf.leq}%
) shows that $\operatorname*{lind}\left( w_{s}\right) \leq
\operatorname*{lind}\left( w_{t}\right)$. Combining this with
$\operatorname*{lind}\left( w_{s}\right) \neq\operatorname*{lind}\left(
w_{t}\right)$, we obtain $\operatorname*{lind}\left( w_{s}\right)
<\operatorname*{lind}\left( w_{t}\right)$. Thus, $\operatorname*{lind}%
\left( w_{t}\right) >\operatorname*{lind}\left( w_{s}\right)$. Hence,
Lemma 5.2b \textbf{(a)} (applied to $x=w_{s}$ and $i=\operatorname*{lind}%
\left( w_{t}\right)$) yields $\epsilon_{\operatorname*{lind}\left(
w_{t}\right) }\left( w_{s}\right) =0$. Thus, $\epsilon
_{\operatorname*{lind}\left( w_{t}\right) }\left( \lambda_{s}w_{s}\right)
=\lambda_{s}\underbrace{\epsilon_{\operatorname*{lind}\left( w_{t}\right)
}\left( w_{s}\right) }_{=0}=0$. This proves
(\ref{pf.prop.5.2.lt-in-set.pf.1}).}.
Furthermore, if $i\in\left\{ 1,2,\ldots,n\right\}$ is such that
$i>\operatorname*{lind}\left( w_{t}\right)$, then
\begin{equation}
\epsilon_{i}\left( \lambda_{s}w_{s}\right)
=0\ \ \ \ \ \ \ \ \ \ \text{for every }s\in S
\label{pf.prop.5.2.lt-in-set.pf.2}%
\end{equation}
\footnote{\textit{Proof of (\ref{pf.prop.5.2.lt-in-set.pf.2}):} Let $s\in S$.
If $\lambda_{s}=0$, then $\epsilon_{i}\left( \underbrace{\lambda_{s}}%
_{=0}w_{s}\right) =\epsilon_{i}\left( 0w_{s}\right) =0$. Hence, if
$\lambda_{s}=0$, then (\ref{pf.prop.5.2.lt-in-set.pf.2}) holds. Thus, for the
rest of this proof of (\ref{pf.prop.5.2.lt-in-set.pf.2}), we WLOG assume that
$\lambda_{s}\neq0$. Hence, (\ref{pf.prop.5.2.lt-in-set.pf.leq}) shows that
% NOTE(review): the following inequality chain was garbled in the source;
% reconstructed from context.
$\operatorname*{lind}\left( w_{s}\right) \leq\operatorname*{lind}\left(
w_{t}\right) <i$ (since $i>\operatorname*{lind}\left( w_{t}\right)$).
Thus, $i>\operatorname*{lind}\left( w_{s}\right)$. Hence, Lemma 5.2b
\textbf{(a)} (applied to $x=w_{s}$) yields $\epsilon_{i}\left( w_{s}\right)
=0$. Thus, $\epsilon_{i}\left( \lambda_{s}w_{s}\right) =\lambda
_{s}\underbrace{\epsilon_{i}\left( w_{s}\right) }_{=0}=0$. This proves
(\ref{pf.prop.5.2.lt-in-set.pf.2}).}.

If $i\in\left\{ 1,2,\ldots,n\right\}$ is such that
$i>\operatorname*{lind}\left( w_{t}\right)$, then%
\begin{align}
\epsilon_{i}\left( \underbrace{x}_{=\sum_{s\in S}\lambda_{s}w_{s}}\right)
& =\epsilon_{i}\left( \sum_{s\in S}\lambda_{s}w_{s}\right) =\sum_{s\in
S}\underbrace{\epsilon_{i}\left( \lambda_{s}w_{s}\right) }%
_{\substack{=0\\\text{(by (\ref{pf.prop.5.2.lt-in-set.pf.2}))}}}\nonumber\\
& =\sum_{s\in S}0=0.
\label{pf.prop.5.2.lt-in-set.pf.4}% \end{align} But% \begin{align*} & \epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( \underbrace{x}% _{=\sum_{s\in S}\lambda_{s}w_{s}}\right) \\ & =\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( \sum_{s\in S}\lambda_{s}w_{s}\right) =\sum_{s\in S}\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( \lambda_{s}w_{s}\right) \\ & =\underbrace{\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( \lambda_{t}w_{t}\right) }_{=\lambda_{t}\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( w_{t}\right) }+\sum_{\substack{s\in S;\\s\neq t}}\underbrace{\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( \lambda_{s}w_{s}\right) }_{\substack{=0\\\text{(by (\ref{pf.prop.5.2.lt-in-set.pf.1}))}}}\\ & \ \ \ \ \ \ \ \ \ \ \left( \text{here, we have split off the addend for }s=t\text{ from the sum}\right) \\ & =\lambda_{t}\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( w_{t}\right) +\underbrace{\sum_{\substack{s\in S;\\s\neq t}}0}_{=0}% =\underbrace{\lambda_{t}}_{\neq0}\underbrace{\epsilon_{\operatorname*{lind}% \left( w_{t}\right) }\left( w_{t}\right) }_{\substack{\neq0\\\text{(by Lemma 5.2b \textbf{(b)}, applied}\\\text{to }x=w_{t}\text{)}}}\neq0. \end{align*} Hence, $x\neq0$. It remains to show that $\operatorname*{lind}\left( x\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\}$. But $\operatorname*{lind}\left( w_{t}\right)$ is a $k\in\left\{ 1,2,\ldots,n\right\}$ satisfying $\epsilon_{k}\left( x\right) \neq0$ (since $\epsilon_{\operatorname*{lind}\left( w_{t}\right) }\left( x\right) \neq0$). Moreover, $\operatorname*{lind}\left( w_{t}\right)$ is the \textbf{highest} such $k$ (because any $i\in\left\{ 1,2,\ldots,n\right\}$ satisfying $i>\operatorname*{lind}\left( w_{t}\right)$ satisfies $\epsilon_{i}\left( x\right) =0$ (by (\ref{pf.prop.5.2.lt-in-set.pf.4}))). 
Thus, $\operatorname*{lind}\left( w_{t}\right)$ is the highest $k\in\left\{ 1,2,\ldots,n\right\}$ satisfying $\epsilon_{k}\left( x\right) \neq0$. In other words, $\operatorname*{lind}\left( w_{t}\right)$ is the leading index of $x$ (by the definition of the leading index). In other words, $\operatorname*{lind}\left( w_{t}\right) =\operatorname*{lind}\left( x\right)$. Thus,% $\operatorname*{lind}\left( x\right) =\operatorname*{lind}\left( w_{t}\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\} .$ This completes the proof of Lemma 5.2c \textbf{(c)}. \textbf{(b)} Let $\left( \lambda_{s}\right) _{s\in S}\in\mathbb{F}_{p}^{S}$ be a family of elements of $\mathbb{F}_{p}$ satisfying $\sum_{s\in S}% \lambda_{s}w_{s}=0$. Thus, $0=\sum_{s\in S}\lambda_{s}w_{s}$. Then, no $s\in S$ satisfies $\lambda_{s}\neq0$\ \ \ \ \footnote{\textit{Proof.} Assume the contrary. Thus, there exists at least one $s\in S$ satisfying $\lambda_{s}% \neq0$. Hence, Lemma 5.2c \textbf{(c)} (applied to $x=0$) yields $0\neq0$ and $\operatorname*{lind}\left( 0\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\}$. But $0\neq0$ is clearly absurd. Hence, we have obtained a contradiction. This shows that our assumption was wrong. Qed.}. In other words, every $s\in S$ satisfies $\lambda_{s}=0$. Now, forget that we fixed $\left( \lambda_{s}\right) _{s\in S}$. We thus have shown that if $\left( \lambda_{s}\right) _{s\in S}\in\mathbb{F}_{p}% ^{S}$ is a family of elements of $\mathbb{F}_{p}$ satisfying $\sum_{s\in S}\lambda_{s}w_{s}=0$, then every $s\in S$ satisfies $\lambda_{s}=0$. In other words, the family $\left( w_{s}\right) _{s\in S}$ is $\mathbb{F}_{p}%$-linearly independent. This proves Lemma 5.2c \textbf{(b)}. \textbf{(a)} Let $x\in\mathbb{F}_{p}\left\{ w_{s}\ \mid\ s\in S\right\}$ be a nonzero vector. We must prove that $\operatorname*{lind}\left( x\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\}$. 
We have $x\in\mathbb{F}_{p}\left\{ w_{s}\ \mid\ s\in S\right\}$. Hence, we can write $x$ in the form $x=\sum_{s\in S}\lambda_{s}w_{s}$ for some elements $\lambda_{s}$ of $\mathbb{F}_{p}$. Consider these $\lambda_{s}$. There exists at least one $s\in S$ satisfying $\lambda_{s}\neq0$% \ \ \ \ \footnote{\textit{Proof.} Assume the contrary. Thus, $\lambda_{s}=0$ for all $s\in S$. Now, $x=\sum_{s\in S}\underbrace{\lambda_{s}}_{=0}w_{s}% =\sum_{s\in S}0w_{s}=0$. This contradicts the fact that $x$ is nonzero. This contradiction shows that our assumption was wrong, qed.}. Thus, Lemma 5.2c \textbf{(c)} yields $x\neq0$ and $\operatorname*{lind}\left( x\right) \in\left\{ \operatorname*{lind}\left( w_{s}\right) \ \mid\ s\in S\right\}$. This proves Lemma 5.2c \textbf{(a)}. $\square$] \item \textbf{Lemma 5.2d.} If $\mathfrak{W}$ is a vector subspace of $\mathbb{F}_{p}^{n}$, then $\dim\mathfrak{W}=\left\vert \left\{ \operatorname*{lind}\left( x\right) \ \mid\ x\in\mathfrak{W}\setminus\left\{ 0\right\} \right\} \right\vert .$ [\textit{Proof of Lemma 5.2d (sketched).} Lemma 5.2d is well-known and not hard to prove. We shall only use it on one occasion, which is not central to our argument; thus, I shall only outline the proof. Define the \textit{energy} of a basis $\left( w_{1},w_{2},\ldots ,w_{k}\right)$ of $\mathfrak{W}$ to be the nonnegative integer $\operatorname*{lind}\left( w_{1}\right) +\operatorname*{lind}\left( w_{2}\right) +\cdots+\operatorname*{lind}\left( w_{k}\right)$. Then, there clearly exists a basis $\left( w_{1},w_{2},\ldots,w_{k}\right)$ having minimum energy. Fix such a basis\footnote{This argument is not constructive, but we could easily replace it by a constructive argument by induction.}. 
Then, no two among the elements $w_{1},w_{2},\ldots,w_{k}$ can have equal
leading indices (because if $w_{i}$ and $w_{j}$ had equal leading indices for
some $i$ and $j$, then we could replace $w_{j}$ by some linear combination
$\alpha w_{i}+w_{j}$ with $\alpha\in\mathbb{F}_{p}$, and by choosing $\alpha$
appropriately we would ensure that $\operatorname*{lind}\left( \alpha
w_{i}+w_{j}\right) <\operatorname*{lind}\left( w_{j}\right)$, so that the
resulting basis would have a smaller energy than $\left( w_{1}%
,w_{2},\ldots,w_{k}\right)$; but this would contradict our choice of
$\left( w_{1},w_{2},\ldots,w_{k}\right)$ as the basis with minimum energy).
Hence, the leading indices of the elements $w_{1},w_{2},\ldots,w_{k}$ are
distinct. Thus, $\left\vert \left\{ \operatorname*{lind}\left( x\right)
\ \mid\ x\in\mathfrak{W}\setminus\left\{ 0\right\} \right\} \right\vert
\geq k=\dim\mathfrak{W}$.

It remains to prove that $\dim\mathfrak{W}\geq\left\vert \left\{
\operatorname*{lind}\left( x\right) \ \mid\ x\in\mathfrak{W}%
\setminus\left\{ 0\right\} \right\} \right\vert$. In order to do so, we
assume the contrary. Thus, $\left\vert \left\{ \operatorname*{lind}\left(
x\right) \ \mid\ x\in\mathfrak{W}\setminus\left\{ 0\right\} \right\}
\right\vert >\dim\mathfrak{W}=k$. Hence, there exists some $x\in
\mathfrak{W}\setminus\left\{ 0\right\}$ such that $\operatorname*{lind}%
\left( x\right)$ equals none of $\operatorname*{lind}\left( w_{1}\right)
,\operatorname*{lind}\left( w_{2}\right) ,\ldots,\operatorname*{lind}\left(
w_{k}\right)$. Consider this $x$. The $k+1$ nonzero vectors $w_{1}%
,w_{2},\ldots,w_{k},x$ in $\mathbb{F}_{p}^{n}$ have the property that their
leading indices are pairwise distinct. Thus, Lemma 5.2c \textbf{(b)} shows
that they are $\mathbb{F}_{p}$-linearly independent. Since these $k+1$
vectors all belong to $\mathfrak{W}$, we thus have found $k+1$ linearly
independent vectors in $\mathfrak{W}$.
But this contradicts the fact that $\dim\mathfrak{W}=k$. This contradiction shows that our assumption was wrong. Hence, Lemma 5.2d is proven. $\square$]

\item \textbf{Proof of Proposition 5.2:} We have
\begin{equation}
V_{i}\cap E_{\sigma\left( i\right) }>V_{i-1}\cap E_{\sigma\left( i\right) }+V_{i}\cap E_{\sigma\left( i\right) -1}\geq V_{i}\cap E_{\sigma\left( i\right) -1}. \label{pf.prop.5.2.alt}
\end{equation}
Now, $\epsilon_{\sigma\left( i\right) }\mid_{V_{i}\cap E_{\sigma\left( i\right) }}\neq0$\ \ \ \ \footnote{\textit{Proof.} Assume the contrary. Thus, $\epsilon_{\sigma\left( i\right) }\mid_{V_{i}\cap E_{\sigma\left( i\right) }}=0$.
\par Fix $x\in V_{i}\cap E_{\sigma\left( i\right) }$. Then, $x\in V_{i}\cap E_{\sigma\left( i\right) }\subseteq E_{\sigma\left( i\right) }=\mathbb{F}_{p}\left\{ e_{1},e_{2},\ldots,e_{\sigma\left( i\right) }\right\} $. Also, $x\in V_{i}\cap E_{\sigma\left( i\right) }$, so that $\epsilon_{\sigma\left( i\right) }\left( x\right) =\underbrace{\left( \epsilon_{\sigma\left( i\right) }\mid_{V_{i}\cap E_{\sigma\left( i\right) }}\right) }_{=0}\left( x\right) =0$. In other words, the $\sigma\left( i\right) $-th coordinate of the vector $x$ is $0$. Combining this with $x\in\mathbb{F}_{p}\left\{ e_{1},e_{2},\ldots,e_{\sigma\left( i\right) }\right\} $, we conclude $x\in\mathbb{F}_{p}\left\{ e_{1},e_{2},\ldots,e_{\sigma\left( i\right) -1}\right\} =E_{\sigma\left( i\right) -1}$. Combining $x\in V_{i}\cap E_{\sigma\left( i\right) }\subseteq V_{i}$ with $x\in E_{\sigma\left( i\right) -1}$, we find $x\in V_{i}\cap E_{\sigma\left( i\right) -1}$.
\par Now, forget that we fixed $x$. We thus have proven that $x\in V_{i}\cap E_{\sigma\left( i\right) -1}$ for each $x\in V_{i}\cap E_{\sigma\left( i\right) }$. In other words, $V_{i}\cap E_{\sigma\left( i\right) }\subseteq V_{i}\cap E_{\sigma\left( i\right) -1}$. Hence, $V_{i}\cap E_{\sigma\left( i\right) -1}$ is not a proper subset of $V_{i}\cap E_{\sigma\left( i\right) }$. This contradicts (\ref{pf.prop.5.2.alt}). This contradiction shows that our assumption was wrong, qed.}.
Hence, the $\mathbb{F}_{p}$-linear map $\epsilon_{\sigma\left( i\right) }\mid_{V_{i}\cap E_{\sigma\left( i\right) }}:V_{i}\cap E_{\sigma\left( i\right) }\rightarrow\mathbb{F}_{p}$ has rank $\geq1$, and therefore must be surjective (since its target is the $1$-dimensional $\mathbb{F}_{p}$-vector space $\mathbb{F}_{p}$). Therefore, there exists some $x\in V_{i}\cap E_{\sigma\left( i\right) }$ satisfying $\left( \epsilon_{\sigma\left( i\right) }\mid_{V_{i}\cap E_{\sigma\left( i\right) }}\right) \left( x\right) =1$. Consider this $x$. We have $\epsilon_{\sigma\left( i\right) }\left( x\right) =\left( \epsilon_{\sigma\left( i\right) }\mid_{V_{i}\cap E_{\sigma\left( i\right) }}\right) \left( x\right) =1$. Furthermore, $x\in V_{i}\cap E_{\sigma\left( i\right) }\subseteq E_{\sigma\left( i\right) }=S_{i}\oplus T_{i}$. In other words, there exist $y\in S_{i}$ and $z\in T_{i}$ such that $x=y+z$. Consider these $y$ and $z$. We have $x\in V_{i}\cap E_{\sigma\left( i\right) }\leq V_{i}$ and $y\in S_{i}\leq V_{i}\cap E_{\sigma\left( i\right) }\leq V_{i}$. Now, $x=y+z$, so that $z=x-y\in V_{i}$ (since $x\in V_{i}$ and $y\in V_{i}$, and since $V_{i}$ is an $\mathbb{F}_{p}$-vector space). Combining this with $z\in T_{i}$, we obtain $z\in V_{i}\cap T_{i}$. For each $j>\sigma\left( i\right) $, the $j$-th coordinate of $z$ is $0$ (since $z\in T_{i}\leq E_{\sigma\left( i\right) }$). But the $\sigma\left( i\right) $-th coordinate of $z$ is $\epsilon_{\sigma\left( i\right) }\left( z\right) =1\neq0$. Combining the preceding two sentences, we conclude that the leading index of $z$ is $\sigma\left( i\right) $. In other words, $\operatorname*{lind}\left( z\right) =\sigma\left( i\right) $. Hence, $\sigma\left( i\right) =\operatorname*{lind}\left( z\right) \in\left\{ \sigma\left( j\right) \ \mid\ j\leq i\right\} $. Furthermore, $z\notin V_{i-1}$, and $v_{1},v_{2},\ldots,v_{i-1},z$ is a basis for $V_{i}$ over $\mathbb{F}_{p}$\ \ \ \ \footnote{\textit{Proof.} We have $V_{i-1}+\mathbb{F}_{p}z>V_{i-1}$ (since $z\in\mathbb{F}_{p}z\subseteq V_{i-1}+\mathbb{F}_{p}z$ but $z\notin V_{i-1}$). Hence, $\dim\left( V_{i-1}+\mathbb{F}_{p}z\right) >\dim\left( V_{i-1}\right) =i-1$.
Since$\dim\left( V_{i-1}+\mathbb{F}_{p}z\right) $and$i-1$are integers, this entails that$\dim\left( V_{i-1}+\mathbb{F}_{p}z\right) \geq\left( i-1\right) +1=i=\dim\left( V_{i}\right) $. Furthermore,$\underbrace{V_{i-1}}_{\leq V_{i}}+\underbrace{\mathbb{F}_{p}z}% _{\substack{\leq V_{i}\\\text{(since }z\in V_{i}\text{)}}}\leq V_{i}% +V_{i}=V_{i}$. \par Now, it is well-known that if$\mathfrak{U}$is a subspace of a finite-dimensional vector space$\mathfrak{V}$, and if$\dim\mathfrak{U}% \geq\dim\mathfrak{V}$, then$\mathfrak{U}=\mathfrak{V}$. Applying this to$\mathfrak{U}=V_{i-1}+\mathbb{F}_{p}z$and$\mathfrak{V}=V_{i}$, we obtain$V_{i-1}+\mathbb{F}_{p}z=V_{i}$(since$V_{i-1}+\mathbb{F}_{p}z$is a subspace of$V_{i}$and satisfies$\dim\left( V_{i-1}+\mathbb{F}_{p}z\right) \geq \dim\left( V_{i}\right) $). Now,% $\mathbb{F}_{p}\left\{ v_{1},v_{2},\ldots,v_{i-1},z\right\} =\underbrace{\mathbb{F}_{p}\left\{ v_{1},v_{2},\ldots,v_{i-1}\right\} }_{\substack{=V_{i-1}\\\text{(since }\left( v_{1},v_{2},\ldots,v_{i-1}% \right) \\\text{is a basis for }V_{i-1}\text{)}}}+\mathbb{F}_{p}% z=V_{i-1}+\mathbb{F}_{p}z=V_{i}.$ Hence, the list$\left( v_{1},v_{2},\ldots,v_{i-1},z\right) $spans the$\mathbb{F}_{p}$-vector space$V_{i}$. Since the size$i$of this list equals the dimension of$V_{i}$(because$\dim\left( V_{i}\right) =i$), this shows that the list$\left( v_{1},v_{2},\ldots,v_{i-1},z\right) $is a basis for$V_{i}$. Qed.}. Thus, we have shown that$z$is an element of$V_{i}\cap T_{i}$such that$\epsilon_{\sigma\left( i\right) }\left( z\right) =1$and moreover that$v_{1},v_{2},\ldots,v_{i-1},z$is a basis for$V_{i}$over$\mathbb{F}_{p}$. Hence, there is an element$v_{i}\in V_{i}\cap T_{i}$such that$\epsilon _{\sigma\left( i\right) }\left( v_{i}\right) =1$and moreover that$v_{1},v_{2},\ldots,v_{i}$is a basis for$V_{i}$over$\mathbb{F}_{p}$(namely,$v_{i}=z$). This completes our proof. 
(As I have said, the uniqueness of this$v_{i}$is not proven here, but it is not needed in your argument either.)$\square$\item \textbf{Proof of Proposition 5.2:} Before the words \textquotedblleft Now define$g$\textquotedblright, add the following sentences: \textquotedblleft Notice that, for each$i\in\left\{ 1,2,\ldots,n\right\} $, the leading term of$v_{i}$is$e_{\sigma\left( i\right) }$(because$v_{i}\in T_{i}\leq E_{\sigma\left( i\right) }=\mathbb{F}_{p}\left\{ e_{1},e_{2},\ldots,e_{\sigma\left( i\right) }\right\} $has its$\sigma\left( i\right) $-th coordinate equal to$\epsilon_{\sigma\left( i\right) }\left( v_{i}\right) =1$). Hence, for each$i\in\left\{ 1,2,\ldots,n\right\} $, the leading term of$v_{\sigma^{-1}\left( i\right) }$is$e_{i}$.\textquotedblright \item \textbf{Proof of Proposition 5.2:} Replace \textquotedblleft Now define$g$\textquotedblright\ by \textquotedblleft Now define an$\mathbb{F}_{p}% $-linear map$g$\textquotedblright. \item \textbf{Proof of Proposition 5.2:} Replace \textquotedblleft%$\mathbb{F}_{p}\left\{ e_{\sigma\left( k\right) },e_{\sigma\left( k+1\right) },\ldots,e_{\sigma\left( m\right) }\right\} $\textquotedblright% \ by \newline\textquotedblleft$\mathbb{F}_{p}\left\{ e_{\sigma\left( k\right) },e_{\sigma\left( k+1\right) },\ldots,e_{\sigma\left( n\right) }\right\} $\textquotedblright. \item \textbf{Proof of Proposition 5.2:} Replace \textquotedblleft so$\sigma^{-1}g\sigma$is a lower-triangular matrix\textquotedblright\ by: \textquotedblleft so$\sigma^{-1}g\sigma\left( e_{k}\right) \in \mathbb{F}_{p}\left\{ e_{k},e_{k+1},\ldots,e_{n}\right\} $. Hence,$\sigma^{-1}g\sigma$is a lower-triangular matrix\textquotedblright. \item \textbf{Proof of Proposition 5.2:} Replace \textquotedblleft so$g\in U^{\rho\sigma^{-1}}$\textquotedblright\ by \textquotedblleft so$g\in U^{\rho\sigma^{-1}}=U^{\left( \sigma\rho\right) ^{-1}}$\textquotedblright. 
\item \textbf{Proof of Proposition 5.2:} Replace \textquotedblleft and$\phi\left( g\right) =\underline{V}$\textquotedblright\ by \textquotedblleft and$\phi\left( g\right) =g\sigma\left( \underline{E}\right) =\underline{V}$\textquotedblright. \item \textbf{Example 5.3:} Replace \textquotedblleft$g=\left[ \begin{array} [c]{ccccc}% 1 & 0 & 0 & b & a\\ 0 & 1 & 0 & d & c\\ 0 & 0 & 1 & f & e\\ 0 & 0 & 0 & 1 & g\\ 0 & 0 & 0 & 0 & 1 \end{array} \right] $\textquotedblright\ by \textquotedblleft$g=\left[ \begin{array} [c]{ccccc}% 1 & 0 & 0 & e & a\\ 0 & 1 & 0 & f & b\\ 0 & 0 & 1 & g & c\\ 0 & 0 & 0 & 1 & d\\ 0 & 0 & 0 & 0 & 1 \end{array} \right] $\textquotedblright. (Otherwise, the equation after it wouldn't be true.) \item \textbf{Example 5.3:} Rename \textquotedblleft$g$\textquotedblright\ as \textquotedblleft$h$\textquotedblright\ in the contexts \textquotedblleft%$g=$\textquotedblright, \textquotedblleft For such$g$\textquotedblright, \textquotedblleft$g\sigma=$\textquotedblright\ and \textquotedblleft%$\phi\left( g\right) $\textquotedblright. (In fact, the notation \textquotedblleft$g$\textquotedblright\ here clashes with the notation \textquotedblleft$g$\textquotedblright\ for the$\left( 4,5\right) $-th entry of the matrix$g$.) \item \textbf{Corollary 5.4:} I think you should define what you mean by \textquotedblleft isomorphism\textquotedblright\ here. Namely, an \textit{isomorphism} from a triple$\left( V,\underline{U},\underline{W}% \right) $(where$V$is an$n$-dimensional$\mathbb{F}_{p}$-vector space, and$\underline{U}$and$\underline{W}$are two complete flags in$V$) to a triple$\left( V^{\prime},\underline{U^{\prime}},\underline{W^{\prime}}\right) $(where$V^{\prime}$is an$n$-dimensional$\mathbb{F}_{p}$-vector space, and$\underline{U^{\prime}}$and$\underline{W^{\prime}}$are two complete flags in$V^{\prime}$) means an isomorphism$\phi:V\rightarrow V^{\prime}$of$\mathbb{F}_{p}$-vector spaces satisfying$\phi U=U^{\prime}$and$\phi W=W^{\prime}$. 
\item \textbf{Corollary 5.4:} Replace \textquotedblleft$\Sigma$% \textquotedblright\ by \textquotedblleft$\Sigma_{n}$\textquotedblright. \item \textbf{Corollary 5.4:} Replace \textquotedblleft if and only iff\textquotedblright\ by \textquotedblleft if and only if\textquotedblright% \ (or by \textquotedblleft iff\textquotedblright). \item \textbf{Proof of Corollary 5.4:} Replace \textquotedblleft a pair as above\textquotedblright\ by \textquotedblleft a triple as above\textquotedblright. \item \textbf{Proof of Corollary 5.4:} Replace \textquotedblleft by$f\left( a\right) =\sum_{i}a_{i}w_{i}$\textquotedblright\ by \textquotedblleft by$f\left( a_{1},a_{2},\ldots,a_{n}\right) =\sum_{i}a_{i}w_{i}$% \textquotedblright. \item \textbf{Proof of Corollary 5.4:} Replace \textquotedblleft so$\underline{F}=x\sigma\underline{E}$\textquotedblright\ by: \textquotedblleft. Since the map$X\left( \sigma\right) \rightarrow Y\left( \sigma\right) ,\ g\mapsto g\sigma\underline{E}$is a bijection (by Proposition 5.2), we thus see that$\underline{F}=x\sigma\underline{E}$\textquotedblright. 
\item \textbf{Proof of Corollary 5.4:} At the end of this proof, add the following sentence: \textquotedblleft Hence, $\left( \mathbb{F}_{p}^{n},\sigma\underline{E},\underline{E}\right) \simeq\left( \mathbb{F}_{p}^{n},\underline{F},\underline{E}\right) \simeq\left( V,\underline{U},\underline{W}\right) $ (since the map $f:\mathbb{F}_{p}^{n}\rightarrow V$ is an isomorphism $\left( \mathbb{F}_{p}^{n},\underline{F},\underline{E}\right) \rightarrow\left( V,\underline{U},\underline{W}\right) $).\textquotedblright

\item \textbf{Proof of Corollary 5.5:} After \textquotedblleft such bases exist iff $\delta\left( \underline{U},\underline{W}\right) =\sigma$\textquotedblright, add \textquotedblleft (because the first claim of Corollary 5.4 shows that $\left( \mathbb{F}_{p}^{n},\sigma\underline{E},\underline{E}\right) \cong\left( V,\underline{U},\underline{W}\right) $ holds if and only if $\delta\left( \underline{U},\underline{W}\right) =\delta\left( \sigma\underline{E},\underline{E}\right) $; but in light of Example 4.1 this condition rewrites as $\delta\left( \underline{U},\underline{W}\right) =\sigma$)\textquotedblright.

\item \textbf{Proof of Corollary 5.5:} I think the last sentence of this proof would be better off taken out into a separate result:

\textbf{Lemma 5.5a.} Let $\sigma\in\Sigma_{n}$. Then, $\left\vert B^{\sigma}\cap B\right\vert =\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }$.

[\textit{Proof of Lemma 5.5a.} The definition of $X\left( \sigma^{-1}\rho\right) $ yields
\begin{align*}
X\left( \sigma^{-1}\rho\right)  & =U\cap U^{\left( \sigma^{-1}\rho\rho\right) ^{-1}}\\
& =U\cap U^{\sigma}\ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( \sigma^{-1}\underbrace{\rho\rho}_{=1}\right) ^{-1}=\left( \sigma^{-1}\right) ^{-1}=\sigma\right) \\
& =U^{\sigma}\cap U.
\end{align*}
But the last sentence of Lemma 5.1 (applied to $\sigma^{-1}\rho$ instead of $\sigma$) yields $\left\vert X\left( \sigma^{-1}\rho\right) \right\vert =p^{l\left( \sigma^{-1}\rho\right) }$.
Consider the short exact sequence
$1\longrightarrow U^{\sigma}\cap U\longrightarrow B^{\sigma}\cap B\longrightarrow T\longrightarrow1,$
where the arrow $U^{\sigma}\cap U\longrightarrow B^{\sigma}\cap B$ is the canonical inclusion, and where the arrow $B^{\sigma}\cap B\longrightarrow T$ is the map that replaces all off-diagonal entries of a matrix $g\in B^{\sigma}\cap B$ by $0$.\ \ \ \ \footnote{This is indeed a short exact sequence, because the matrices $g\in B^{\sigma}\cap B$ whose diagonal entries all equal $1$ are exactly the elements of $U^{\sigma}\cap U$. It is actually a split extension, since the arrow $B^{\sigma}\cap B\longrightarrow T$ is split by the canonical inclusion $T\longrightarrow B^{\sigma}\cap B$.} This short exact sequence shows that
\begin{align}
\left\vert B^{\sigma}\cap B\right\vert  & =\left\vert T\right\vert \cdot\left\vert \underbrace{U^{\sigma}\cap U}_{=X\left( \sigma^{-1}\rho\right) }\right\vert =\left\vert T\right\vert \cdot\underbrace{\left\vert X\left( \sigma^{-1}\rho\right) \right\vert }_{=p^{l\left( \sigma^{-1}\rho\right) }}=\underbrace{\left\vert T\right\vert }_{=\left( p-1\right) ^{n}}\cdot p^{l\left( \sigma^{-1}\rho\right) }\label{l5.5a.pf.1}\\
& =\left( p-1\right) ^{n}\cdot p^{l\left( \sigma^{-1}\rho\right) }.\nonumber
\end{align}
Thus, Lemma 5.5a is proven. $\square$]

\item \textbf{Proof of Proposition 6.1:} Before \textquotedblleft Moreover, if\textquotedblright, add the following sentence: \textquotedblleft Thus, $\pi\left( BgB\right) =\left\{ \pi\left( g\right) \right\} $ for each $g\in G$.\textquotedblright.

\item \textbf{Proof of Proposition 6.1:} Replace \textquotedblleft from which it follows directly that $\pi\left( \sigma\right) =\sigma$\textquotedblright\ by \textquotedblleft and the definition of $\pi$ yields $\pi\left( \sigma\right) =\delta\left( \sigma\underline{E},\underline{E}\right) =\sigma$ (by Example 4.1)\textquotedblright.
\item \textbf{Proof of Proposition 6.1:} After \textquotedblleft This means that$\pi\left( B\sigma B\right) =\left\{ \sigma\right\} $% .\textquotedblright, add \textquotedblleft Hence,$B\sigma B\subseteq\pi ^{-1}\left\{ \sigma\right\} $.\textquotedblright. \item \textbf{Proof of Proposition 6.1:} Replace \textquotedblleft Conversely, suppose that$\pi\left( h\right) =\sigma$.\textquotedblright\ by \textquotedblleft Conversely, let$h\in\pi^{-1}\left\{ \sigma\right\} $. Thus,$h\in G$and$\pi\left( h\right) =\sigma$. Hence,$\sigma=\pi\left( h\right) =\delta\left( h\underline{E},\underline{E}\right) $, so that$h\underline{E}\in Y\left( \sigma,\underline{E}\right) =Y\left( \sigma\right) $. Hence,\textquotedblright. \item \textbf{Proof of Proposition 6.1:} Replace \textquotedblleft Propsition\textquotedblright\ by \textquotedblleft Proposition\textquotedblright. \item \textbf{Proof of Proposition 6.1:} After \textquotedblleft we find that$b\in B$\textquotedblright, add \textquotedblleft(since$b\underline{E}% =\left( g\sigma\right) ^{-1}\underbrace{h\underline{E}}_{=g\sigma \underline{E}}=\left( g\sigma\right) ^{-1}g\sigma\underline{E}% =\underline{E}$)\textquotedblright. \item \textbf{Proof of Proposition 6.1:} After \textquotedblleft shows that$h\in B\sigma B$\textquotedblright, add \textquotedblleft(since$g\in X\left( \sigma\right) \leq U\leq B$and$b\in B$). Hence, we have shown that$\pi^{-1}\left\{ \sigma\right\} \subseteq B\sigma B$(since$h$was assumed to be any element of$G$satisfying$\pi\left( h\right) =\sigma $)\textquotedblright. 
\item \textbf{Proof of Proposition 6.1:} Replace \textquotedblleft we see that $g\sigma\underline{E}=g^{\prime}\sigma\underline{E}$\textquotedblright\ by \textquotedblleft and $g\sigma b=h=g^{\prime}\sigma b^{\prime}$, we see that $g\sigma\underbrace{\underline{E}}_{=b\underline{E}}=\underbrace{g\sigma b}_{=g^{\prime}\sigma b^{\prime}}\underline{E}=g^{\prime}\sigma\underbrace{b^{\prime}\underline{E}}_{=\underline{E}}=g^{\prime}\sigma\underline{E}$\textquotedblright.

\item \textbf{Proof of Corollary 6.2:} The claim \textquotedblleft $B=T\times U$\textquotedblright\ is wrong, or at least seriously misleading (the group $B$ is not a direct product of $T$ and $U$). I would replace the whole sentence containing it by \textquotedblleft Applying (\ref{l5.5a.pf.1}) to $\rho\sigma^{-1}$ instead of $\sigma$, we obtain $\left\vert B^{\rho\sigma^{-1}}\cap B\right\vert =\left\vert T\right\vert \cdot p^{l\left( \left( \rho\sigma^{-1}\right) ^{-1}\rho\right) }$. Since $B^{\rho\sigma^{-1}}\cap B=B\cap B^{\rho\sigma^{-1}}$ and $\left( \rho\sigma^{-1}\right) ^{-1}\rho=\sigma$, this rewrites as $\left\vert B\cap B^{\rho\sigma^{-1}}\right\vert =\left\vert T\right\vert \cdot p^{l\left( \sigma\right) }$.\textquotedblright

\item \textbf{Proof of Corollary 6.2:} After the displayed equation \textquotedblleft $\left\vert B\sigma B\right\vert =p^{l\left( \sigma\right) }\left\vert B\right\vert =\left\vert U\right\vert \left\vert T\right\vert p^{l\left( \sigma\right) }$\textquotedblright, add \textquotedblleft (since the short exact sequence $1\longrightarrow U\longrightarrow B\longrightarrow T\longrightarrow1$ yields $\left\vert B\right\vert =\left\vert U\right\vert \left\vert T\right\vert $)\textquotedblright.

\item \textbf{Proof of Corollary 6.3:} Before \textquotedblleft We have a bijection\textquotedblright, add \textquotedblleft Proposition 6.1 (applied to $\sigma^{-1}$ instead of $\sigma$) shows that\textquotedblright.
\item \textbf{Proof of Corollary 6.3:} Replace \textquotedblleft given by $\phi\left( g,b\right) =g\sigma b$\textquotedblright\ by \textquotedblleft given by $\phi\left( g,b\right) =g\sigma^{-1}b$\textquotedblright.

\item \textbf{Proof of Proposition 6.4:} The statement in the first sentence of your proof is worth stating as a separate lemma:

\textbf{Lemma 2.6a.} Let $\sigma,\tau\in\Sigma_{n}$. Then, the following two conditions are equivalent:

\textit{Condition }$\mathcal{C}_{1}$\textit{:} We have $l\left( \sigma\tau\right) =l\left( \sigma\right) +l\left( \tau\right) $.

\textit{Condition }$\mathcal{C}_{2}$\textit{:} For any $T\subseteq\left\{ 1,2,\ldots,n\right\} $ with $\left\vert T\right\vert =2$, at most one of the two maps $T\overset{\tau}{\longrightarrow}\tau\left( T\right) \overset{\sigma}{\longrightarrow}\sigma\tau\left( T\right) $ is order-reversing.

[\textit{Proof of Lemma 2.6a.} Let us prove the implications $\mathcal{C}_{1}\Longrightarrow\mathcal{C}_{2}$ and $\mathcal{C}_{2}\Longrightarrow\mathcal{C}_{1}$ separately.

\textit{Proof of the implication }$\mathcal{C}_{1}\Longrightarrow\mathcal{C}_{2}$\textit{:} Assume that Condition $\mathcal{C}_{1}$ holds. In other words, we have $l\left( \sigma\tau\right) =l\left( \sigma\right) +l\left( \tau\right) $.

Lemma 2.6 shows that $\overline{L}\left( \sigma\tau\right) =\overline{L}\left( \tau\right) \Delta\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) $. Also, $l\left( \sigma\right) =\left\vert L\left( \sigma\right) \right\vert =\left\vert \overline{L}\left( \sigma\right) \right\vert $ and similarly $l\left( \tau\right) =\left\vert \overline{L}\left( \tau\right) \right\vert $ and $l\left( \sigma\tau\right) =\left\vert \overline{L}\left( \sigma\tau\right) \right\vert $.
Now,
\begin{align*}
\left\vert \overline{L}\left( \tau\right) \right\vert +\underbrace{\left\vert \tau_{\ast}^{-1}\overline{L}\left( \sigma\right) \right\vert }_{\substack{=\left\vert \overline{L}\left( \sigma\right) \right\vert \\\text{(since }\tau_{\ast}\text{ is a bijection)}}} & =\underbrace{\left\vert \overline{L}\left( \tau\right) \right\vert }_{=l\left( \tau\right) }+\underbrace{\left\vert \overline{L}\left( \sigma\right) \right\vert }_{=l\left( \sigma\right) }=l\left( \tau\right) +l\left( \sigma\right) =l\left( \sigma\right) +l\left( \tau\right) \\
& =l\left( \sigma\tau\right) =\left\vert \underbrace{\overline{L}\left( \sigma\tau\right) }_{=\overline{L}\left( \tau\right) \Delta\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) }\right\vert =\left\vert \overline{L}\left( \tau\right) \Delta\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) \right\vert .
\end{align*}
But a simple and fundamental fact states that if $A$ and $B$ are two finite sets satisfying $\left\vert A\right\vert +\left\vert B\right\vert =\left\vert A\Delta B\right\vert $, then $A\cap B=\varnothing$. Applying this to $A=\overline{L}\left( \tau\right) $ and $B=\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) $, we find that $\overline{L}\left( \tau\right) \cap\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) =\varnothing$ (since $\left\vert \overline{L}\left( \tau\right) \right\vert +\left\vert \tau_{\ast}^{-1}\overline{L}\left( \sigma\right) \right\vert =\left\vert \overline{L}\left( \tau\right) \Delta\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) \right\vert $).

Now, let $T\subseteq\left\{ 1,2,\ldots,n\right\} $ with $\left\vert T\right\vert =2$. We shall show that at most one of the two maps $T\overset{\tau}{\longrightarrow}\tau\left( T\right) \overset{\sigma}{\longrightarrow}\sigma\tau\left( T\right) $ is order-reversing.

Indeed, assume the contrary. Thus, both maps $T\overset{\tau}{\longrightarrow}\tau\left( T\right) \overset{\sigma}{\longrightarrow}\sigma\tau\left( T\right) $ are order-reversing.
One of the characterizations of$\overline{L}\left( \sigma\right) $shows that the two-element subset$\tau\left( T\right) $of$\left\{ 1,2,\ldots,n\right\} $belongs to$\overline{L}\left( \sigma\right) $if and only if the map$\sigma:\tau\left( T\right) \rightarrow\sigma\tau\left( T\right) $is order-reversing. Hence, the two-element subset$\tau\left( T\right) $of$\left\{ 1,2,\ldots,n\right\} $belongs to$\overline {L}\left( \sigma\right) $(since the map$\sigma:\tau\left( T\right) \rightarrow\sigma\tau\left( T\right) $is order-reversing). Thus,$\tau\left( T\right) \in\overline{L}\left( \sigma\right) $. But$\tau_{\ast}\left( T\right) =\tau\left( T\right) \in\overline{L}\left( \sigma\right) $, so that$T\in\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) $. One of the characterizations of$\overline{L}\left( \tau\right) $shows that the two-element subset$T$of$\left\{ 1,2,\ldots,n\right\} $belongs to$\overline{L}\left( \tau\right) $if and only if the map$\tau :T\rightarrow\tau\left( T\right) $is order-reversing. Hence, the two-element subset$T$of$\left\{ 1,2,\ldots,n\right\} $belongs to$\overline{L}\left( \tau\right) $(since the map$\tau:T\rightarrow \tau\left( T\right) $is order-reversing). In other words,$T\in\overline {L}\left( \tau\right) $. Combining this with$T\in\tau_{\ast}^{-1}% \overline{L}\left( \sigma\right) $, we obtain$T\in\overline{L}\left( \tau\right) \cap\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) =\varnothing$. In other words,$T$belongs to the empty set. This is clearly absurd. Thus, we have obtained a contradiction. Hence, our assumption was wrong. We thus have proven that at most one of the two maps$T\overset{\tau }{\longrightarrow}\tau\left( T\right) \overset{\sigma}{\longrightarrow }\sigma\tau\left( T\right) $is order-reversing. Now, forget that we fixed$T$. 
We thus have shown that for any $T\subseteq\left\{ 1,2,\ldots,n\right\} $ with $\left\vert T\right\vert =2$, at most one of the two maps $T\overset{\tau}{\longrightarrow}\tau\left( T\right) \overset{\sigma}{\longrightarrow}\sigma\tau\left( T\right) $ is order-reversing. In other words, Condition $\mathcal{C}_{2}$ holds.

Thus, we have derived Condition $\mathcal{C}_{2}$ from Condition $\mathcal{C}_{1}$. In other words, we have proven the implication $\mathcal{C}_{1}\Longrightarrow\mathcal{C}_{2}$.

We omit the proof of the implication $\mathcal{C}_{2}\Longrightarrow\mathcal{C}_{1}$ (since you don't actually use this implication in your arguments, and since this proof is rather easily obtained by \textquotedblleft walking backwards\textquotedblright\ through our above proof of the implication $\mathcal{C}_{1}\Longrightarrow\mathcal{C}_{2}$).

Combining the implications $\mathcal{C}_{1}\Longrightarrow\mathcal{C}_{2}$ and $\mathcal{C}_{2}\Longrightarrow\mathcal{C}_{1}$, we obtain the equivalence $\mathcal{C}_{1}\Longleftrightarrow\mathcal{C}_{2}$. Thus, Lemma 2.6a is proven. $\square$]

\item \textbf{\S 6:} Let me suggest an alternative way of proving Proposition 6.4 and Proposition 6.5. This alternative way has the advantage that it does not use the finiteness of the field $\mathbb{F}_{p}$, and so can be directly generalized to an arbitrary field.\footnote{A few comments on your proofs:
\par -- In the proof of Proposition 6.4, before \textquotedblleft We now show that $\phi\left( g,h\right) \in X\left( \sigma\tau\right) $\textquotedblright, I would suggest adding \textquotedblleft Let $g\in X\left( \sigma\right) $ and $h\in X\left( \tau\right) $.\textquotedblright.
\par -- In the proof of Proposition 6.4, \textquotedblleft apply Corollary 6.1\textquotedblright\ should be replaced by \textquotedblleft apply Proposition 6.1\textquotedblright.}

First, let me show a few useful lemmas:

\textbf{Lemma 6.4a.} Let $\sigma\in\Sigma_{n}$ and $g=\left( g_{i,j}\right) _{i,j=1}^{n}\in G$.
\textbf{(a)} If$g\in X\left( \sigma\right) $, then% \begin{equation} \left( g_{i,i}=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) \label{eq.l6.4a.1}% \end{equation} and% \begin{equation} \left( g_{i,j}=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i\neq j\text{ and }\left( i,j\right) \notin L\left( \sigma ^{-1}\right) \right) . \label{eq.l6.4a.2}% \end{equation} \textbf{(b)} If (\ref{eq.l6.4a.1}) and (\ref{eq.l6.4a.2}) hold, then$g\in X\left( \sigma\right) $. [\textit{Proof of Lemma 6.4a.} From the first sentence of Lemma 5.1, we see that$g\in X\left( \sigma\right) $holds if and only if we have% $g_{i,j}=% \begin{cases} 1, & \text{if }i=j;\\ \text{arbitrary,} & \text{if }\left( i,j\right) \in L\left( \sigma ^{-1}\right) ;\\ 0, & \text{otherwise}% \end{cases} .$ In other words,$g\in X\left( \sigma\right) $holds if and only if (\ref{eq.l6.4a.1}) and (\ref{eq.l6.4a.2}) hold. This proves both parts \textbf{(a)} and \textbf{(b)} of Lemma 6.4a.$\square$] \textbf{Lemma 6.4b.} Let$g=\left( g_{i,j}\right) _{i,j=1}^{n}\in G$. \textbf{(a)} If$g\in U$, then% \begin{equation} \left( g_{i,i}=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) \label{eq.l6.4b.1}% \end{equation} and% \begin{equation} \left( g_{i,j}=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i>j\right) . \label{eq.l6.4b.2}% \end{equation} \textbf{(b)} If (\ref{eq.l6.4b.1}) and (\ref{eq.l6.4b.2}) hold, then$g\in U$. [\textit{Proof of Lemma 6.4b.} Recall that$U$is the set of all upper-unitriangular$n\times n$-matrices. Hence,$g\in U$holds if and only if$g$is upper-unitriangular. By the definition of upper-triangular, this rewrites as follows:$g\in U$holds if and only if (\ref{eq.l6.4b.1}) and (\ref{eq.l6.4b.2}) hold. This proves both parts \textbf{(a)} and \textbf{(b)} of Lemma 6.4b.$\square$] \textbf{Lemma 6.4c.} Let$\sigma\in\Sigma_{n}$. 
Then, the map $X\left( \sigma\right) \times B\rightarrow B\sigma B,\ \left( g,b\right) \mapsto g\sigma b$ is well-defined and is a bijection.

[\textit{Proof of Lemma 6.4c.} Lemma 6.4c is the first claim of Proposition 6.1, and thus we already have proven it. $\square$]

\textbf{Lemma 6.4d.} Let $\sigma\in\Sigma_{n}$ and $\tau\in\Sigma_{n}$ be such that $l\left( \sigma\tau\right) =l\left( \sigma\right) +l\left( \tau\right) $. Then:

\textbf{(a)} We have $\overline{L}\left( \tau\right) \cap\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) =\varnothing$.

\textbf{(b)} We have $X\left( \sigma\right) \cap\left( X\left( \tau\right) \right) ^{\sigma^{-1}}=1$.

\textbf{(c)} We have $X\left( \sigma\right) \subseteq U^{\rho\tau^{-1}\sigma^{-1}}$.

\textbf{(d)} We have $\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq U$.

\textbf{(e)} We have $X\left( \sigma\right) \cdot\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq X\left( \sigma\tau\right) $.

\textbf{(f)} We have $B\sigma B\tau B=B\sigma\tau B$.

\textbf{(g)} The map
$X\left( \sigma\right) \times X\left( \tau\right) \rightarrow X\left( \sigma\tau\right) ,\ \ \ \ \ \ \ \ \ \ \left( g,h\right) \mapsto gh^{\sigma^{-1}}$
is well-defined and bijective.

[\textit{Proof of Lemma 6.4d.} \textbf{(a)} This was shown during our above proof of the implication $\mathcal{C}_{1}\Longrightarrow\mathcal{C}_{2}$ in the proof of Lemma 2.6a.

\textbf{(b)} Let $g\in X\left( \sigma\right) \cap U^{\sigma^{-1}}$. Thus, $g\in X\left( \sigma\right) $ and $g\in U^{\sigma^{-1}}$.

Write the matrix $g$ in the form $g=\left( g_{i,j}\right) _{i,j=1}^{n}$. Then, $g^{\sigma}=\left( g_{\sigma\left( i\right) ,\sigma\left( j\right) }\right) _{i,j=1}^{n}$.
But $g\in U^{\sigma^{-1}}$, so that
$g^{\sigma}\in\left( U^{\sigma^{-1}}\right) ^{\sigma}=U^{\sigma^{-1}\sigma}=U^{1}=U.$
Hence, Lemma 6.4b \textbf{(a)} (applied to $g^{\sigma}$ and $g_{\sigma\left( i\right) ,\sigma\left( j\right) }$ instead of $g$ and $g_{i,j}$) yields that
\begin{equation}
\left( g_{\sigma\left( i\right) ,\sigma\left( i\right) }=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) \label{pf.l6.4b.b.1}
\end{equation}
and
\begin{equation}
\left( g_{\sigma\left( i\right) ,\sigma\left( j\right) }=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i>j\right) . \label{pf.l6.4b.b.2}
\end{equation}
On the other hand, $g\in X\left( \sigma\right) $. Hence, Lemma 6.4a \textbf{(a)} yields that
\begin{equation}
\left( g_{i,i}=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) \label{pf.l6.4b.b.3}
\end{equation}
and
\begin{equation}
\left( g_{i,j}=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i\neq j\text{ and }\left( i,j\right) \notin L\left( \sigma^{-1}\right) \right) . \label{pf.l6.4b.b.4}
\end{equation}
Now, let $i,j\in\left\{ 1,2,\ldots,n\right\} $. We shall show that $g_{i,j}=\delta_{i,j}$.

Indeed, assume the contrary. Thus, $g_{i,j}\neq\delta_{i,j}$. Hence, $i\neq j$\ \ \ \ \footnote{\textit{Proof.} Assume the contrary. Thus, $i=j$. Hence, $j=i$, so that $g_{i,j}=g_{i,i}=1$ (by (\ref{pf.l6.4b.b.3})). But from $i=j$, we also obtain $\delta_{i,j}=1$. Comparing this with $g_{i,j}=1$, we obtain $g_{i,j}=\delta_{i,j}$. This contradicts $g_{i,j}\neq\delta_{i,j}$. This contradiction shows that our assumption was wrong. Qed.}, so that $\delta_{i,j}=0$ and thus $g_{i,j}\neq\delta_{i,j}=0$.

If we had $\left( i,j\right) \notin L\left( \sigma^{-1}\right) $, then we would have $g_{i,j}=0$ (by (\ref{pf.l6.4b.b.4})), which would contradict $g_{i,j}\neq0$. Thus, we cannot have $\left( i,j\right) \notin L\left( \sigma^{-1}\right) $. Hence, we have $\left( i,j\right) \in L\left( \sigma^{-1}\right) $.
In other words, $i<j$ and $\sigma^{-1}\left( i\right) >\sigma^{-1}\left( j\right) $ (by the definition of $L\left( \sigma^{-1}\right) $). Thus, (\ref{pf.l6.4b.b.2}) (applied to $\left( \sigma^{-1}\left( i\right) ,\sigma^{-1}\left( j\right) \right) $ instead of $\left( i,j\right) $) yields $g_{\sigma\left( \sigma^{-1}\left( i\right) \right) ,\sigma\left( \sigma^{-1}\left( j\right) \right) }=0$. This contradicts $g_{\sigma\left( \sigma^{-1}\left( i\right) \right) ,\sigma\left( \sigma^{-1}\left( j\right) \right) }=g_{i,j}\neq0$. This contradiction shows that our assumption was false. Hence, $g_{i,j}=\delta_{i,j}$ is proven.

Now, forget that we fixed $i,j$. We thus have shown that $g_{i,j}=\delta_{i,j}$ for all $i,j\in\left\{ 1,2,\ldots,n\right\} $. In other words, $\left( g_{i,j}\right) _{i,j=1}^{n}=\left( \delta_{i,j}\right) _{i,j=1}^{n}=1$. Hence, $g=\left( g_{i,j}\right) _{i,j=1}^{n}=1$.

Now, forget that we fixed $g$. Thus we have proven that $g=1$ for each $g\in X\left( \sigma\right) \cap U^{\sigma^{-1}}$. In other words, $X\left( \sigma\right) \cap U^{\sigma^{-1}}=1$.

But the definition of $X\left( \tau\right) $ yields $X\left( \tau\right) =U\cap U^{\left( \tau\rho\right) ^{-1}}\subseteq U$. Thus, $\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq U^{\sigma^{-1}}$, so that $X\left( \sigma\right) \cap\underbrace{\left( X\left( \tau\right) \right) ^{\sigma^{-1}}}_{\subseteq U^{\sigma^{-1}}}\subseteq X\left( \sigma\right) \cap U^{\sigma^{-1}}=1$. Hence, $X\left( \sigma\right) \cap\left( X\left( \tau\right) \right) ^{\sigma^{-1}}=1$. This proves Lemma 6.4d \textbf{(b)}.

\textbf{(c)} Let $g\in X\left( \sigma\right) $. We shall prove that $g\in U^{\rho\tau^{-1}\sigma^{-1}}$.

Write the matrix $g$ in the form $g=\left( g_{i,j}\right) _{i,j=1}^{n}$.

We have $g\in X\left( \sigma\right) $.
Hence, Lemma 6.4a \textbf{(a)} yields that% \begin{equation} \left( g_{i,i}=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) \label{pf.l6.4b.c.1}% \end{equation} and% \begin{equation} \left( g_{i,j}=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i\neq j\text{ and }\left( i,j\right) \notin L\left( \sigma ^{-1}\right) \right) . \label{pf.l6.4b.c.2}% \end{equation} Let$\eta=\sigma\tau\rho$. Thus,$\eta\in\Sigma_{n}$, so that$\eta$is an injective map. From$\eta=\sigma\tau\rho$, we obtain$\eta^{-1}=\left( \sigma\tau\rho\right) ^{-1}=\underbrace{\rho^{-1}}_{=\rho}\tau^{-1}% \sigma^{-1}=\rho\tau^{-1}\sigma^{-1}$. Let$i,j\in\left\{ 1,2,\ldots,n\right\} $be such that$i>j$. We shall show that$g_{\eta\left( i\right) ,\eta\left( j\right) }=0$. Indeed, assume the contrary. Thus,$g_{\eta\left( i\right) ,\eta\left( j\right) }\neq0$. From$i>j$, we obtain$i\neq j$and therefore$\eta\left( i\right) \neq\eta\left( j\right) $(since$\eta$is injective). If we had$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \notin L\left( \sigma^{-1}\right) $, then we would have$g_{\eta\left( i\right) ,\eta\left( j\right) }=0$(by (\ref{pf.l6.4b.c.2}), applied to$\eta\left( i\right) $and$\eta\left( j\right) $instead of$i$and$j$), which would contradict$g_{\eta\left( i\right) ,\eta\left( j\right) }\neq0$. Hence, we cannot have$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \notin L\left( \sigma^{-1}\right) $. Thus, we must have$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \in L\left( \sigma^{-1}\right) $. In other words,$\eta\left( i\right) <\eta\left( j\right) $and$\sigma^{-1}\left( \eta\left( i\right) \right) >\sigma^{-1}\left( \eta\left( j\right) \right) $(by the definition of$L\left( \sigma ^{-1}\right) $). From$\eta\left( i\right) <\eta\left( j\right) $, we obtain$\eta\left( j\right) >\eta\left( i\right) $. 
Now, the definition of$\rho$yields$\rho\left( i\right) =n+1-\underbrace{i}_{>j}<n+1-j=\rho\left( j\right) $. Also, from$\eta=\sigma\tau\rho$, we obtain$\tau\rho=\sigma^{-1}\eta$, so that$\tau\left( \rho\left( i\right) \right) =\sigma^{-1}\left( \eta\left( i\right) \right) >\sigma^{-1}\left( \eta\left( j\right) \right) =\tau\left( \rho\left( j\right) \right) $. Combining$\rho\left( i\right) <\rho\left( j\right) $with$\tau\left( \rho\left( i\right) \right) >\tau\left( \rho\left( j\right) \right) $, we obtain$\left( \rho\left( i\right) ,\rho\left( j\right) \right) \in L\left( \tau\right) $(by the definition of$L\left( \tau\right) $). Thus,$\left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} \in \overline{L}\left( \tau\right) $(by the definition of$\overline{L}\left( \tau\right) $). On the other hand,$\tau\left( \rho\left( j\right) \right) <\tau\left( \rho\left( i\right) \right) $(since$\tau\left( \rho\left( i\right) \right) >\tau\left( \rho\left( j\right) \right) $) and$\sigma\left( \tau\left( \rho\left( j\right) \right) \right) =\underbrace{\left( \sigma\tau\rho\right) }_{=\eta}\left( j\right) =\eta\left( j\right) >\underbrace{\eta}_{=\sigma\tau\rho}\left( i\right) =\left( \sigma\tau \rho\right) \left( i\right) =\sigma\left( \tau\left( \rho\left( i\right) \right) \right) $. Combining these two inequalities, we obtain$\left( \tau\left( \rho\left( j\right) \right) ,\tau\left( \rho\left( i\right) \right) \right) \in L\left( \sigma\right) $(by the definition of$L\left( \sigma\right) $). Hence,$\left\{ \tau\left( \rho\left( j\right) \right) ,\tau\left( \rho\left( i\right) \right) \right\} \in\overline{L}\left( \sigma\right) $(by the definition of$\overline {L}\left( \sigma\right) $).
Now,% \begin{align*} \tau_{\ast}\left( \left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} \right) & =\tau\left( \left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} \right) \ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\tau_{\ast}\right) \\ & =\left\{ \tau\left( \rho\left( i\right) \right) ,\tau\left( \rho\left( j\right) \right) \right\} =\left\{ \tau\left( \rho\left( j\right) \right) ,\tau\left( \rho\left( i\right) \right) \right\} \in\overline{L}\left( \sigma\right) , \end{align*} so that $\left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} \in\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) $. Combining this with$\left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} \in \overline{L}\left( \tau\right) $, we obtain$\left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} \in\overline{L}\left( \tau\right) \cap \tau_{\ast}^{-1}\overline{L}\left( \sigma\right) =\varnothing$. Thus,$\left\{ \rho\left( i\right) ,\rho\left( j\right) \right\} $belongs to the empty set. This is clearly absurd. Thus, we have obtained a contradiction. This shows that our assumption was false. Hence,$g_{\eta\left( i\right) ,\eta\left( j\right) }=0$is proven. Now, forget that we fixed$i,j$. We thus have shown that \begin{equation} \left( g_{\eta\left( i\right) ,\eta\left( j\right) }=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i>j\right) . \label{pf.l6.4b.c.7}% \end{equation} Moreover, if$i\in\left\{ 1,2,\ldots,n\right\} $, then$g_{\eta\left( i\right) ,\eta\left( i\right) }=1$(by (\ref{pf.l6.4b.c.1}), applied to$\eta\left( i\right) $instead of$i$). Thus, we have shown that% \begin{equation} \left( g_{\eta\left( i\right) ,\eta\left( i\right) }=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) . \label{pf.l6.4b.c.8}% \end{equation} Now,$g^{\eta}=\left( g_{\eta\left( i\right) ,\eta\left( j\right) }\right) _{i,j=1}^{n}$(since$g=\left( g_{i,j}\right) _{i,j=1}^{n}$).
Hence, Lemma 6.4b \textbf{(b)} (applied to$g^{\eta}$and$g_{\eta\left( i\right) ,\eta\left( j\right) }$instead of$g$and$g_{i,j}$) shows that$g^{\eta}\in U$(since (\ref{pf.l6.4b.c.8}) and (\ref{pf.l6.4b.c.7}) hold). Hence,$\left( g^{\eta}\right) ^{\eta^{-1}}\in U^{\eta^{-1}}$. Since$\left( g^{\eta}\right) ^{\eta^{-1}}=g^{\eta\eta^{-1}}=g^{1}=g$, this rewrites as$g\in U^{\eta^{-1}}$. Since$\eta^{-1}=\rho\tau^{-1}\sigma^{-1}$, this rewrites as$g\in U^{\rho\tau^{-1}\sigma^{-1}}$. Now, forget that we fixed$g$. We thus have shown that$g\in U^{\rho\tau ^{-1}\sigma^{-1}}$for each$g\in X\left( \sigma\right) $. In other words,$X\left( \sigma\right) \subseteq U^{\rho\tau^{-1}\sigma^{-1}}$. This proves Lemma 6.4d \textbf{(c)}. \textbf{(d)} Let$g\in X\left( \tau\right) $. We shall prove that$g\in U^{\sigma}$. Write the matrix$g$in the form$g=\left( g_{i,j}\right) _{i,j=1}^{n}$. We have$g\in X\left( \tau\right) $. Hence, Lemma 6.4a \textbf{(a)} (applied to$\tau$instead of$\sigma$) yields that% \begin{equation} \left( g_{i,i}=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) \label{pf.l6.4b.d.1}% \end{equation} and% \begin{equation} \left( g_{i,j}=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i\neq j\text{ and }\left( i,j\right) \notin L\left( \tau ^{-1}\right) \right) . \label{pf.l6.4b.d.2}% \end{equation} Let$\eta=\sigma^{-1}$. Thus,$\eta\in\Sigma_{n}$. Hence,$\eta$is an injective map. Let$i,j\in\left\{ 1,2,\ldots,n\right\} $be such that$i>j$. We shall show that$g_{\eta\left( i\right) ,\eta\left( j\right) }=0$. Indeed, assume the contrary. Thus,$g_{\eta\left( i\right) ,\eta\left( j\right) }\neq0$. From$i>j$, we obtain$i\neq j$and therefore$\eta\left( i\right) \neq\eta\left( j\right) $(since$\eta$is injective). 
If we had$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \notin L\left( \tau^{-1}\right) $, then we would have$g_{\eta\left( i\right) ,\eta\left( j\right) }=0$(by (\ref{pf.l6.4b.d.2}), applied to$\eta\left( i\right) $and$\eta\left( j\right) $instead of$i$and$j$), which would contradict$g_{\eta\left( i\right) ,\eta\left( j\right) }\neq0$. Hence, we cannot have$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \notin L\left( \tau^{-1}\right) $. Thus, we must have$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \in L\left( \tau^{-1}\right) $. In other words,$\eta\left( i\right) <\eta\left( j\right) $and$\tau ^{-1}\left( \eta\left( i\right) \right) >\tau^{-1}\left( \eta\left( j\right) \right) $(by the definition of$L\left( \tau^{-1}\right) $). From$\eta\left( i\right) <\eta\left( j\right) $, we obtain$\eta\left( j\right) >\eta\left( i\right) $. Set$x=\tau^{-1}\left( \eta\left( j\right) \right) $and$y=\tau ^{-1}\left( \eta\left( i\right) \right) $. Then,$x,y$are elements of$\left\{ 1,2,\ldots,n\right\} $. Furthermore,$y=\tau^{-1}\left( \eta\left( i\right) \right) >\tau^{-1}\left( \eta\left( j\right) \right) =x$, so that$x<y$. Also,$\tau\left( x\right) =\eta\left( j\right) >\eta\left( i\right) =\tau\left( y\right) $. From$x<y$and$\tau\left( x\right) >\tau\left( y\right) $, we obtain$\left( x,y\right) \in L\left( \tau\right) $(by the definition of$L\left( \tau\right) $). Thus,$\left\{ x,y\right\} \in\overline{L}\left( \tau\right) $(by the definition of$\overline{L}\left( \tau\right) $). On the other hand, the elements$\eta\left( i\right) ,\eta\left( j\right) \in\left\{ 1,2,\ldots,n\right\} $satisfy$\eta\left( i\right) <\eta\left( j\right) $and$\sigma\left( \eta\left( i\right) \right) >\sigma\left( \eta\left( j\right) \right) $(since$\sigma\left( \underbrace{\eta}_{=\sigma^{-1}}\left( i\right) \right) =\sigma\left( \sigma^{-1}\left( i\right) \right) =i>j=\sigma\left( \underbrace{\sigma ^{-1}}_{=\eta}\left( j\right) \right) =\sigma\left( \eta\left( j\right) \right) $).
In other words,$\left( \eta\left( i\right) ,\eta\left( j\right) \right) \in L\left( \sigma\right) $(by the definition of$L\left( \sigma\right) $). Hence,$\left\{ \eta\left( i\right) ,\eta\left( j\right) \right\} \in\overline{L}\left( \sigma\right) $(by the definition of$\overline{L}\left( \sigma\right) $). But% \begin{align*} \tau_{\ast}\left( \left\{ x,y\right\} \right) & =\tau\left( \left\{ x,y\right\} \right) \ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\tau_{\ast}\right) \\ & =\left\{ \underbrace{\tau\left( x\right) }_{=\eta\left( j\right) },\underbrace{\tau\left( y\right) }_{=\eta\left( i\right) }\right\} =\left\{ \eta\left( j\right) ,\eta\left( i\right) \right\} =\left\{ \eta\left( i\right) ,\eta\left( j\right) \right\} \in\overline{L}\left( \sigma\right) , \end{align*} so that $\left\{ x,y\right\} \in\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) $. Combining this with$\left\{ x,y\right\} \in\overline {L}\left( \tau\right) $, we obtain$\left\{ x,y\right\} \in\overline {L}\left( \tau\right) \cap\tau_{\ast}^{-1}\overline{L}\left( \sigma\right) =\varnothing$. Thus,$\left\{ x,y\right\} $belongs to the empty set. This is clearly absurd. Thus, we have obtained a contradiction. This shows that our assumption was false. Hence,$g_{\eta\left( i\right) ,\eta\left( j\right) }=0$is proven. Now, forget that we fixed$i,j$. We thus have shown that \begin{equation} \left( g_{\eta\left( i\right) ,\eta\left( j\right) }=0\text{ for any }i,j\in\left\{ 1,2,\ldots,n\right\} \text{ satisfying }i>j\right) . \label{pf.l6.4b.d.7}% \end{equation} Moreover, if$i\in\left\{ 1,2,\ldots,n\right\} $, then$g_{\eta\left( i\right) ,\eta\left( i\right) }=1$(by (\ref{pf.l6.4b.d.1}), applied to$\eta\left( i\right) $instead of$i$). Thus, we have shown that% \begin{equation} \left( g_{\eta\left( i\right) ,\eta\left( i\right) }=1\text{ for all }i\in\left\{ 1,2,\ldots,n\right\} \right) .
\label{pf.l6.4b.d.8}% \end{equation} Now,$g^{\eta}=\left( g_{\eta\left( i\right) ,\eta\left( j\right) }\right) _{i,j=1}^{n}$(since$g=\left( g_{i,j}\right) _{i,j=1}^{n}$). Hence, Lemma 6.4b \textbf{(b)} (applied to$g^{\eta}$and$g_{\eta\left( i\right) ,\eta\left( j\right) }$instead of$g$and$g_{i,j}$) shows that$g^{\eta}\in U$(since (\ref{pf.l6.4b.d.8}) and (\ref{pf.l6.4b.d.7}) hold). Hence,$\left( g^{\eta}\right) ^{\sigma}\in U^{\sigma}$. Since \begin{align*} \left( g^{\eta}\right) ^{\sigma} & =g^{\eta\sigma}=g^{1}% \ \ \ \ \ \ \ \ \ \ \left( \text{since }\underbrace{\eta}_{=\sigma^{-1}% }\sigma=\sigma^{-1}\sigma=1\right) \\ & =g, \end{align*} this rewrites as $g\in U^{\sigma}$. Now, forget that we fixed$g$. We thus have shown that$g\in U^{\sigma}$for each$g\in X\left( \tau\right) $. In other words,$X\left( \tau\right) \subseteq U^{\sigma}$. Hence,$\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq\left( U^{\sigma}\right) ^{\sigma^{-1}}% =U^{\sigma\sigma^{-1}}=U^{1}=U$. This proves Lemma 6.4d \textbf{(d)}. \textbf{(e)} The definition of$X\left( \sigma\tau\right) $yields$X\left( \sigma\tau\right) =U\cap U^{\left( \sigma\tau\rho\right) ^{-1}}$. Hence,$X\left( \sigma\tau\right) $is the intersection of two subgroups of$G$(namely, of the subgroup$U$and of the subgroup$U^{\left( \sigma\tau \rho\right) ^{-1}}$). Thus,$X\left( \sigma\tau\right) $is itself a subgroup of$G$. Therefore,$X\left( \sigma\tau\right) \cdot X\left( \sigma\tau\right) \subseteq X\left( \sigma\tau\right) $. Now,$\left( \sigma\tau\rho\right) ^{-1}=\underbrace{\rho^{-1}}_{=\rho}% \tau^{-1}\sigma^{-1}=\rho\tau^{-1}\sigma^{-1}$, so that$U^{\left( \sigma \tau\rho\right) ^{-1}}=U^{\rho\tau^{-1}\sigma^{-1}}$. The definition of$X\left( \sigma\right) $yields$X\left( \sigma\right) =U\cap U^{\left( \sigma\rho\right) ^{-1}}\subseteq U$. But Lemma 6.4d \textbf{(c)} yields$X\left( \sigma\right) \subseteq U^{\rho\tau^{-1}% \sigma^{-1}}=U^{\left( \sigma\tau\rho\right) ^{-1}}$.
Combining$X\left( \sigma\right) \subseteq U$with$X\left( \sigma\right) \subseteq U^{\left( \sigma\tau\rho\right) ^{-1}}$, we obtain$X\left( \sigma\right) \subseteq U\cap U^{\left( \sigma\tau\rho\right) ^{-1}}=X\left( \sigma\tau\right) $. On the other hand, the definition of$X\left( \tau\right) $yields$X\left( \tau\right) =U\cap U^{\left( \tau\rho\right) ^{-1}}\subseteq U^{\left( \tau\rho\right) ^{-1}}$. Hence,$\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq\left( U^{\left( \tau\rho\right) ^{-1}}\right) ^{\sigma^{-1}}=U^{\left( \tau\rho\right) ^{-1}\sigma^{-1}}=U^{\left( \sigma\tau\rho\right) ^{-1}}$(since$\left( \tau\rho\right) ^{-1}% \sigma^{-1}=\left( \sigma\tau\rho\right) ^{-1}$). Combining$\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq U$(which follows from Lemma 6.4d \textbf{(d)}) with$\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq U^{\left( \sigma\tau\rho\right) ^{-1}}$, we obtain$\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq U\cap U^{\left( \sigma\tau\rho\right) ^{-1}}=X\left( \sigma\tau\right) $. Now,% $\underbrace{X\left( \sigma\right) }_{\subseteq X\left( \sigma\tau\right) }\cdot\underbrace{\left( X\left( \tau\right) \right) ^{\sigma^{-1}}% }_{\subseteq X\left( \sigma\tau\right) }\subseteq X\left( \sigma \tau\right) \cdot X\left( \sigma\tau\right) \subseteq X\left( \sigma \tau\right) .$ This proves Lemma 6.4d \textbf{(e)}. \textbf{(f)} Let$r\in B\sigma B\tau B$. Then,$r\in B\sigma B\tau B=B\sigma\left( B\tau B\right) $. In other words, there exist$c\in B$and$p\in B\tau B$such that$r=c\sigma p$. Consider these$c$and$p$. Lemma 6.4c (applied to$\tau$instead of$\sigma$) yields that the map$X\left( \tau\right) \times B\rightarrow B\tau B,\ \left( g,b\right) \mapsto g\tau b$is well-defined and is a bijection. Hence, the element$p\in B\tau B$is an image under this map. In other words, there exists some$\left( g,b\right) \in X\left( \tau\right) \times B$such that$p=g\tau b$. Consider this$\left( g,b\right) $. 
From$\left( g,b\right) \in X\left( \tau\right) \times B$, we obtain$g\in X\left( \tau\right) $and$b\in B$. From$g\in X\left( \tau\right) $, we obtain$g^{\sigma^{-1}}\in\left( X\left( \tau\right) \right) ^{\sigma ^{-1}}\subseteq U$(by Lemma 6.4d \textbf{(d)}), so that$g^{\sigma^{-1}}\in U\subseteq B$. Since$g^{\sigma^{-1}}=\underbrace{\left( \sigma^{-1}\right) ^{-1}}_{=\sigma}g\sigma^{-1}=\sigma g\sigma^{-1}$, this rewrites as$\sigma g\sigma^{-1}\in B$. Now,$\sigma g\underbrace{\sigma^{-1}\sigma}_{=1}\tau b=\sigma \underbrace{g\tau b}_{=p}=\sigma p$, so that$\sigma p=\underbrace{\sigma g\sigma^{-1}}_{\in B}\sigma\tau\underbrace{b}_{\in B}\in B\sigma\tau B$. Now,% $r=\underbrace{c}_{\in B}\underbrace{\sigma p}_{\in B\sigma\tau B}% \in\underbrace{BB}_{\substack{\subseteq B\\\text{(since }B\text{ is a group)}% }}\sigma\tau B\subseteq B\sigma\tau B.$ Now, forget that we fixed$r$. We thus have proven that$r\in B\sigma\tau B$for each$r\in B\sigma B\tau B$. In other words,$B\sigma B\tau B\subseteq B\sigma\tau B$. Combining this with the inclusion% $B\underbrace{\sigma}_{=\sigma1}\tau B=B\sigma\underbrace{1}_{\in B}\tau B\subseteq B\sigma B\tau B,$ we obtain$B\sigma B\tau B=B\sigma\tau B$. This proves Lemma 6.4d \textbf{(f)}. \textbf{(g)} For every$\left( g,h\right) \in X\left( \sigma\right) \times X\left( \tau\right) $, we have% $\underbrace{g}_{\substack{\in X\left( \sigma\right) \\\text{(since }\left( g,h\right) \in X\left( \sigma\right) \times X\left( \tau\right) \text{)}% }}\underbrace{h^{\sigma^{-1}}}_{\substack{\in\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\\\text{(since }h\in X\left( \tau\right) \\\text{(since }\left( g,h\right) \in X\left( \sigma\right) \times X\left( \tau\right) \text{))}}}\in X\left( \sigma\right) \cdot\left( X\left( \tau\right) \right) ^{\sigma^{-1}}\subseteq X\left( \sigma \tau\right)$ (by Lemma 6.4d \textbf{(e)}).
Thus, the map $X\left( \sigma\right) \times X\left( \tau\right) \rightarrow X\left( \sigma\tau\right) ,\ \ \ \ \ \ \ \ \ \ \left( g,h\right) \mapsto gh^{\sigma^{-1}}%$ is well-defined. It remains to prove that this map is bijective. In order to do so, we denote this map by$\Phi$. Thus,$\Phi\left( g,h\right) =gh^{\sigma^{-1}}$for each$\left( g,h\right) \in X\left( \sigma\right) \times X\left( \tau\right) $. Our goal is to prove that$\Phi$is bijective. Let us first prove that$\Phi$is surjective. Indeed, let$k\in X\left( \sigma\tau\right) $. Then,$k\in X\left( \sigma\tau\right) =U\cap U^{\left( \sigma\tau\rho\right) ^{-1}}$(by the definition of$X\left( \sigma\tau\right) $), so that$k\in U\cap U^{\left( \sigma\tau\rho\right) ^{-1}}\subseteq U\subseteq B$. Hence,$k\sigma=\underbrace{k}_{\in B}% \sigma\underbrace{1}_{\in B}\in B\sigma B$. Lemma 6.4c yields that the map$X\left( \sigma\right) \times B\rightarrow B\sigma B,\ \left( g,b\right) \mapsto g\sigma b$is well-defined and is a bijection. Hence, the element$k\sigma\in B\sigma B$is an image under this map. In other words, there exists some$\left( u,d\right) \in X\left( \sigma\right) \times B$such that$k\sigma=u\sigma d$. Consider this$\left( u,d\right) $. From$\left( u,d\right) \in X\left( \sigma\right) \times B$, we obtain$u\in X\left( \sigma\right) $and$d\in B$. We have$d\tau=\underbrace{d}_{\in B}\tau\underbrace{1}_{\in B}\in B\tau B$. Lemma 6.4c (applied to$\tau$instead of$\sigma$) yields that the map$X\left( \tau\right) \times B\rightarrow B\tau B,\ \left( g,b\right) \mapsto g\tau b$is well-defined and is a bijection. Hence, the element$d\tau\in B\tau B$is an image under this map. In other words, there exists some$\left( h,c\right) \in X\left( \tau\right) \times B$such that$d\tau=h\tau c$. Consider this$\left( h,c\right) $. From$\left( h,c\right) \in X\left( \tau\right) \times B$, we obtain$h\in X\left( \tau\right) $and$c\in B$. 
We have$\left( u,h\right) \in X\left( \sigma\right) \times X\left( \tau\right) $(since$u\in X\left( \sigma\right) $and$h\in X\left( \tau\right) $). Thus, the definition of$\Phi$yields$\Phi\left( u,h\right) =u\underbrace{h^{\sigma^{-1}}}_{=\left( \sigma^{-1}\right) ^{-1}h\sigma^{-1}}=u\underbrace{\left( \sigma^{-1}\right) ^{-1}}_{=\sigma }h\sigma^{-1}=u\sigma h\sigma^{-1}$, so that% \begin{equation} \Phi\left( u,h\right) \sigma=u\sigma h. \label{pf.l6.4b.g.surj.1}% \end{equation} We have% $\underbrace{k\sigma}_{=u\sigma d}\underbrace{\tau1}_{=\tau}=u\sigma \underbrace{d\tau}_{=h\tau c}=\underbrace{u\sigma h}_{\substack{=\Phi\left( u,h\right) \sigma\\\text{(by (\ref{pf.l6.4b.g.surj.1}))}}}\tau c=\Phi\left( u,h\right) \sigma\tau c.$ Notice that$\left( k,1\right) \in X\left( \sigma\tau\right) \times B$(since$k\in X\left( \sigma\tau\right) $and$1\in B$) and$\left( \Phi\left( u,h\right) ,c\right) \in X\left( \sigma\tau\right) \times B$(since$\Phi\left( u,h\right) \in X\left( \sigma\tau\right) $and$c\in B$). But Lemma 6.4c (applied to$\sigma\tau$instead of$\sigma$) yields that the map$X\left( \sigma\tau\right) \times B\rightarrow B\sigma\tau B,\ \left( g,b\right) \mapsto g\sigma\tau b$is well-defined and is a bijection. In particular, this map is bijective, thus injective. In other words, if$\left( g_{1},b_{1}\right) $and$\left( g_{2},b_{2}\right) $are two elements of$X\left( \sigma\tau\right) \times B$satisfying$g_{1}\sigma\tau b_{1}% =g_{2}\sigma\tau b_{2}$, then we have$\left( g_{1},b_{1}\right) =\left( g_{2},b_{2}\right) $. Applying this to$\left( g_{1},b_{1}\right) =\left( k,1\right) $and$\left( g_{2},b_{2}\right) =\left( \Phi\left( u,h\right) ,c\right) $, we obtain$\left( k,1\right) =\left( \Phi\left( u,h\right) ,c\right) $(since$\left( k,1\right) \in X\left( \sigma \tau\right) \times B$and$\left( \Phi\left( u,h\right) ,c\right) \in X\left( \sigma\tau\right) \times B$and$k\sigma\tau1=\Phi\left( u,h\right) \sigma\tau c$). 
In other words,$k=\Phi\left( u,h\right) $and$1=c$. Hence,$k=\Phi\left( u,h\right) \in\Phi\left( X\left( \sigma\right) \times X\left( \tau\right) \right) $. Now, forget that we fixed$k$. We thus have shown that every$k\in X\left( \sigma\tau\right) $satisfies$k\in\Phi\left( X\left( \sigma\right) \times X\left( \tau\right) \right) $. In other words,$X\left( \sigma\tau\right) \subseteq\Phi\left( X\left( \sigma\right) \times X\left( \tau\right) \right) $. In other words, the map$\Phi$is surjective. (This proof was a more detailed paraphrase of an argument that you included in your proof of Proposition 6.4.) Let us now show that the map$\Phi$is injective. Indeed, let$\left( g_{1},h_{1}\right) $and$\left( g_{2},h_{2}\right) $be two elements of$X\left( \sigma\right) \times X\left( \tau\right) $satisfying$\Phi\left( g_{1},h_{1}\right) =\Phi\left( g_{2},h_{2}\right) $. We shall show that$\left( g_{1},h_{1}\right) =\left( g_{2},h_{2}\right) $. We have$\left( g_{1},h_{1}\right) \in X\left( \sigma\right) \times X\left( \tau\right) $. In other words,$g_{1}\in X\left( \sigma\right) $and$h_{1}\in X\left( \tau\right) $. We have$\left( g_{2},h_{2}\right) \in X\left( \sigma\right) \times X\left( \tau\right) $. In other words,$g_{2}\in X\left( \sigma\right) $and$h_{2}\in X\left( \tau\right) $. The definition of$\Phi$yields$\Phi\left( g_{1},h_{1}\right) =g_{1}\left( h_{1}\right) ^{\sigma^{-1}}$. The definition of$\Phi$yields$\Phi\left( g_{2},h_{2}\right) =g_{2}\left( h_{2}\right) ^{\sigma^{-1}}$. 
Now,% $g_{1}\left( h_{1}\right) ^{\sigma^{-1}}=\Phi\left( g_{1},h_{1}\right) =\Phi\left( g_{2},h_{2}\right) =g_{2}\left( h_{2}\right) ^{\sigma^{-1}}.$ Multiplying both sides of this equality by$g_{2}^{-1}$from the left and by$\left( \left( h_{1}\right) ^{\sigma^{-1}}\right) ^{-1}$from the right, we obtain% $g_{2}^{-1}g_{1}=\left( h_{2}\right) ^{\sigma^{-1}}\left( \left( h_{1}\right) ^{\sigma^{-1}}\right) ^{-1}=\left( h_{2}h_{1}^{-1}\right) ^{\sigma^{-1}}%$ (since the map$G\rightarrow G,\ x\mapsto x^{\sigma^{-1}}$is a group automorphism). But the definition of$X\left( \sigma\right) $yields$X\left( \sigma\right) =U\cap U^{\left( \sigma\rho\right) ^{-1}}$. Hence,$X\left( \sigma\right) $is the intersection of two subgroups of$G$(namely, of the subgroup$U$and of the subgroup$U^{\left( \sigma\rho\right) ^{-1}}$). Thus,$X\left( \sigma\right) $is itself a subgroup of$G$. The same argument (applied to$\tau$instead of$\sigma$) shows that$X\left( \tau\right) $is a subgroup of$G$. From$g_{2}\in X\left( \sigma\right) $and$g_{1}\in X\left( \sigma\right) $, we obtain$g_{2}^{-1}g_{1}\in X\left( \sigma\right) $(since$X\left( \sigma\right) $is a subgroup of$G$). From$h_{2}\in X\left( \tau\right) $and$h_{1}\in X\left( \tau\right) $, we obtain$h_{2}h_{1}^{-1}\in X\left( \tau\right) $(since$X\left( \tau\right) $is a subgroup of$G$), so that$\left( h_{2}h_{1}^{-1}\right) ^{\sigma^{-1}}\in\left( X\left( \tau\right) \right) ^{\sigma^{-1}}$. Combining$g_{2}^{-1}g_{1}\in X\left( \sigma\right) $with$g_{2}^{-1}% g_{1}=\left( h_{2}h_{1}^{-1}\right) ^{\sigma^{-1}}\in\left( X\left( \tau\right) \right) ^{\sigma^{-1}}$, we obtain$g_{2}^{-1}g_{1}\in X\left( \sigma\right) \cap\left( X\left( \tau\right) \right) ^{\sigma^{-1}}=1$(by Lemma 6.4d \textbf{(b)}). In other words,$g_{2}^{-1}g_{1}=1$. Thus,$g_{1}=g_{2}$. 
Comparing$g_{2}^{-1}g_{1}=1$with$g_{2}^{-1}g_{1}=\left( h_{2}h_{1}% ^{-1}\right) ^{\sigma^{-1}}$, we obtain$1=\left( h_{2}h_{1}^{-1}\right) ^{\sigma^{-1}}=\underbrace{\left( \sigma^{-1}\right) ^{-1}}_{=\sigma}% h_{2}h_{1}^{-1}\sigma^{-1}=\sigma h_{2}h_{1}^{-1}\sigma^{-1}$. Multiplying both sides of this equality by$\sigma$from the right, we obtain$\sigma=\sigma h_{2}h_{1}^{-1}$. Cancelling$\sigma$from this equality, we find$1=h_{2}h_{1}^{-1}$. Thus,$h_{1}=h_{2}$. Now,$\left( \underbrace{g_{1}}_{=g_{2}},\underbrace{h_{1}}_{=h_{2}}\right) =\left( g_{2},h_{2}\right) $. Let us now forget that we fixed$\left( g_{1},h_{1}\right) $and$\left( g_{2},h_{2}\right) $. We thus have shown that if$\left( g_{1},h_{1}\right) $and$\left( g_{2},h_{2}\right) $are two elements of$X\left( \sigma\right) \times X\left( \tau\right) $satisfying$\Phi\left( g_{1},h_{1}\right) =\Phi\left( g_{2},h_{2}\right) $, then$\left( g_{1},h_{1}\right) =\left( g_{2},h_{2}\right) $. In other words, the map$\Phi$is injective. Since we also know that$\Phi$is surjective, we therefore conclude that the map$\Phi$is bijective. In other words, the map $X\left( \sigma\right) \times X\left( \tau\right) \rightarrow X\left( \sigma\tau\right) ,\ \ \ \ \ \ \ \ \ \ \left( g,h\right) \mapsto gh^{\sigma^{-1}}%$ is bijective (since this map is$\Phi$). This proves Lemma 6.4d \textbf{(g)}.$\square$] Now, your Proposition 6.4 is precisely Lemma 6.4d \textbf{(g)}, whereas your Proposition 6.5 is exactly Lemma 6.4d \textbf{(f)}. Hence, both Proposition 6.4 and Proposition 6.5 are proven. \item \textbf{Definition 7.1:} Remove the comma in \textquotedblleft preserves the sets,\textquotedblright. \item \textbf{Definition 7.1:} After \textquotedblleft it is the largest subgroup of$\Sigma_{n}$that preserves these sets\textquotedblright, I would add \textquotedblleft(actually, it is the set of all permutations$\sigma \in\Sigma_{n}$that preserve these sets)\textquotedblright. 
(This is a more concrete description of$\Sigma_{I}$, and you use it in the proof of Proposition 7.3 below.) I would also suggest replacing the word \textquotedblleft sets\textquotedblright\ by \textquotedblleft intervals\textquotedblright% \ whenever you are talking about these sets. \item \textbf{Definition 7.2:} At the beginning of this definition, I would add the following sentences: \textquotedblleft Let$I$be a subset of$\left\{ 1,2,\ldots,n-1\right\} $. Set$I^{c}=\left\{ 0,1,\ldots,n\right\} \setminus I$. Again, write this set$I^{c}$as$\left\{ i_{0},i_{1}% ,\ldots,i_{r}\right\} $with$0=i_{0}<i_{1}<\cdots<i_{r}=n$.\textquotedblright \item \textbf{Proof of Proposition 7.5:} Replace \textquotedblleft Now suppose that$l\left( \sigma\right) >1$\textquotedblright\ by \textquotedblleft Now suppose that$l\left( \sigma\right) >0$\textquotedblright. Also, remove the preceding sentence (\textquotedblleft If$l\left( \sigma\right) =1$then$\sigma=s_{i}$for some$i$and$\sigma\in P$so$i\in I$so$\sigma\in P_{I}% $\textquotedblright) completely (it is unnecessary and complicates the structure of the proof). \item \textbf{Proof of Proposition 7.5:} Replace \textquotedblleft so$s_{k}\in P_{I}$\textquotedblright\ by \textquotedblleft so$s_{k}\in \Sigma_{I}\subseteq B\Sigma_{I}B=P_{I}$\textquotedblright. \item \textbf{Proof of Proposition 7.5:} Replace \textquotedblleft so we can assume by induction that\textquotedblright\ by \textquotedblleft so the induction hypothesis yields\textquotedblright. \item \textbf{Proof of Proposition 7.5:} Remove \textquotedblleft and thus that$g\in P_{I}$\textquotedblright. \item \textbf{Proof of Proposition 7.5:} I would notice that your proof of Proposition 7.5 proves a slightly stronger claim: \textbf{Proposition 7.5a.} Let$P$be a subgroup of$G$such that$P\geq B$. Let$I=\left\{ i\ \mid\ s_{i}\in P\right\} $. Then,$P=P_{I}$. Furthermore, let me state another fact (that will be used later): \textbf{Lemma 7.5b.} Let$I\subseteq\left\{ 1,2,\ldots,n-1\right\} $. Then,$I=\left\{ i\ \mid\ s_{i}\in P_{I}\right\} $.
[\textit{Proof of Lemma 7.5b.} Define a subset$J$of$\left\{ 1,2,\ldots ,n-1\right\} $by$J=\left\{ i\ \mid\ s_{i}\in P_{I}\right\} $. Then,$J=\left\{ i\ \mid\ s_{i}\in P_{I}\right\} \supseteq I$(since every$i\in I$satisfies$s_{i}\in\Sigma_{I}\subseteq B\Sigma_{I}B=P_{I}$). Now, let$j\in J$. We are going to prove that$j\in I$. Indeed, assume the contrary. Hence,$j\notin I$, so that$j\in I^{c}$. Hence,$E_{j}$is one of the entries of the obvious flag$\underline{E}% \in\operatorname*{Flag}\nolimits_{I}\left( \mathbb{F}_{p}^{n}\right) $. Therefore, the group$P_{I}$fixes the subspace$E_{j}$(since the group$P_{I}$fixes the obvious flag$\underline{E}\in\operatorname*{Flag}% \nolimits_{I}\left( \mathbb{F}_{p}^{n}\right) $). In other words,$pE_{j}\subseteq E_{j}$for each$p\in P_{I}$. But$j\in J=\left\{ i\ \mid\ s_{i}\in P_{I}\right\} $. In other words,$s_{j}\in P_{I}$. But we have$pE_{j}\subseteq E_{j}$for each$p\in P_{I}$. Applying this to$p=s_{j}$, we conclude that$s_{j}E_{j}\subseteq E_{j}$(since$s_{j}\in P_{I}$). Now,$s_{j}e_{j}=e_{j+1}$, so that$e_{j+1}% =s_{j}\underbrace{e_{j}}_{\in E_{j}}\in s_{j}E_{j}\subseteq E_{j}$. This contradicts the (obvious) fact that$e_{j+1}\notin E_{j}$. This contradiction proves that our assumption was false. Hence, we have$j\in I$. Now, forget that we fixed$j$. We thus have shown that$j\in I$for each$j\in J$. In other words,$J\subseteq I$. Combining this with$J\supseteq I$, we obtain$J=I$. Hence,$I=J=\left\{ i\ \mid\ s_{i}\in P_{I}\right\} $. This proves Lemma 7.5b.$\square$] \item \textbf{Proposition 7.7:} Add \textquotedblleft Let$V=\mathbb{F}% _{p}^{n}$.\textquotedblright\ at the beginning of the theorem. (Otherwise,$\operatorname*{Flag}\nolimits_{I}\left( V\right) $and$\operatorname*{Flag}\nolimits_{J}\left( V\right) $wouldn't canonically be$G$-sets.) 
\item \textbf{Proof of Proposition 7.7:} Again, I'd prefer some more details: \textbf{1.} You claim that \textquotedblleft the orbits of$B$in$V$are precisely the sets$E_{k}\setminus E_{k-1}$\textquotedblright. In order for this claim to be fully correct, you should set$E_{-1}=\varnothing$, and allow$k$to range over$\left\{ 0,1,\ldots,n\right\} $(rather than$\left\{ 1,2,\ldots,n\right\} $only). (Otherwise, you are missing the orbit$\left\{ 0\right\} =E_{0}\setminus E_{-1}$.) Let me also prove this claim: [\textit{Proof of the fact that the orbits of }$B$\textit{in }$V$\textit{are precisely the sets }$E_{k}\setminus E_{k-1}$\textit{:} For every$k\in\left\{ 1,2,\ldots,n\right\} $, we have% \begin{equation} E_{k}\setminus E_{k-1}=Be_{k} \label{pf.p7.7.1}% \end{equation} \footnote{\textit{Proof of (\ref{pf.p7.7.1}):} Let$k\in\left\{ 1,2,\ldots,n\right\} $. Hence, the vector$e_{k}$is well-defined. \par Let$b\in B$. Then,$bE_{k}=E_{k}$and$bE_{k-1}=E_{k-1}$(by the definition of$B$). But the element$b$of$G$is invertible (since$G$is a group), thus a bijection. Hence,$b\left( E_{k}\setminus E_{k-1}\right) =\underbrace{bE_{k}}_{=E_{k}}\setminus\underbrace{bE_{k-1}}_{=E_{k-1}}% =E_{k}\setminus E_{k-1}$. \par Now,$e_{k}\in E_{k}\setminus E_{k-1}$(since$e_{k}\in E_{k}$and$e_{k}\notin E_{k-1}$), and thus$b\underbrace{e_{k}}_{\in E_{k}\setminus E_{k-1}}\in b\left( E_{k}\setminus E_{k-1}\right) =E_{k}\setminus E_{k-1}$. \par Now, forget that we fixed$b$. We thus have shown that$be_{k}\in E_{k}\setminus E_{k-1}$for each$b\in B$. In other words,$Be_{k}\subseteq E_{k}\setminus E_{k-1}$. \par On the other hand, fix$\zeta\in E_{k}\setminus E_{k-1}$. Thus,$\zeta\in E_{k}$and$\zeta\notin E_{k-1}$. The last$n-k$coordinates of$\zeta$are zero (since$\zeta\in E_{k}$), but the last$n-k+1$coordinates of$\zeta$are not all zero (since$\zeta\notin E_{k-1}$). Hence, the$k$-th coordinate of$\zeta$must be nonzero. 
\par Let$c\in\mathbb{F}_{p}^{n\times n}$be the$n\times n$-matrix whose$k$-th column is$\zeta$whereas all its other columns are the corresponding columns of the identity matrix (i.e., for each$i\neq k$, the$i$-th column of$c$shall be$e_{i}$). Then, the$k$-th column of$c$is the vector$\zeta$, whose last$n-k$coordinates are zero. Thus, the last$n-k$entries of the$k$-th column of$c$are zero. Moreover, the$k$-th column of$c$is the vector$\zeta$, whose$k$-th coordinate is nonzero. Hence, the$k$-th entry of the$k$-th column of$c$is nonzero. Now, the matrix$c$is upper-triangular (since the last$n-k$entries of the$k$-th column of$c$are zero, while all other columns are the corresponding columns of the identity matrix) and its diagonal entries are nonzero (since the$k$-th entry of the$k$-th column of$c$is nonzero, while all other columns are the corresponding columns of the identity matrix). Thus, the matrix$c$is an invertible upper-triangular matrix. In other words,$c\in B$. Now,$ce_{k}=\left( \text{the }k\text{-th column of }c\right) =\zeta$, so that$\zeta=\underbrace{c}_{\in B}e_{k}\in Be_{k}$. \par Now, forget that we fixed$\zeta$. We thus have proven that$\zeta\in Be_{k}$for each$\zeta\in E_{k}\setminus E_{k-1}$. In other words,$E_{k}\setminus E_{k-1}\subseteq Be_{k}$. Combining this with$Be_{k}\subseteq E_{k}\setminus E_{k-1}$, we obtain$E_{k}\setminus E_{k-1}=Be_{k}$. This proves (\ref{pf.p7.7.1}).}. Every set of the form$E_{k}\setminus E_{k-1}$(with$k\in\left\{ 0,1,\ldots,n\right\} $) is an orbit of$B$in$V$% \ \ \ \ \footnote{\textit{Proof.} Let$k\in\left\{ 0,1,\ldots,n\right\} $. We must show that the set$E_{k}\setminus E_{k-1}$is an orbit of$B$in$V$. \par If$k=0$, then this is fairly clear (indeed, if$k=0$, then$\underbrace{E_{k}% }_{=E_{0}=\left\{ 0\right\} }\setminus\underbrace{E_{k-1}}_{=E_{-1}% =\varnothing}=\left\{ 0\right\} \setminus\varnothing=\left\{ 0\right\} =B0$, which is clearly an orbit of$B$in$V$). Thus, we WLOG assume that$k\neq0$. 
Hence,$k\in\left\{ 1,2,\ldots,n\right\} $(since$k\in\left\{ 0,1,\ldots,n\right\} $and$k\neq0$). Hence, (\ref{pf.p7.7.1}) shows that$E_{k}\setminus E_{k-1}=Be_{k}$. Thus,$E_{k}\setminus E_{k-1}$is an orbit of$B$in$V$(since$Be_{k}$is an orbit of$B$in$V$). Qed.}. The union of these orbits$E_{k}\setminus E_{k-1}$is the whole space$\mathbb{F}_{p}^{n}$(because this union is$\bigcup_{k=0}^{n}\left( E_{k}\setminus E_{k-1}\right) =\underbrace{E_{n}}_{=\mathbb{F}_{p}^{n}}\setminus\underbrace{E_{-1}% }_{=\varnothing}=\mathbb{F}_{p}^{n}\setminus\varnothing=\mathbb{F}_{p}^{n}$). Hence, these orbits$E_{k}\setminus E_{k-1}$are \textbf{all} the orbits of$B$in$V$. This is exactly what we wanted to prove.$\square$] \textbf{2.} You claim that \textquotedblleft the spaces$E_{k}$are the only$B$-invariant subspace of$V$\textquotedblright. This claim has a little typo in it (\textquotedblleft subspace\textquotedblright\ should be \textquotedblleft subspaces\textquotedblright), and again needs a proof. [\textit{Proof of the fact that the subspaces }$E_{k}$\textit{(for }%$k\in\left\{ 0,1,\ldots,n\right\} $\textit{) are the only }$B$% \textit{-invariant subspaces of }$V$\textit{:} For each$k\in\left\{ 0,1,\ldots,n\right\} $, the subspace$E_{k}$is a$B$-invariant subspace of$V$(because every$b\in B$satisfies$bE_{k}=E_{k}$). Conversely, every$B$-invariant subspace of$V$has the form$E_{k}$for some$k\in\left\{ 0,1,\ldots,n\right\} $\ \ \ \ \footnote{\textit{Proof.} Let$Q$be a$B$-invariant subspace of$V$. We must show that$Q$has the form$E_{k}$for some$k\in\left\{ 0,1,\ldots,n\right\} $. \par We have$Q\subseteq V=\mathbb{F}_{p}^{n}=E_{n}$. Hence, there exists some$k\in\left\{ -1,0,\ldots,n\right\} $such that$Q\subseteq E_{k}$(namely,$k=n$). Let$\ell$be the \textbf{largest} such$k$. Thus,$Q\subseteq E_{\ell}$. \par We have$0\in Q$(since$Q$is a subspace of$V$) but$0\notin\varnothing$.
If we had$E_{\ell}=\varnothing$, then we would have$0\in Q\subseteq E_{\ell }=\varnothing$, which would contradict$0\notin\varnothing$. Hence, we cannot have$E_{\ell}=\varnothing$. Thus, we have$E_{\ell}\neq\varnothing=E_{-1}$. Consequently,$\ell\neq-1$. Hence,$\ell\geq0$, so that$\ell\in\left\{ 0,1,\ldots,n\right\} $and therefore$\ell-1\in\left\{ -1,0,\ldots ,n\right\} $. \par But$\ell$is the \textbf{largest}$k\in\left\{ -1,0,\ldots,n\right\} $such that$Q\subseteq E_{k}$(by the definition of$\ell$). Thus, every$k\in\left\{ -1,0,\ldots,n\right\} $satisfying$k<\ell$satisfies$Q\not \subseteq E_{k}$. Applying this to$k=\ell-1$, we obtain$Q\not \subseteq E_{\ell-1}$(since$\ell-1<\ell$). Thus, there exists some$q\in Q$such that$q\notin E_{\ell-1}$. Consider this$q$. Combining$q\in Q\subseteq E_{\ell}$with$q\notin E_{\ell-1}$, we obtain$q\in E_{\ell }\setminus E_{\ell-1}=Be_{\ell}$(by (\ref{pf.p7.7.1})). In other words, there exists some$b\in B$such that$q=be_{\ell}$. Consider this$b$. Since$B$is a group, we have$Bb=B$(since$b\in B$). Now,$B\underbrace{q}_{=be_{\ell}% }=\underbrace{Bb}_{=B}e_{\ell}=Be_{\ell}=E_{\ell}\setminus E_{\ell-1}$. Thus,$E_{\ell}\setminus E_{\ell-1}=B\underbrace{q}_{\in Q}\subseteq BQ\subseteq Q$(since the subspace$Q$is$B$-invariant). \par Now, let$r\in E_{\ell}$. We will show that$r\in Q$. In fact, if$r\in E_{\ell}\setminus E_{\ell-1}$, then this is obvious (because if$r\in E_{\ell}\setminus E_{\ell-1}$, then$r\in E_{\ell}\setminus E_{\ell-1}\subseteq Q$). Thus, we WLOG assume that we don't have$r\in E_{\ell}\setminus E_{\ell-1}$. In other words, we have$r\notin E_{\ell}\setminus E_{\ell-1}$. Combining$r\in E_{\ell}$with$r\notin E_{\ell}\setminus E_{\ell-1}$, we obtain$r\in E_{\ell}\setminus\left( E_{\ell}\setminus E_{\ell-1}\right) \subseteq E_{\ell-1}$.
\par If we had$r-q\in E_{\ell-1}$, then we would have$q=\underbrace{r}_{\in E_{\ell-1}}-\underbrace{\left( r-q\right) }_{\in E_{\ell-1}}\in E_{\ell -1}-E_{\ell-1}\subseteq E_{\ell-1}$(since$E_{\ell-1}$is a vector space), which would contradict$q\notin E_{\ell-1}$. Hence, we do not have$r-q\in E_{\ell-1}$. In other words, we have$r-q\notin E_{\ell-1}$. But$\underbrace{r}_{\in E_{\ell-1}\subseteq E_{\ell}% }-\underbrace{q}_{\in E_{\ell}\setminus E_{\ell-1}\subseteq E_{\ell}}\in E_{\ell}-E_{\ell}\subseteq E_{\ell}$(since$E_{\ell}$is a vector space). Combining this with$r-q\notin E_{\ell-1}$, we obtain$r-q\in E_{\ell}\setminus E_{\ell-1}\subseteq Q$. Now,$r=\underbrace{q}_{\in Q}+\underbrace{\left( r-q\right) }_{\in Q}\in Q+Q\subseteq Q$(since$Q$is a vector space). Hence, we have proven that$r\in Q$. \par Now, forget that we fixed$r$. We thus have shown that$r\in Q$for each$r\in E_{\ell}$. In other words,$E_{\ell}\subseteq Q$. Combining this with$Q\subseteq E_{\ell}$, we obtain$Q=E_{\ell}$. Thus,$Q=E_{k}$for some$k\in\left\{ 0,1,\ldots,n\right\} $(namely,$k=\ell$). In other words,$Q$has the form$E_{k}$for some$k\in\left\{ 0,1,\ldots,n\right\} $. Qed.}. Hence, the subspaces$E_{k}$(for$k\in\left\{ 0,1,\ldots,n\right\} $) are the only$B$-invariant subspaces of$V$. This completes the proof.$\square$] \item \textbf{Proof of Proposition 7.7:} Replace \textquotedblleft the point$\underline{E}_{I}\in\operatorname*{Flag}\nolimits_{I}\left( V\right) $\textquotedblright\ by \textquotedblleft the point$\underline{E}% \in\operatorname*{Flag}\nolimits_{I}\left( V\right) $\textquotedblright. \item \textbf{Proof of Proposition 7.7:} Replace \textquotedblleft any map\textquotedblright\ by \textquotedblleft any$G$-equivariant map\textquotedblright. \item \textbf{Proof of Proposition 7.7:} You write that \textquotedblleft It is also clear that$P_{I}\leq P_{J}$iff$I\subseteq J$\textquotedblright.
Maybe it is worth giving a proof of this: [\textit{Proof of the fact that }$P_{I}\leq P_{J}$\textit{iff }$I\subseteq J$\textit{:} We want to show that$P_{I}\leq P_{J}$iff$I\subseteq J$. One direction of this equivalence is clear (namely: if$I\subseteq J$, then$P_{I}\leq P_{J}$). It remains to prove the other. In other words, it remains to prove that if$P_{I}\leq P_{J}$, then$I\subseteq J$. So let us assume that$P_{I}\leq P_{J}$. We must show that$I\subseteq J$. Lemma 7.5b yields$I=\left\{ i\ \mid\ s_{i}\in P_{I}\right\} \subseteq \left\{ i\ \mid\ s_{i}\in P_{J}\right\} $(since$P_{I}\leq P_{J}$). But Lemma 7.5b (applied to$J$instead of$I$) yields$J=\left\{ i\ \mid\ s_{i}\in P_{J}\right\} $. Hence,$I\subseteq\left\{ i\ \mid \ s_{i}\in P_{J}\right\} =J$. Thus,$I\subseteq J$is proven. This completes our proof.$\square$] \item \textbf{\S 7:} I think it is worthwhile stating three additional facts as consequences of the proof of Proposition 7.7: \textbf{Proposition 7.7a.} Let$V=\mathbb{F}_{p}^{n}$. Let$I\subseteq\left\{ 1,2,\ldots,n-1\right\} $. Then,$\operatorname*{Flag}\nolimits_{I}\left( V\right) \cong G/P_{I}$as$G$-sets. [\textit{Proof of Proposition 7.7a.} In the proof of Proposition 7.7, we have shown that$G$acts transitively on$\operatorname*{Flag}\nolimits_{I}% \left( V\right) $. Thus, for any$\underline{X}\in\operatorname*{Flag}% \nolimits_{I}\left( V\right) $, we have$\operatorname*{Flag}\nolimits_{I}% \left( V\right) \cong G/G_{\underline{X}}$as$G$-sets, where$G_{\underline{X}}$denotes the stabilizer of$\underline{X}$. Applying this to$\underline{X}=\underline{E}$(where$\underline{E}$is the \textquotedblleft obvious flag\textquotedblright\ defined in Definition 7.2), we conclude that$\operatorname*{Flag}\nolimits_{I}\left( V\right) \cong G/G_{\underline{E}}$as$G$-sets. But the stabilizer of$\underline{E}$is$P_{I}$(by the definition of$P_{I}$). In other words,$G_{\underline{E}% }=P_{I}$.
Hence,$\operatorname*{Flag}\nolimits_{I}\left( V\right) \cong G/\underbrace{G_{\underline{E}}}_{=P_{I}}=G/P_{I}$as$G$-sets. This proves Proposition 7.7a.$\square$] \textbf{Proposition 7.7b.} Let$V=\mathbb{F}_{p}^{n}$. Let$I\subseteq\left\{ 1,2,\ldots,n-1\right\} $. For any$g\in G$, we shall use the notation$\overline{g}$for the coset$gP_{I}$of$g$in$G/P_{I}$. \textbf{(a)} There is precisely one$B$-fixed point in$G/P_{I}$. This$B$-fixed point is$\overline{1}$, and will be called the \textit{basepoint} of$G/P_{I}$. We have% \begin{equation} \left( G/P_{I}\right) ^{B}=\left\{ \overline{1}\right\} . \label{eq.p7.7b.1}% \end{equation} \textbf{(b)} Let$i\in\left\{ 1,2,\ldots,n-1\right\} $. Then,$\left( G/P_{I}\right) ^{P_{i}}=% \begin{cases} \left\{ \overline{1}\right\} , & \text{if }i\in I\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} $. [\textit{Proof of Proposition 7.7b.} Proposition 7.7a yields$\operatorname*{Flag}\nolimits_{I}\left( V\right) \cong G/P_{I}$as$G$-sets. We have the following general fact about group actions: If$\mathfrak{A}$is a subgroup of a group$\mathfrak{G}$, and if$\mathfrak{X}$is a$\mathfrak{G}% $-set, then \begin{equation} \mathfrak{X}^{\mathfrak{A}}\cong\operatorname*{Map}\nolimits_{\mathfrak{G}% }\left( \mathfrak{G}/\mathfrak{A},\mathfrak{X}\right) \label{pf.p7.7b.1}% \end{equation} as sets\footnote{This is easy to prove. (In fact, for each$g\in\mathfrak{G}$, let$\overline{g}$denote the coset$g\mathfrak{A}$of$g$in$\mathfrak{G}% /\mathfrak{A}$. Then, the map$\operatorname*{Map}\nolimits_{\mathfrak{G}% }\left( \mathfrak{G}/\mathfrak{A},\mathfrak{X}\right) \rightarrow \mathfrak{X}^{\mathfrak{A}}$sending each$f\in\operatorname*{Map}% \nolimits_{\mathfrak{G}}\left( \mathfrak{G}/\mathfrak{A},\mathfrak{X}\right) $to$f\left( \overline{1}\right) \in\mathfrak{X}^{\mathfrak{A}}$is a bijection. 
Indeed, its inverse map sends each$u\in\mathfrak{X}^{\mathfrak{A}% }$to the$\mathfrak{G}$-map$\mathfrak{G}/\mathfrak{A}\rightarrow\mathfrak{X}% ,\ \overline{g}\mapsto gu$.)}. \textbf{(a)} In the proof of Proposition 7.7, we have seen that the point$\underline{E}\in\operatorname*{Flag}\nolimits_{I}\left( V\right) $is the unique$B$-fixed point in$\operatorname*{Flag}\nolimits_{I}\left( V\right) $. In other words,$\left( \operatorname*{Flag}\nolimits_{I}\left( V\right) \right) ^{B}=\left\{ \underline{E}\right\} $. But$\operatorname*{Flag}% \nolimits_{I}\left( V\right) \cong G/P_{I}$as$G$-sets. Hence,$\left( \operatorname*{Flag}\nolimits_{I}\left( V\right) \right) ^{B}\cong\left( G/P_{I}\right) ^{B}$as sets. Thus,$\left( G/P_{I}\right) ^{B}\cong\left( \operatorname*{Flag}\nolimits_{I}\left( V\right) \right) ^{B}=\left\{ \underline{E}\right\} $as sets. Hence,$\left( G/P_{I}\right) ^{B}$is a$1$-element set (since$\left\{ \underline{E}\right\} $is a$1$-element set). For every$b\in B$, we have$b\overline{1}=\overline{b1}=\overline {b}=\overline{1}$in$G/P_{I}$(since$b\in B\leq P_{I}$). Therefore,$\overline{1}\in\left( G/P_{I}\right) ^{B}$. Therefore,$\left( G/P_{I}\right) ^{B}=\left\{ \overline{1}\right\} $(since$\left( G/P_{I}\right) ^{B}$is a$1$-element set). In other words, the set of all$B$-fixed points in$G/P_{I}$is$\left\{ \overline{1}\right\} $. In other words, there is precisely one$B$-fixed point in$G/P_{I}$, and this$B$-fixed point is$\overline{1}$. This completes the proof of Proposition 7.7b \textbf{(a)}. \textbf{(b)} Applying (\ref{pf.p7.7b.1}) to$\mathfrak{G}=G$,$\mathfrak{A}% =P_{i}$and$\mathfrak{X}=G/P_{I}$, we conclude that$\left( G/P_{I}\right) ^{P_{i}}\cong\operatorname*{Map}\nolimits_{G}\left( G/P_{i},G/P_{I}\right) $as sets. But recall that$\operatorname*{Flag}\nolimits_{I}\left( V\right) \cong G/P_{I}$as$G$-sets.
Also, Proposition 7.7a (applied to$\left\{ i\right\} $instead of$I$) yields$\operatorname*{Flag}\nolimits_{\left\{ i\right\} }\left( V\right) \cong G/P_{\left\{ i\right\} }=G/P_{i}$as$G$-sets. Hence,% \begin{align*} \left( G/P_{I}\right) ^{P_{i}} & \cong\operatorname*{Map}\nolimits_{G}% \left( \underbrace{G/P_{i}}_{\substack{\cong\operatorname*{Flag}% \nolimits_{\left\{ i\right\} }\left( V\right) \\=\operatorname*{Flag}% \nolimits_{i}\left( V\right) }},\underbrace{G/P_{I}}_{\cong% \operatorname*{Flag}\nolimits_{I}\left( V\right) }\right) \cong% \operatorname*{Map}\nolimits_{G}\left( \operatorname*{Flag}\nolimits_{i}% \left( V\right) ,\operatorname*{Flag}\nolimits_{I}\left( V\right) \right) \\ & =% \begin{cases} \left( \text{a singleton}\right) , & \text{if }\left\{ i\right\} \subseteq I\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} \\ & \ \ \ \ \ \ \ \ \ \ \left( \begin{array} [c]{c}% \text{by Proposition 7.7 (applied to }\left\{ i\right\} \text{ and }I\\ \text{instead of }I\text{ and }J\text{)}% \end{array} \right) \\ & =% \begin{cases} \left( \text{a singleton}\right) , & \text{if }i\in I\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} . \end{align*} Therefore, if$i\notin I$, then$\left( G/P_{I}\right) ^{P_{i}}% \cong\varnothing$and thus$\left( G/P_{I}\right) ^{P_{i}}=\varnothing$. Hence, Proposition 7.7b \textbf{(b)} is proven in the case when$i\notin I$. We thus WLOG assume that we don't have$i\notin I$. Hence, we have$i\in I$. We must show that$\left( G/P_{I}\right) ^{P_{i}}=\left\{ \overline {1}\right\} $. Now,$\left( G/P_{I}\right) ^{P_{i}}\cong% \begin{cases} \left( \text{a singleton}\right) , & \text{if }i\in I\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} =\left( \text{a singleton}\right) $(since$i\in I$). Hence,$\left( G/P_{I}\right) ^{P_{i}}$is a$1$-element set. But$i\in I$, so that$\left\{ i\right\} \subseteq I$and thus$P_{\left\{ i\right\} }\subseteq P_{I}$.
Therefore, every$p\in P_{i}$satisfies$p\overline{1}=\overline{p1}=\overline{p}=\overline{1}$in$G/P_{I}$(since$p\in P_{i}=P_{\left\{ i\right\} }\subseteq P_{I}$). In other words,$\overline{1}\in\left( G/P_{I}\right) ^{P_{i}}$. Since$\left( G/P_{I}\right) ^{P_{i}}$is a$1$-element set, we can therefore conclude that$\left( G/P_{I}\right) ^{P_{i}}=\left\{ \overline{1}\right\} $. This completes the proof of Proposition 7.7b \textbf{(b)}.] \textbf{Proposition 7.7c.} Let$V=\mathbb{F}_{p}^{n}$. Let$X$be a parabolic$G$-set. For each$y\in X^{B}$, set$I_{y}=\left\{ i\in\left\{ 1,2,\ldots,n-1\right\} \ \mid\ y\in X^{P_{i}}\right\} $. For each$y\in X$, let$G_{y}$denote the stabilizer of$y$in$G$. \textbf{(a)} If$y\in X^{B}$, then$P_{I_{y}}=G_{y}$. \textbf{(b)} Let$y_{1}\in X^{B}$,$y_{2}\in X^{B}$,$q_{1}\in G$and$q_{2}\in G$be such that$q_{1}y_{1}=q_{2}y_{2}$. Then,$y_{1}=y_{2}$and$q_{1}G_{y_{1}}=q_{2}G_{y_{1}}$. [\textit{Proof of Proposition 7.7c.} \textbf{(a)} Let$y\in X^{B}$. Then,$B\subseteq G_{y}$(since$y\in X^{B}$), so that$G_{y}\geq B$. Thus,$G_{y}$is a subgroup of$G$such that$G_{y}\geq B$. Let$I=\left\{ i\ \mid\ s_{i}\in G_{y}\right\} $. Hence, Proposition 7.5a (applied to$P=G_{y}$) shows that$G_{y}=P_{I}$. Now, let$j\in I_{y}$. Then,$j\in I_{y}=\left\{ i\in\left\{ 1,2,\ldots ,n-1\right\} \ \mid\ y\in X^{P_{i}}\right\} $. In other words,$j$is an element of$\left\{ 1,2,\ldots,n-1\right\} $and satisfies$y\in X^{P_{j}}$. But$P_{j}=P_{\left\{ j\right\} }=B\Sigma_{\left\{ j\right\} }B$(by Proposition 7.3, applied to$\left\{ j\right\} $instead of$I$). Now,$s_{j}=\underbrace{1}_{\in B}\underbrace{s_{j}}_{\in\Sigma_{\left\{ j\right\} }}\underbrace{1}_{\in B}\in B\Sigma_{\left\{ j\right\} }B=P_{j}$. From$y\in X^{P_{j}}$, we conclude that$py=y$for each$p\in P_{j}$. Applying this to$p=s_{j}$, we obtain$s_{j}y=y$(since$s_{j}\in P_{j}$). In other words,$s_{j}\in G_{y}$. In other words,$j\in\left\{ i\ \mid\ s_{i}\in G_{y}\right\} $. 
This rewrites as$j\in I$(since$I=\left\{ i\ \mid \ s_{i}\in G_{y}\right\} $). Now, forget that we fixed$j$. We thus have proven that$j\in I$for each$j\in I_{y}$. In other words,$I_{y}\subseteq I$. On the other hand, let$k\in I$. Thus,$k\in I=\left\{ i\ \mid\ s_{i}\in G_{y}\right\} $. In other words,$k$is an element of$\left\{ 1,2,\ldots,n-1\right\} $and satisfies$s_{k}\in G_{y}$. In other words,$s_{k}y=y$. But$P_{k}=P_{\left\{ k\right\} }=B\Sigma_{\left\{ k\right\} }B$(by Proposition 7.3, applied to$\left\{ k\right\} $instead of$I$). The definition of$\Sigma_{\left\{ k\right\} }$yields$\Sigma_{\left\{ k\right\} }=\left\langle s_{k}\right\rangle =\left\{ 1,s_{k}\right\} $. Hence,$gy=y$for each$g\in\Sigma_{\left\{ k\right\} }$(since$1y=y$and$s_{k}y=y$). In other words,$y\in X^{\Sigma_{\left\{ k\right\} }}$. Now, let$p\in P_{k}$. Then,$p\in P_{k}=B\Sigma_{\left\{ k\right\} }B$. In other words, there exist$b_{1}\in B$,$g\in\Sigma_{\left\{ k\right\} }$and$b_{2}\in B$such that$p=b_{1}gb_{2}$. Consider these$b_{1}$,$g$and$b_{2}$. Now,% $\underbrace{p}_{=b_{1}gb_{2}}y=b_{1}g\underbrace{b_{2}y}% _{\substack{=y\\\text{(since }y\in X^{B}\text{)}}}=b_{1}\underbrace{gy}% _{\substack{=y\\\text{(since }y\in X^{\Sigma_{\left\{ k\right\} }}\text{)}% }}=b_{1}y=y$ (since$y\in X^{B}$). Now, forget that we fixed$p$. We thus have proven that$py=y$for each$p\in P_{k}$. In other words,$y\in X^{P_{k}}$. Hence,$k$is an element of$\left\{ 1,2,\ldots,n-1\right\} $and satisfies$y\in X^{P_{k}}$. In other words,$k\in\left\{ i\in\left\{ 1,2,\ldots,n-1\right\} \ \mid\ y\in X^{P_{i}}\right\} $. In other words,$k\in I_{y}$(since$I_{y}=\left\{ i\in\left\{ 1,2,\ldots,n-1\right\} \ \mid\ y\in X^{P_{i}}\right\} $). Now, forget that we fixed$k$. We thus have proven that$k\in I_{y}$for each$k\in I$. In other words,$I\subseteq I_{y}$. Combining this with$I_{y}\subseteq I$, we obtain$I_{y}=I$. Hence,$P_{I_{y}}=P_{I}=G_{y}$. This proves Proposition 7.7c \textbf{(a)}. 
\textbf{(b)} Let$Y=Gy_{1}$be the$G$-orbit of$y_{1}$. Then,$Y$is a$G$-subset of$X$. Moreover,$Y\cong G/G_{y_{1}}$as$G$-sets (by the orbit-stabilizer theorem). Proposition 7.7c \textbf{(a)} (applied to$y=y_{1}% $) yields$P_{I_{y_{1}}}=G_{y_{1}}$. But Proposition 7.7b \textbf{(a)} (applied to$I=I_{y_{1}}$) yields$\left( G/P_{I_{y_{1}}}\right) ^{B}=\left\{ \overline{1}\right\} $. Hence,$\left\vert \left( G/P_{I_{y_{1}}}\right) ^{B}\right\vert =\left\vert \left\{ \overline {1}\right\} \right\vert =1$. We have$Y\cong G/\underbrace{G_{y_{1}}}_{=P_{I_{y_{1}}}}=G/P_{I_{y_{1}}}$as$G$-sets, and thus$Y^{B}\cong\left( G/P_{I_{y_{1}}}\right) ^{B}$as sets. Hence,$\left\vert Y^{B}\right\vert =\left\vert \left( G/P_{I_{y_{1}}% }\right) ^{B}\right\vert =1$. Both$y_{1}$and$y_{2}$are$B$-fixed points (since$y_{1}\in X^{B}$and$y_{2}\in X^{B}$). We have$q_{1}y_{1}=q_{2}y_{2}$. Multiplying both sides of this equality by$q_{2}^{-1}$, we obtain$q_{2}^{-1}q_{1}y_{1}% =\underbrace{q_{2}^{-1}q_{2}}_{=1}y_{2}=y_{2}$, so that$y_{2}% =\underbrace{q_{2}^{-1}q_{1}}_{\in G}y_{1}\in Gy_{1}=Y$. We have$y_{1}\in Y$(since$Y$is the$G$-orbit of$y_{1}$). Thus,$y_{1}\in Y^{B}$(since$y_{1}$is a$B$-fixed point). We also have$y_{2}\in Y$. Thus,$y_{2}\in Y^{B}$(since$y_{2}$is a$B$-fixed point). But$Y^{B}$is a$1$-element set (since$\left\vert Y^{B}\right\vert =1$). Thus, any two elements of$Y^{B}$are identical. Applying this to the two elements$y_{1}$and$y_{2}$of$Y^{B}$, we conclude that$y_{1}$and$y_{2}$are identical (since$y_{1}\in Y^{B}$and$y_{2}\in Y^{B}$). In other words,$y_{1}=y_{2}$. Now,$q_{2}^{-1}q_{1}y_{1}=y_{2}=y_{1}$. In other words,$q_{2}^{-1}q_{1}\in G_{y_{1}}$. In other words,$q_{1}G_{y_{1}}=q_{2}G_{y_{1}}$.
This completes the proof of Proposition 7.7c \textbf{(b)}.$\square$] \item \textbf{Definition 7.8:} Replace \textquotedblleft the category of finite sets$Y$equipped with a list$\left( Y_{1},\ldots,Y_{n-1}\right) $of subsets.\textquotedblright\ by \textquotedblleft the category whose objects are finite sets$Y$equipped with a list$\left( Y_{1},\ldots,Y_{n-1}\right) $of subsets. Such an object will be denoted$\left( Y;Y_{1},\ldots ,Y_{n-1}\right) $. Morphisms$\left( Y;Y_{1},\ldots,Y_{n-1}\right) \rightarrow\left( Z;Z_{1},\ldots,Z_{n-1}\right) $in$\mathcal{P}^{\prime}$shall be maps$Y\rightarrow Z$mapping each$Y_{i}$into$Z_{i}$% .\textquotedblright \item \textbf{Proof of Proposition 7.9:} Replace \textquotedblleft Consider an object$Y\in\mathcal{P}^{\prime}$.\textquotedblright\ by \textquotedblleft Consider an object$\left( Y;Y_{1},\ldots,Y_{n-1}\right) \in\mathcal{P}% ^{\prime}$(abbreviated as$Y$).\textquotedblright. \item \textbf{Proof of Proposition 7.9:} Replace \textquotedblleft Now consider a morphism$f:Y\rightarrow Z$in$\mathcal{P}^{\prime}$% \textquotedblright\ by \textquotedblleft Now consider a morphism$f:\left( Y;Y_{1},\ldots,Y_{n-1}\right) \rightarrow\left( Z;Z_{1},\ldots ,Z_{n-1}\right) $in$\mathcal{P}^{\prime}$\textquotedblright. \item \textbf{Proof of Proposition 7.9:} You write: \textquotedblleft so there is a unique$G$-map$G/P_{I_{y}}\rightarrow G/P_{I_{f\left( y\right) }}% $\textquotedblright. The uniqueness of this$G$-map might need a proof\footnote{\textit{Proof.} We have$I_{y}\subseteq I_{f\left( y\right) }$. Thus,$P_{I_{y}}\subseteq P_{I_{f\left( y\right) }}$(because if two subsets$I$and$J$of$\left\{ 1,2,\ldots,n-1\right\} $satisfy$I\subseteq J$, then they also satisfy$P_{I}\subseteq P_{J}$). Hence, there clearly exists a$G$-map$G/P_{I_{y}}\rightarrow G/P_{I_{f\left( y\right) }}$(namely, the map that sends any coset$gP_{I_{y}}$of$P_{I_{y}}$to the coset$gP_{I_{f\left( y\right) }}$of$P_{I_{f\left( y\right) }}$). 
It remains to prove that there exists \textbf{at most one}$G$-map$G/P_{I_{y}% }\rightarrow G/P_{I_{f\left( y\right) }}$. \par Let$V=\mathbb{F}_{p}^{n}$. If$I$is any subset of$\left\{ 1,2,\ldots ,n-1\right\} $, then we have$\operatorname*{Flag}\nolimits_{I}\left( V\right) \cong G/P_{I}$as$G$-sets (because$G$acts transitively on the$G$-set$\operatorname*{Flag}\nolimits_{I}\left( V\right) $, and the stabilizer of the element$\underline{E}\in\operatorname*{Flag}\nolimits_{I}% \left( V\right) $is$P_{I}$). In other words, if$I$is a subset of$\left\{ 1,2,\ldots,n-1\right\} $, then$G/P_{I}\cong\operatorname*{Flag}% \nolimits_{I}\left( V\right) $as$G$-sets. Thus, if$I$and$J$are two subsets of$\left\{ 1,2,\ldots,n-1\right\} $, then% $\operatorname*{Map}\nolimits_{G}\left( \underbrace{G/P_{I}}_{\cong% \operatorname*{Flag}\nolimits_{I}\left( V\right) },\underbrace{G/P_{J}% }_{\cong\operatorname*{Flag}\nolimits_{J}\left( V\right) }\right) \cong\operatorname*{Map}\nolimits_{G}\left( \operatorname*{Flag}% \nolimits_{I}\left( V\right) ,\operatorname*{Flag}\nolimits_{J}\left( V\right) \right) =% \begin{cases} \text{a singleton}, & \text{if }I\subseteq J\\ \varnothing, & \text{otherwise}% \end{cases}$ (by Proposition 7.7). Hence, if$I$and$J$are two subsets of$\left\{ 1,2,\ldots,n-1\right\} $, then the set$\operatorname*{Map}\nolimits_{G}% \left( G/P_{I},G/P_{J}\right) $has at most one element. In other words, if$I$and$J$are two subsets of$\left\{ 1,2,\ldots,n-1\right\} $, then there exists \textbf{at most one}$G$-map$G/P_{I}\rightarrow G/P_{J}$. Applying this to$I=I_{y}$and$J=I_{f\left( y\right) }$, we conclude that there exists \textbf{at most one}$G$-map$G/P_{I_{y}}\rightarrow G/P_{I_{f\left( y\right) }}$. This concludes our proof.}. 
\item \textbf{Proof of Proposition 7.9:} Replace \textquotedblleft this gives us a functor$\mathcal{P}^{\prime}\rightarrow\mathcal{P}$\textquotedblright% \ by \textquotedblleft this gives us a functor$F^{\prime}:\mathcal{P}% ^{\prime}\rightarrow\mathcal{P}$\textquotedblright. \item \textbf{Proof of Proposition 7.9:} You write: \textquotedblleft Note that% $\left( G/P_{I}\right) ^{P_{i}}=\operatorname*{Map}\nolimits_{G}\left( \operatorname*{Flag}\nolimits_{i}\left( V\right) ,\operatorname*{Flag}% \nolimits_{I}\left( V\right) \right) =% \begin{cases} 1 & \text{if }i\in I\\ \varnothing & \text{otherwise.}% \end{cases}$ Using this we see that$FF^{\prime}=1_{\mathcal{P}^{\prime}}$% .\textquotedblright. I would suggest replacing this by the following (more detailed) argument: \textquotedblleft Let$Y\in\mathcal{P}^{\prime}$be an object. Then,$F^{\prime}Y=\coprod_{y\in Y}G/P_{I_{y}}$, so that $\left( F^{\prime}Y\right) ^{B}=\left( \coprod_{y\in Y}G/P_{I_{y}}\right) ^{B}\cong\coprod_{y\in Y}\underbrace{\left( G/P_{I_{y}}\right) ^{B}% }_{\substack{=\left\{ \overline{1}\right\} \\\text{(by (\ref{eq.p7.7b.1}), applied to }I=I_{y}\text{)}}}=\coprod_{y\in Y}\left\{ \overline{1}\right\} .$ Hence, there exists a bijection$Y\rightarrow\left( F^{\prime}Y\right) ^{B}$that sends each$y\in Y$to the element$\overline{1}$of$G/P_{I_{y}}$. Denote this bijection by$\eta_{Y}$. We have$FF^{\prime}Y=\left( F^{\prime}Y\right) ^{B}$as sets (by the definition of the functor$F$). Thus, the bijection$\eta_{Y}:Y\rightarrow \left( F^{\prime}Y\right) ^{B}$is a bijection$Y\rightarrow FF^{\prime}Y$. This bijection$\eta_{Y}$is an isomorphism in the category$\mathcal{P}% ^{\prime}$\ \ \ \ \footnote{\textit{Proof.} Fix$j\in\left\{ 1,2,\ldots ,n-1\right\} $. \par For each$y\in Y$, we have$I_{y}=\left\{ i\in\left\{ 1,2,\ldots ,n-1\right\} \ \mid\ y\in Y_{i}\right\} $(by the definition of$I_{y}$). 
Hence, for each$y\in Y$and$i\in\left\{ 1,2,\ldots,n-1\right\} $, we have the following logical equivalence:% $\left( i\in I_{y}\right) \ \Longleftrightarrow\ \left( y\in Y_{i}\right) .$ Applying this to$i=j$, we conclude that for each$y\in Y$, we have the following logical equivalence:% \begin{equation} \left( j\in I_{y}\right) \ \Longleftrightarrow\ \left( y\in Y_{j}\right) . \label{pf.p7.9.u.fn1.2}% \end{equation} \par For every$y\in Y$, we have% \begin{align} \left( G/P_{I_{y}}\right) ^{P_{j}} & =% \begin{cases} \left\{ \overline{1}\right\} , & \text{if }j\in I_{y}\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} \ \ \ \ \ \ \ \ \ \ \left( \text{by Proposition 7.7b \textbf{(b)}, applied to }j\text{ instead of }i\right) \nonumber\\ & =% \begin{cases} \left\{ \overline{1}\right\} , & \text{if }y\in Y_{j}\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} \label{pf.p7.9.u.fn1.3}% \end{align} (because of the equivalence (\ref{pf.p7.9.u.fn1.2})). \par From$F^{\prime}Y=\coprod_{y\in Y}G/P_{I_{y}}$, we obtain% \begin{align*} \left( F^{\prime}Y\right) ^{P_{j}} & =\left( \coprod_{y\in Y}G/P_{I_{y}% }\right) ^{P_{j}}\cong\coprod_{y\in Y}\underbrace{\left( G/P_{I_{y}}\right) ^{P_{j}}}_{\substack{=% \begin{cases} \left\{ \overline{1}\right\} , & \text{if }y\in Y_{j}\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} \\\text{(by (\ref{pf.p7.9.u.fn1.3}))}}}=\coprod_{y\in Y}% \begin{cases} \left\{ \overline{1}\right\} , & \text{if }y\in Y_{j}\text{;}\\ \varnothing, & \text{otherwise}% \end{cases} \\ & =\coprod_{y\in Y_{j}}\left\{ \overline{1}\right\} =\eta_{Y}\left( Y_{j}\right) \end{align*} (because the definition of$\eta_{Y}$yields$\eta_{Y}\left( Y_{j}\right) =\coprod_{y\in Y_{j}}\left\{ \overline{1}\right\} $). \par Now, forget that we fixed$j$. We thus have proven that $\left( F^{\prime}Y\right) ^{P_{j}}=\eta_{Y}\left( Y_{j}\right)$ for each$j\in\left\{ 1,2,\ldots,n-1\right\} $.
Hence, we have$\eta _{Y}\left( Y_{j}\right) \subseteq\left( F^{\prime}Y\right) ^{P_{j}}$and$\left( \eta_{Y}\right) ^{-1}\left( \left( F^{\prime}Y\right) ^{P_{j}% }\right) \subseteq Y_{j}$for each$j\in\left\{ 1,2,\ldots,n-1\right\} $. \par Recall that$Y=\left( Y;Y_{1},\ldots,Y_{n-1}\right) $and$FF^{\prime }Y=\left( \left( F^{\prime}Y\right) ^{B};\left( F^{\prime}Y\right) ^{P_{1}},\ldots,\left( F^{\prime}Y\right) ^{P_{n-1}}\right) $(by the definition of$F$). Hence, the bijection$\eta_{Y}:Y\rightarrow FF^{\prime}Y$is a morphism in$\mathcal{P}^{\prime}$(since$\eta_{Y}\left( Y_{j}\right) \subseteq\left( F^{\prime}Y\right) ^{P_{j}}$for each$j\in\left\{ 1,2,\ldots,n-1\right\} $), and its inverse$\left( \eta_{Y}\right) ^{-1}:FF^{\prime}Y\rightarrow Y$is also a morphism in$\mathcal{P}^{\prime}$(since$\left( \eta_{Y}\right) ^{-1}\left( \left( F^{\prime}Y\right) ^{P_{j}}\right) \subseteq Y_{j}$for each$j\in\left\{ 1,2,\ldots ,n-1\right\} $). Thus,$\eta_{Y}$is an isomorphism in the category$\mathcal{P}^{\prime}$. Qed.}. Now, forget that we fixed$Y$. Thus, for each object$Y\in\mathcal{P}^{\prime }$, we have constructed an isomorphism$\eta_{Y}:Y\rightarrow FF^{\prime}Y$in the category$\mathcal{P}^{\prime}$. It is straightforward to see that this isomorphism$\eta_{Y}$is functorial in$Y$. Thus, we have defined a natural isomorphism$\eta:1_{\mathcal{P}^{\prime}}\rightarrow FF^{\prime}$. Therefore,$FF^{\prime}\cong1_{\mathcal{P}^{\prime}}$as functors.\textquotedblright. \item \textbf{Proof of Proposition 7.9:} After \textquotedblleft The only$B$-fixed point in$G/P_{I}$is the basepoint\textquotedblright, I would add \textquotedblleft(by Proposition 7.7b \textbf{(a)})\textquotedblright. \item \textbf{Proof of Proposition 7.9:} After \textquotedblleft and the basepoint is fixed by$P_{i}$iff$i\in I$\textquotedblright, I would add \textquotedblleft(by Proposition 7.7b \textbf{(b)})\textquotedblright. 
\item \textbf{Proof of Proposition 7.9:} Replace \textquotedblleft%$X=F^{\prime}FX$\textquotedblright\ by \textquotedblleft$X\cong F^{\prime}FX$by a functorial isomorphism (i.e., we have$1_{\mathcal{P}}\cong F^{\prime}% F$)\textquotedblright. More importantly, I believe that this claim should be proven. Here is my proof: [\textit{Proof of the functorial isomorphism }$1_{\mathcal{P}}\cong F^{\prime }F$\textit{:} Let$X\in\mathcal{P}$be an object. Then,$FX=\left( X^{B};X^{P_{1}},\ldots,X^{P_{n-1}}\right) $(by the definition of the functor$F$). Hence, the definition of the functor$F^{\prime}$shows that$F^{\prime }FX=\coprod_{y\in X^{B}}G/P_{I_{y}}$, where we set$I_{y}=\left\{ i\in\left\{ 1,2,\ldots,n-1\right\} \ \mid\ y\in X^{P_{i}}\right\} $for each$y\in X^{B}$. We shall now define a map$\varepsilon_{X}:F^{\prime }FX\rightarrow X$as follows: Let$p\in F^{\prime}FX$. Then,$p\in F^{\prime}FX=\coprod_{y\in X^{B}% }G/P_{I_{y}}$. In other words,$p\in G/P_{I_{y}}$for some$y\in X^{B}$. Consider this$y$. Write$p$in the form$p=\overline{q}$for some$q\in G$(where$\overline{q}$denotes the coset$qP_{I_{y}}$of$q$in$G/P_{I_{y}}$). Then, the element$qy$of$X$does not depend on the choice of$q$% \ \ \ \ \footnote{\textit{Proof.} Let$q_{1}$and$q_{2}$be two elements$q\in G$satisfying$p=\overline{q}$. We must prove that$q_{1}y=q_{2}y$. \par We know that$q_{1}$is an element$q\in G$satisfying$p=\overline{q}$. In other words,$q_{1}$is an element of$G$and satisfies$p=\overline{q_{1}}$. Similarly,$q_{2}$is an element of$G$and satisfies$p=\overline{q_{2}}$. From$\overline{q_{1}}=p=\overline{q_{2}}$, we conclude that$q_{1}P_{I_{y}% }=q_{2}P_{I_{y}}$. In other words,$q_{1}=q_{2}h$for some$h\in P_{I_{y}}$. \par Let$G_{y}$denote the stabilizer of$y$in$G$. Then, Proposition 7.7c \textbf{(a)} yields$P_{I_{y}}=G_{y}$. Therefore,$h\in P_{I_{y}}=G_{y}$. In other words,$hy=y$. \par Now,$\underbrace{q_{1}}_{=q_{2}h}y=q_{2}\underbrace{hy}_{=y}=q_{2}y$. This completes our proof.}. 
Hence, we can define$\varepsilon_{X}\left( p\right) $to be the element$qy$of$X$. Thus, a map$\varepsilon_{X}:F^{\prime }FX\rightarrow X$is defined. This map$\varepsilon_{X}:F^{\prime}FX\rightarrow X$is$G$% -equivariant\footnote{\textit{Proof.} Let$p\in F^{\prime}FX$and$g\in G$. We must show that$\varepsilon_{X}\left( gp\right) =g\varepsilon_{X}\left( p\right) $. \par Indeed, we have$p\in F^{\prime}FX=\coprod_{y\in X^{B}}G/P_{I_{y}}$. In other words,$p\in G/P_{I_{y}}$for some$y\in X^{B}$. Consider this$y$. Write$p$in the form$p=\overline{q}$for some$q\in G$(where$\overline{q}$denotes the coset$qP_{I_{y}}$of$q$in$G/P_{I_{y}}$). Then,$\varepsilon_{X}\left( p\right) =qy$(by the definition of$\varepsilon_{X}$). On the other hand,$gp\in G/P_{I_{y}}$(since$p\in G/P_{I_{y}}$) and$g\underbrace{p}% _{=\overline{q}}=g\overline{q}=\overline{gq}$with$gq\in G$. Hence, the definition of$\varepsilon_{X}$yields$\varepsilon_{X}\left( gp\right) =g\underbrace{qy}_{=\varepsilon_{X}\left( p\right) }=g\varepsilon_{X}\left( p\right) $. \par Now, forget that we fixed$p$and$g$. We thus have shown that$\varepsilon _{X}\left( gp\right) =g\varepsilon_{X}\left( p\right) $for each$p\in F^{\prime}FX$and$g\in G$. In other words, the map$\varepsilon_{X}% :F^{\prime}FX\rightarrow X$is$G$-equivariant. Qed.}. Moreover, this map$\varepsilon_{X}$is injective\footnote{\textit{Proof.} Let$p_{1}\in F^{\prime}FX$and$p_{2}\in F^{\prime}FX$be such that$\varepsilon_{X}\left( p_{1}\right) =\varepsilon_{X}\left( p_{2}\right) $. We shall show that$p_{1}=p_{2}$. \par For each$y\in X$, let$G_{y}$denote the stabilizer of$y$in$G$. \par We have$p_{1}\in F^{\prime}FX=\coprod_{y\in X^{B}}G/P_{I_{y}}$. In other words,$p_{1}\in G/P_{I_{y}}$for some$y\in X^{B}$. Denote this$y$by$y_{1}$. Thus,$y_{1}\in X^{B}$and$p_{1}\in G/P_{I_{y_{1}}}$. Write$p_{1}$in the form$p_{1}=\overline{q_{1}}^{/I_{y_{1}}}$for some$q_{1}\in G$(where$\overline{q_{1}}^{/I_{y_{1}}}$denotes the coset$q_{1}P_{I_{y_{1}}}$of$q_{1}$in$G/P_{I_{y_{1}}}$).
Then,$\varepsilon_{X}\left( p_{1}\right) =q_{1}y_{1}$(by the definition of$\varepsilon_{X}$). \par We have$p_{2}\in F^{\prime}FX=\coprod_{y\in X^{B}}G/P_{I_{y}}$. In other words,$p_{2}\in G/P_{I_{y}}$for some$y\in X^{B}$. Denote this$y$by$y_{2}$. Thus,$y_{2}\in X^{B}$and$p_{2}\in G/P_{I_{y_{2}}}$. Write$p_{2}$in the form$p_{2}=\overline{q_{2}}^{/I_{y_{2}}}$for some$q_{2}\in G$(where$\overline{q_{2}}^{/I_{y_{2}}}$denotes the coset$q_{2}P_{I_{y_{2}}}$of$q_{2}$in$G/P_{I_{y_{2}}}$). Then,$\varepsilon_{X}\left( p_{2}\right) =q_{2}y_{2}$(by the definition of$\varepsilon_{X}$). \par Now,$q_{1}y_{1}=\varepsilon_{X}\left( p_{1}\right) =\varepsilon_{X}\left( p_{2}\right) =q_{2}y_{2}$. Hence, Proposition 7.7c \textbf{(b)} shows that$y_{1}=y_{2}$and$q_{1}G_{y_{1}}=q_{2}G_{y_{1}}$. \par On the other hand, Proposition 7.7c \textbf{(a)} (applied to$y=y_{1}$) shows that$P_{I_{y_{1}}}=G_{y_{1}}$. Thus, the equality$q_{1}G_{y_{1}}% =q_{2}G_{y_{1}}$rewrites as$q_{1}P_{I_{y_{1}}}=q_{2}P_{I_{y_{1}}}$. \par The definition of$\overline{q_{1}}^{/I_{y_{1}}}$yields$\overline{q_{1}% }^{/I_{y_{1}}}=q_{1}P_{I_{y_{1}}}=q_{2}P_{I_{y_{1}}}=q_{2}P_{I_{y_{2}}}$(since$y_{1}=y_{2}$). Hence,$p_{1}=\overline{q_{1}}^{/I_{y_{1}}}% =q_{2}P_{I_{y_{2}}}$. \par The definition of$\overline{q_{2}}^{/I_{y_{2}}}$yields$\overline{q_{2}% }^{/I_{y_{2}}}=q_{2}P_{I_{y_{2}}}$. Hence,$p_{2}=\overline{q_{2}}^{/I_{y_{2}% }}=q_{2}P_{I_{y_{2}}}$. Comparing this with$p_{1}=q_{2}P_{I_{y_{2}}}$, we obtain$p_{1}=p_{2}$. \par Now, let us forget that we fixed$p_{1}$and$p_{2}$. We thus have proven that if$p_{1}\in F^{\prime}FX$and$p_{2}\in F^{\prime}FX$are such that$\varepsilon_{X}\left( p_{1}\right) =\varepsilon_{X}\left( p_{2}\right) $, then$p_{1}=p_{2}$. In other words, the map$\varepsilon_{X}$is injective. Qed.} and surjective\footnote{\textit{Proof.} Let$x\in X$. \par Let$Y=Gx$be the$G$-orbit of$x$. Then,$Y\cong G/G_{x}$(by the orbit-stabilizer theorem). 
However,$X$is a parabolic$G$-set; thus, the stabilizer of every element of$X$is parabolic. In other words, for every$\xi\in X$, the subgroup$G_{\xi}$of$G$is parabolic. Applying this to$\xi=x$, we conclude that the subgroup$G_{x}$of$G$is parabolic. In other words,$G_{x}$contains a conjugate of$B$(by the definition of \textquotedblleft parabolic\textquotedblright). In other words, there exists some$q\in G$such that$G_{x}\supseteq qBq^{-1}$. Consider this$q$. \par Set$z=q^{-1}x$. Clearly,$z=\underbrace{q^{-1}}_{\in G}x\in Gx=Y$. \par Recall that$G_{rx}=rG_{x}r^{-1}$for each$r\in G$. Applying this to$r=q^{-1}$, we obtain$G_{q^{-1}x}=q^{-1}\underbrace{G_{x}}_{\supseteq qBq^{-1}}\underbrace{\left( q^{-1}\right) ^{-1}}_{=q}\supseteq \underbrace{q^{-1}q}_{=1}B\underbrace{q^{-1}q}_{=1}=B$. Since$z=q^{-1}x$, we obtain$G_{z}=G_{q^{-1}x}\supseteq B$. Hence,$B\subseteq G_{z}$. In other words,$z\in Y^{B}$(since$z\in Y$). Thus,$G/P_{I_{z}}$is a component of the disjoint union$\coprod_{y\in X^{B}}G/P_{I_{y}}$. \par Let$\overline{q}$denote the coset$qP_{I_{z}}$of$q$in$G/P_{I_{z}}$. We have$\overline{q}\in G/P_{I_{z}}\subseteq\coprod_{y\in X^{B}}G/P_{I_{y}}$(since$G/P_{I_{z}}$is a component of the disjoint union$\coprod_{y\in X^{B}}G/P_{I_{y}}$). Thus,$\overline{q}\in\coprod_{y\in X^{B}}G/P_{I_{y}% }=F^{\prime}FX$. Therefore,$\varepsilon_{X}\left( \overline{q}\right) $is well-defined. Moreover, the definition of$\varepsilon_{X}$shows that$\varepsilon_{X}\left( \overline{q}\right) =qz$(since$\overline{q}\in G/P_{I_{z}}$and since$\overline{q}=\overline{q}$). Thus,$\varepsilon _{X}\left( \overline{q}\right) =q\underbrace{z}_{=q^{-1}x}% =\underbrace{qq^{-1}}_{=1}x=x$, so that$x=\varepsilon_{X}\left( \underbrace{\overline{q}}_{\in F^{\prime}FX}\right) \in\varepsilon_{X}\left( F^{\prime}FX\right) $. \par Now, forget that we fixed$x$. We thus have shown that$x\in\varepsilon _{X}\left( F^{\prime}FX\right) $for each$x\in X$.
In other words,$X\subseteq\varepsilon_{X}\left( F^{\prime}FX\right) $. In other words, the map$\varepsilon_{X}$is surjective. Qed.}. Hence, the map$\varepsilon_{X}$is bijective, and thus is a$G$-set isomorphism (since it is$G$-equivariant). Now, forget that we fixed$X$. Thus, for each object$X\in\mathcal{P}$, we have constructed a$G$-set isomorphism$\varepsilon_{X}:F^{\prime }FX\rightarrow X$. Moreover, this isomorphism$\varepsilon_{X}$is functorial in$X$\ \ \ \ \footnote{\textit{Proof.} Let$Y$and$Z$be two objects of$\mathcal{P}$, and let$f:Y\rightarrow Z$be a$G$-equivariant map. We must prove that the diagram% \begin{equation}% %TCIMACRO{\TeXButton{x}{\xymatrix{ %F^\prime F Y \ar[r]^-{\varepsilon_Y} \ar[d]_{F^\prime F f} & Y \ar[d]^{f} \\ %F^\prime F Z \ar[r]_-{\varepsilon_Z} & Z %}} }% %BeginExpansion \xymatrix{ F^\prime F Y \ar[r]^-{\varepsilon_Y} \ar[d]_{F^\prime F f} & Y \ar[d]^{f} \\ F^\prime F Z \ar[r]_-{\varepsilon_Z} & Z } %EndExpansion \label{pf.p7.9.epsilon-is-functorial.diagram}% \end{equation} is commutative. \par Let$p\in F^{\prime}FY$. Then,$FY=\left( Y^{B};Y^{P_{1}},\ldots,Y^{P_{n-1}% }\right) $(by the definition of the functor$F$). Hence, the definition of the functor$F^{\prime}$shows that$F^{\prime}FY=\coprod_{y\in Y^{B}% }G/P_{I_{y}}$, where we set$I_{y}=\left\{ i\in\left\{ 1,2,\ldots ,n-1\right\} \ \mid\ y\in Y^{P_{i}}\right\} $for each$y\in Y^{B}$. \par We have$p\in F^{\prime}FY=\coprod_{y\in Y^{B}}G/P_{I_{y}}$. In other words,$p\in G/P_{I_{y}}$for some$y\in Y^{B}$. Consider this$y$. Write$p$in the form$p=\overline{q}$for some$q\in G$(where$\overline{q}$denotes the coset$qP_{I_{y}}$of$q$in$G/P_{I_{y}}$). Then,$\varepsilon_{Y}\left( p\right) =qy$(by the definition of$\varepsilon_{Y}$). \par The definition of the action of the functor$F$on the morphism$f:Y\rightarrow Z$shows that$Ff:Y^{B}\rightarrow Z^{B}$is the restriction of the map$f:Y\rightarrow Z$to the$B$-fixed points. Thus,$\left( Ff\right) \left( y\right) =f\left( y\right) $. 
\par On the other hand, the definition of the action of the functor$F^{\prime}$on the morphism$Ff:FY\rightarrow FZ$yields$\left( F^{\prime}Ff\right) \left( \overline{q}\right) =\overline{q}\in G/P_{I_{\left( Ff\right) \left( y\right) }}=G/P_{I_{f\left( y\right) }}$(since$\left( Ff\right) \left( y\right) =f\left( y\right) $). Hence, the definition of$\varepsilon_{Z}$yields$\varepsilon_{Z}\left( \left( F^{\prime}Ff\right) \left( \overline{q}\right) \right) =qf\left( y\right) $. Hence,% $\left( \varepsilon_{Z}\circ\left( F^{\prime}Ff\right) \right) \left( \underbrace{p}_{=\overline{q}}\right) =\left( \varepsilon_{Z}\circ\left( F^{\prime}Ff\right) \right) \left( \overline{q}\right) =\varepsilon _{Z}\left( \left( F^{\prime}Ff\right) \left( \overline{q}\right) \right) =qf\left( y\right) .$ Comparing this with% $\left( f\circ\varepsilon_{Y}\right) \left( p\right) =f\left( \underbrace{\varepsilon_{Y}\left( p\right) }_{=qy}\right) =f\left( qy\right) =qf\left( y\right) \ \ \ \ \ \ \ \ \ \ \left( \text{since the map }f\text{ is }G\text{-equivariant}\right) ,$ we obtain$\left( \varepsilon_{Z}\circ\left( F^{\prime}Ff\right) \right) \left( p\right) =\left( f\circ\varepsilon_{Y}\right) \left( p\right) $. \par Now, let us forget that we fixed$p$. We thus have proven that$\left( \varepsilon_{Z}\circ\left( F^{\prime}Ff\right) \right) \left( p\right) =\left( f\circ\varepsilon_{Y}\right) \left( p\right) $for each$p\in F^{\prime}FY$. In other words,$\varepsilon_{Z}\circ\left( F^{\prime }Ff\right) =f\circ\varepsilon_{Y}$. In other words, the diagram (\ref{pf.p7.9.epsilon-is-functorial.diagram}) is commutative. This completes the proof.}. Hence, we have defined a natural isomorphism$\varepsilon :F^{\prime}F\rightarrow1_{\mathcal{P}}$. Therefore,$1_{\mathcal{P}}\cong F^{\prime}F$as functors.] 
\item \textbf{\S 8:} I would begin this section with the following introduction: \textquotedblleft We consider the localization$\mathbb{Z}_{\left( p\right) }$of the ring$\mathbb{Z}$at its prime ideal$\left( p\right) =p\mathbb{Z}$. Explicitly,$\mathbb{Z}_{\left( p\right) }$is the subring% $\left\{ \dfrac{a}{b}\ \mid\ \left( a,b\right) \in\mathbb{Z}\times \mathbb{Z}\text{ and }\gcd\left( b,p\right) =1\right\}$ of$\mathbb{Q}$. \textbf{Lemma 8.0a.} Let$V$be an$n$-dimensional$\mathbb{F}_{p}$-vector space. Then: \textbf{(a)} We have$\left\vert \operatorname*{Flag}\left( V\right) \right\vert =\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }$. \textbf{(b)} We have$\left\vert \operatorname*{Flag}\left( V\right) \right\vert \equiv1\operatorname{mod}p$and$\left\vert \operatorname*{Flag}% \left( V\right) \right\vert ^{-1}\in\mathbb{Z}_{\left( p\right) }$. [\textit{Proof of Lemma 8.0a.} Fix some$\underline{W}\in\operatorname*{Flag}% \left( V\right) $. (Such a$\underline{W}$clearly exists.) For each$\sigma\in\Sigma_{n}$, there is a subset$Y\left( \sigma,\underline{W}% \right) \subset\operatorname*{Flag}\left( V\right) $defined by$Y\left( \sigma,\underline{W}\right) =\left\{ \underline{U}\in\operatorname*{Flag}% \left( V\right) \ \mid\ \delta\left( \underline{U},\underline{W}\right) =\sigma\right\} $. (Here,$\delta\left( \underline{U},\underline{W}\right) $is the Jordan permutation, defined as in \S 4.) Clearly,$\operatorname*{Flag}\left( V\right) $is the union of its disjoint subsets$\left\{ \underline{U}\in\operatorname*{Flag}\left( V\right) \ \mid\ \delta\left( \underline{U},\underline{W}\right) =\sigma\right\} $for all$\sigma\in\Sigma_{n}$(because for each$\underline{U}\in \operatorname*{Flag}\left( V\right) $, there is exactly one$\sigma\in \Sigma_{n}$satisfying$\delta\left( \underline{U},\underline{W}\right) =\sigma$).
Hence,% \begin{align*} \left\vert \operatorname*{Flag}\left( V\right) \right\vert & =\sum _{\sigma\in\Sigma_{n}}\left\vert \underbrace{\left\{ \underline{U}% \in\operatorname*{Flag}\left( V\right) \ \mid\ \delta\left( \underline{U}% ,\underline{W}\right) =\sigma\right\} }_{=Y\left( \sigma,\underline{W}% \right) }\right\vert =\sum_{\sigma\in\Sigma_{n}}\underbrace{\left\vert Y\left( \sigma,\underline{W}\right) \right\vert }_{\substack{=p^{l\left( \sigma\right) }\\\text{(by Corollary 5.2a, applied}\\\text{to }V\text{ and }\underline{W}\text{ instead of }W\text{ and }\underline{V}\text{)}}}\\ & =\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }. \end{align*} This proves Lemma 8.0a \textbf{(a)}. \textbf{(b)} Lemma 8.0a \textbf{(a)} yields% \begin{align*} \left\vert \operatorname*{Flag}\left( V\right) \right\vert & =\sum _{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }=\underbrace{p^{l\left( \operatorname*{id}\right) }}_{=p^{0}=1}+\sum_{\substack{\sigma\in\Sigma _{n};\\\sigma\neq\operatorname*{id}}}\underbrace{p^{l\left( \sigma\right) }% }_{\substack{\equiv0\operatorname{mod}p\\\text{(since }l\left( \sigma\right) \geq1\\\text{(since }\sigma\neq\operatorname*{id}\text{))}}}\\ & \equiv1+\underbrace{\sum_{\substack{\sigma\in\Sigma_{n};\\\sigma \neq\operatorname*{id}}}0}_{=0}=1\operatorname{mod}p. \end{align*} Hence,\left\vert \operatorname*{Flag}\left( V\right) \right\vert $is coprime to$p$. Thus,$\left\vert \operatorname*{Flag}\left( V\right) \right\vert ^{-1}\in\mathbb{Z}_{\left( p\right) }$. This proves Lemma 8.0a \textbf{(b)}.$\square$]\textquotedblright You use Lemma 8.0a \textbf{(b)} implicitly in Definition 8.5. \item \textbf{Definition 8.1:} Replace \textquotedblleft ring\textquotedblright\ by \textquotedblleft$\mathbb{Z}_{\left( p\right) }% $-algebra\textquotedblright. 
\item \textbf{Definition 8.1:} Replace \textquotedblleft$\mathbb{Z}\left[ \operatorname*{Flag}\right] $\textquotedblright\ by \textquotedblleft%$\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] $\textquotedblright. \item \textbf{\S 8, between Definition 8.1 and Definition 8.2:} You write: \textquotedblleft this construction gives an equivalence$\left[ \mathcal{V},\left\{ \operatorname*{sets}\right\} \right] =\left\{ G-\text{sets}\right\} $\textquotedblright. This argument is non-constructive\footnote{Namely, it seems to use \par \begin{itemize} \item either the fact that every category is equivalent to its skeleton, \par \item or the fact that any functor that is essentially surjective, full and faithful must be an equivalence of categories. \end{itemize} \par As far as I know, neither of these two facts has a constructive proof.} and (in my opinion) overkill. It appears to me that you are not actually using the full power of this equivalence either; instead, you seem to only use the natural$\mathbb{Z}_{\left( p\right) }$-algebra isomorphism% \begin{align*} \mathcal{H} & =\operatorname*{End}\nolimits_{\mathcal{VA}}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] \right) \cong\operatorname*{End}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ G\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}% \left( \mathbb{F}_{p}^{n}\right) \right] \right) \\ & \cong\operatorname*{End}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ G\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ G/B\right] \right) , \end{align*} which has an elementary and constructive proof. Namely, this isomorphism follows from Proposition 8.1b further below. Before I state this proposition, let me state a simple fact from category theory: \textbf{Proposition 8.1a.} Let$\mathcal{C}$and$\mathcal{D}$be two categories. Let$C\in\mathcal{C}$be an object. Let$F:\mathcal{C}% \rightarrow\mathcal{D}$be a functor.
Let$\mathcal{E}_{F}\left( C\right) $denote the subset% $\left\{ f\in\operatorname*{End}\nolimits_{\mathcal{D}}\left( F\left( C\right) \right) \ \mid\ f\circ F\left( k\right) =F\left( k\right) \circ f\text{ for each }k\in\operatorname*{End}\nolimits_{\mathcal{C}}C\right\}$ of$\operatorname*{End}\nolimits_{\mathcal{D}}\left( F\left( C\right) \right) $. \textbf{(a)} The subset$\mathcal{E}_{F}\left( C\right) $is a submonoid of$\operatorname*{End}\nolimits_{\mathcal{D}}\left( F\left( C\right) \right) $. Moreover, there is a canonical monoid homomorphism$\varepsilon :\operatorname*{End}\nolimits_{\left[ \mathcal{C},\mathcal{D}\right] }F\rightarrow\mathcal{E}_{F}\left( C\right) $that sends each natural transformation$\alpha:F\Longrightarrow F$to its component$\alpha _{C}:F\left( C\right) \rightarrow F\left( C\right) $. \textbf{(b)} Consider this$\varepsilon$. Assume that each two objects of$\mathcal{C}$are isomorphic. Then,$\varepsilon$is a monoid isomorphism. [\textit{Proof of Proposition 8.1a.} \textbf{(a)} This is a simple exercise in category theory. \textbf{(b)} The map$\varepsilon$is injective\footnote{\textit{Proof.} Let$\alpha$and$\beta$be two elements of$\operatorname*{End}\nolimits_{\left[ \mathcal{C},\mathcal{D}\right] }F$such that$\varepsilon\left( \alpha\right) =\varepsilon\left( \beta\right) $. We shall show that$\alpha=\beta$. \par Let$A\in\mathcal{C}$be any object. Then, the objects$A$and$C$of$\mathcal{C}$are isomorphic (since each two objects of$\mathcal{C}$are isomorphic). In other words, there exists an isomorphism$j:C\rightarrow A$in$\mathcal{C}$. Consider this$j$. Thus, the morphism$j^{-1}$exists (since$j$is an isomorphism). \par Recall that$\alpha\in\operatorname*{End}\nolimits_{\left[ \mathcal{C}% ,\mathcal{D}\right] }F$. In other words,$\alpha$is a natural transformation from$F$to$F$.
Thus, the diagram% $% %TCIMACRO{\TeXButton{x}{\xymatrix{ %F\left(C\right) \ar[r]^{\alpha_C} \ar[d]_{F\left(j\right)} & F\left %(C\right) \ar[d]^{F\left(j\right)} \\ %F\left(A\right) \ar[r]_{\alpha_A} & F\left(A\right) %}}}% %BeginExpansion \xymatrix{ F\left(C\right) \ar[r]^{\alpha_C} \ar[d]_{F\left(j\right)} & F\left (C\right) \ar[d]^{F\left(j\right)} \\ F\left(A\right) \ar[r]_{\alpha_A} & F\left(A\right) }% %EndExpansion $ is commutative. In other words, we have$\alpha_{A}\circ F\left( j\right) =F\left( j\right) \circ\alpha_{C}$. But$F$is a functor; thus,$F\left( j^{-1}\right) =\left( F\left( j\right) \right) ^{-1}$. The definition of$\varepsilon$yields$\varepsilon\left( \alpha\right) =\alpha_{C}$. Thus,% $F\left( j\right) \circ\underbrace{\varepsilon\left( \alpha\right) }_{=\alpha_{C}}\circ\underbrace{F\left( j^{-1}\right) }_{=\left( F\left( j\right) \right) ^{-1}}=\underbrace{F\left( j\right) \circ\alpha_{C}% }_{=\alpha_{A}\circ F\left( j\right) }\circ\left( F\left( j\right) \right) ^{-1}=\alpha_{A}\circ\underbrace{F\left( j\right) \circ\left( F\left( j\right) \right) ^{-1}}_{=\operatorname*{id}}=\alpha_{A}.$ Hence,$\alpha_{A}=F\left( j\right) \circ\varepsilon\left( \alpha\right) \circ F\left( j^{-1}\right) $. The same argument (applied to$\beta$instead of$\alpha$) shows that$\beta_{A}=F\left( j\right) \circ\varepsilon\left( \beta\right) \circ F\left( j^{-1}\right) $. \par Now,% $\alpha_{A}=F\left( j\right) \circ\underbrace{\varepsilon\left( \alpha\right) }_{=\varepsilon\left( \beta\right) }\circ F\left( j^{-1}\right) =F\left( j\right) \circ\varepsilon\left( \beta\right) \circ F\left( j^{-1}\right) =\beta_{A}.$ \par Now, forget that we fixed$A$. We thus have proven that$\alpha_{A}=\beta_{A}$for each object$A\in\mathcal{C}$. In other words,$\alpha=\beta$. \par Now, forget that we fixed$\alpha$and$\beta$.
We thus have shown that if$\alpha$and$\beta$are two elements of$\operatorname*{End}% \nolimits_{\left[ \mathcal{C},\mathcal{D}\right] }F$such that$\varepsilon\left( \alpha\right) =\varepsilon\left( \beta\right) $, then$\alpha=\beta$. In other words, the map$\varepsilon$is injective. Qed.}. We shall now show that$\varepsilon$is surjective. Indeed, fix any$\rho\in\mathcal{E}_{F}\left( C\right) $. Let$A\in \mathcal{C}$be any object. We are going to construct a morphism$\alpha _{A}:F\left( A\right) \rightarrow F\left( A\right) $in$\mathcal{D}$. We have $\rho\in\mathcal{E}_{F}\left( C\right) =\left\{ f\in\operatorname*{End}% \nolimits_{\mathcal{D}}\left( F\left( C\right) \right) \ \mid\ f\circ F\left( k\right) =F\left( k\right) \circ f\text{ for each }k\in \operatorname*{End}\nolimits_{\mathcal{C}}C\right\}$ (by the definition of$\mathcal{E}_{F}\left( C\right) $). In other words,$\rho$is an element of$\operatorname*{End}\nolimits_{\mathcal{D}}\left( F\left( C\right) \right) $and satisfies% \begin{equation} \left( \rho\circ F\left( k\right) =F\left( k\right) \circ\rho\text{ for each }k\in\operatorname*{End}\nolimits_{\mathcal{C}}C\right) . \label{pf.p8.1a.b.rhoF}% \end{equation} The objects$A$and$C$of$\mathcal{C}$are isomorphic (since each two objects of$\mathcal{C}$are isomorphic). In other words, there exists an isomorphism$j:C\rightarrow A$in$\mathcal{C}$. Consider this$j$. Thus, the morphism$j^{-1}$exists (since$j$is an isomorphism). Now, define a morphism$\alpha_{A}:F\left( A\right) \rightarrow F\left( A\right) $in$\mathcal{D}$by$\alpha_{A}=F\left( j\right) \circ\rho\circ F\left( j^{-1}\right) $. This morphism$\alpha_{A}$is independent of the choice of$j$\ \ \ \ \footnote{\textit{Proof.} Let$j_{1}$and$j_{2}$be two isomorphisms$j:C\rightarrow A$in$\mathcal{C}$. We will prove that$F\left( j_{1}\right) \circ\rho\circ F\left( j_{1}^{-1}\right) =F\left( j_{2}\right) \circ\rho\circ F\left( j_{2}^{-1}\right) $.
\par We recall that$j_{1}$and$j_{2}$are two isomorphisms$C\rightarrow A$in$\mathcal{C}$. Thus,$j_{1}^{-1}\circ j_{2}:C\rightarrow C$is an isomorphism in$\mathcal{C}$as well. In particular,$j_{1}^{-1}\circ j_{2}\in \operatorname*{End}\nolimits_{\mathcal{C}}C$. Thus, (\ref{pf.p8.1a.b.rhoF}) (applied to$k=j_{1}^{-1}\circ j_{2}$) yields$\rho\circ F\left( j_{1}% ^{-1}\circ j_{2}\right) =F\left( j_{1}^{-1}\circ j_{2}\right) \circ\rho$. But$F$is a functor; thus,$F\left( j_{1}^{-1}\circ j_{2}\right) =\left( F\left( j_{1}\right) \right) ^{-1}\circ F\left( j_{2}\right) $. But% \begin{align*} & F\left( j_{1}\right) \circ\rho\circ\underbrace{F\left( j_{1}% ^{-1}\right) }_{\substack{=\left( F\left( j_{1}\right) \right) ^{-1}\\\text{(since }F\text{ is a functor)}}}\\ & =F\left( j_{1}\right) \circ\rho\circ\left( F\left( j_{1}\right) \right) ^{-1}\\ & =F\left( j_{1}\right) \circ\rho\circ\underbrace{\left( F\left( j_{1}\right) \right) ^{-1}\circ F\left( j_{2}\right) }_{=F\left( j_{1}^{-1}\circ j_{2}\right) }\circ\underbrace{\left( F\left( j_{2}\right) \right) ^{-1}}_{\substack{=F\left( j_{2}^{-1}\right) \\\text{(since }F\text{ is a functor)}}}\\ & \ \ \ \ \ \ \ \ \ \ \left( \text{since }F\left( j_{1}\right) \circ \rho\circ\left( F\left( j_{1}\right) \right) ^{-1}\circ \underbrace{F\left( j_{2}\right) \circ\left( F\left( j_{2}\right) \right) ^{-1}}_{=\operatorname*{id}}=F\left( j_{1}\right) \circ\rho \circ\left( F\left( j_{1}\right) \right) ^{-1}\right) \\ & =F\left( j_{1}\right) \circ\underbrace{\rho\circ F\left( j_{1}^{-1}\circ j_{2}\right) }_{=F\left( j_{1}^{-1}\circ j_{2}\right) \circ\rho}\circ F\left( j_{2}^{-1}\right) \\ & =\underbrace{F\left( j_{1}\right) \circ F\left( j_{1}^{-1}\circ j_{2}\right) }_{\substack{=F\left( j_{1}\circ j_{1}^{-1}\circ j_{2}\right) \\\text{(since }F\text{ is a functor)}}}\circ\rho\circ F\left( j_{2}% ^{-1}\right) \\ & =F\left( \underbrace{j_{1}\circ j_{1}^{-1}}_{=\operatorname*{id}}\circ j_{2}\right) \circ\rho\circ F\left( j_{2}^{-1}\right) =F\left(
j_{2}\right) \circ\rho\circ F\left( j_{2}^{-1}\right) . \end{align*} \par Now, forget that we fixed$j_{1}$and$j_{2}$. We thus have shown that if$j_{1}$and$j_{2}$are two isomorphisms$j:C\rightarrow A$in$\mathcal{C}$, then$F\left( j_{1}\right) \circ\rho\circ F\left( j_{1}^{-1}\right) =F\left( j_{2}\right) \circ\rho\circ F\left( j_{2}^{-1}\right) $. In other words, the morphism$F\left( j\right) \circ\rho\circ F\left( j^{-1}\right) $is independent of the choice of$j$. In other words, the morphism$\alpha_{A}$is independent of the choice of$j$(since$\alpha_{A}=F\left( j\right) \circ\rho\circ F\left( j^{-1}\right) $). Qed.}. Now, forget that we fixed$A$. Thus, for each$A\in\mathcal{C}$, we have defined a morphism$\alpha_{A}:F\left( A\right) \rightarrow F\left( A\right) $in$\mathcal{D}$. This morphism$\alpha_{A}$satisfies% \begin{equation} \alpha_{A}=F\left( j\right) \circ\rho\circ F\left( j^{-1}\right) \ \ \ \ \ \ \ \ \ \ \text{for every isomorphism }j:C\rightarrow A\text{ in }\mathcal{C} \label{pf.p8.1a.b.alphaA}% \end{equation} (by the definition of$\alpha_{A}$). If$A$and$B$are two objects in$\mathcal{C}$, and if$f:A\rightarrow B$is a morphism in$\mathcal{C}$, then the diagram% \begin{equation}% %TCIMACRO{\TeXButton{x}{\xymatrix{ %F\left(A\right) \ar[r]^{\alpha_A} \ar[d]_{F\left(f\right)} & F\left %(A\right) \ar[d]^{F\left(f\right)} \\ %F\left(B\right) \ar[r]_{\alpha_B} & F\left(B\right) %}} }% %BeginExpansion \xymatrix{ F\left(A\right) \ar[r]^{\alpha_A} \ar[d]_{F\left(f\right)} & F\left (A\right) \ar[d]^{F\left(f\right)} \\ F\left(B\right) \ar[r]_{\alpha_B} & F\left(B\right) } %EndExpansion \label{pf.p8.1a.b.naturality}% \end{equation} is commutative\footnote{\textit{Proof.} Let$A$and$B$be two objects in$\mathcal{C}$. Let$f:A\rightarrow B$be a morphism in$\mathcal{C}$. \par The objects$A$and$C$of$\mathcal{C}$are isomorphic (since each two objects of$\mathcal{C}$are isomorphic). In other words, there exists an isomorphism$j:C\rightarrow A$in$\mathcal{C}$.
Consider this$j$. Thus, (\ref{pf.p8.1a.b.alphaA}) yields$\alpha_{A}=F\left( j\right) \circ\rho\circ F\left( j^{-1}\right) $. \par The objects$B$and$C$of$\mathcal{C}$are isomorphic (since each two objects of$\mathcal{C}$are isomorphic). In other words, there exists an isomorphism$i:C\rightarrow B$in$\mathcal{C}$. Consider this$i$. Thus, (\ref{pf.p8.1a.b.alphaA}) (applied to$B$and$i$instead of$A$and$j$) yields$\alpha_{B}=F\left( i\right) \circ\rho\circ F\left( i^{-1}\right) $. \par Since$i:C\rightarrow B$is an isomorphism, its inverse$i^{-1}:B\rightarrow C$is well-defined and an isomorphism as well. The composition$i^{-1}\circ f\circ j:C\rightarrow C$is thus an endomorphism of$C$in$\mathcal{C}$. In other words,$i^{-1}\circ f\circ j\in\operatorname*{End}\nolimits_{\mathcal{C}% }C$. Hence, (\ref{pf.p8.1a.b.rhoF}) (applied to$k=i^{-1}\circ f\circ j$) yields$\rho\circ F\left( i^{-1}\circ f\circ j\right) =F\left( i^{-1}\circ f\circ j\right) \circ\rho$. But$F$is a functor; thus,$F\left( i^{-1}\circ f\circ j\right) =\left( F\left( i\right) \right) ^{-1}\circ F\left( f\right) \circ F\left( j\right) $.
Now,% \begin{align*} \underbrace{\alpha_{B}}_{=F\left( i\right) \circ\rho\circ F\left( i^{-1}\right) }\circ F\left( f\right) \circ F\left( j\right) & =F\left( i\right) \circ\rho\circ\underbrace{F\left( i^{-1}\right) \circ F\left( f\right) \circ F\left( j\right) }_{=F\left( i^{-1}\circ f\circ j\right) }=F\left( i\right) \circ\underbrace{\rho\circ F\left( i^{-1}\circ f\circ j\right) }_{=F\left( i^{-1}\circ f\circ j\right) \circ\rho}\\ & =F\left( i\right) \circ\underbrace{F\left( i^{-1}\circ f\circ j\right) }_{=\left( F\left( i\right) \right) ^{-1}\circ F\left( f\right) \circ F\left( j\right) }\circ\rho=\underbrace{F\left( i\right) \circ\left( F\left( i\right) \right) ^{-1}}_{=\operatorname*{id}}\circ F\left( f\right) \circ F\left( j\right) \circ\rho\\ & =F\left( f\right) \circ F\left( j\right) \circ\rho, \end{align*} so that% $\underbrace{\alpha_{B}\circ F\left( f\right) \circ F\left( j\right) }_{=F\left( f\right) \circ F\left( j\right) \circ\rho}\circ F\left( j^{-1}\right) =F\left( f\right) \circ\underbrace{F\left( j\right) \circ\rho\circ F\left( j^{-1}\right) }_{=\alpha_{A}}=F\left( f\right) \circ\alpha_{A}.$ Comparing this with% $\alpha_{B}\circ F\left( f\right) \circ F\left( j\right) \circ \underbrace{F\left( j^{-1}\right) }_{\substack{=\left( F\left( j\right) \right) ^{-1}\\\text{(since }F\text{ is a functor)}}}=\alpha_{B}\circ F\left( f\right) \circ\underbrace{F\left( j\right) \circ\left( F\left( j\right) \right) ^{-1}}_{=\operatorname*{id}}=\alpha_{B}\circ F\left( f\right) ,$ we obtainF\left( f\right) \circ\alpha_{A}=\alpha_{B}\circ F\left( f\right) $. In other words, the diagram (\ref{pf.p8.1a.b.naturality}) is commutative. Qed.}. Therefore, the morphisms$\alpha_{A}$(defined for all objects$A\in\mathcal{C}$) can be assembled to a natural transformation$\alpha:F\Longrightarrow F$. Consider this$\alpha$. We have$\alpha \in\operatorname*{End}\nolimits_{\left[ \mathcal{C},\mathcal{D}\right] }F$(since$\alpha$is a natural transformation$F\Longrightarrow F$). 
Moreover, the definition of$\varepsilon$shows that$\varepsilon\left( \alpha\right) =\alpha_{C}$. But$\operatorname*{id}:C\rightarrow C$is an isomorphism in$\mathcal{C}$. Hence, (\ref{pf.p8.1a.b.alphaA}) (applied to$A=C$and$j=\operatorname*{id}$) yields% $\alpha_{C}=F\left( \operatorname*{id}\right) \circ\rho\circ F\left( \underbrace{\operatorname*{id}\nolimits^{-1}}_{=\operatorname*{id}}\right) =\underbrace{F\left( \operatorname*{id}\right) }% _{\substack{=\operatorname*{id}\\\text{(since }F\text{ is a functor)}}% }\circ\rho\circ\underbrace{F\left( \operatorname*{id}\right) }% _{\substack{=\operatorname*{id}\\\text{(since }F\text{ is a functor)}}}=\rho.$ Comparing this with$\varepsilon\left( \alpha\right) =\alpha_{C}$, we obtain$\rho=\varepsilon\left( \underbrace{\alpha}_{\in\operatorname*{End}% \nolimits_{\left[ \mathcal{C},\mathcal{D}\right] }F}\right) \in \varepsilon\left( \operatorname*{End}\nolimits_{\left[ \mathcal{C}% ,\mathcal{D}\right] }F\right) $. Now, forget that we fixed$\rho$. We thus have shown that$\rho\in \varepsilon\left( \operatorname*{End}\nolimits_{\left[ \mathcal{C}% ,\mathcal{D}\right] }F\right) $for each$\rho\in\mathcal{E}_{F}\left( C\right) $. In other words,$\mathcal{E}_{F}\left( C\right) \subseteq \varepsilon\left( \operatorname*{End}\nolimits_{\left[ \mathcal{C}% ,\mathcal{D}\right] }F\right) $. In other words, the map$\varepsilon$is surjective. So we know that the map$\varepsilon$is both injective and surjective. Thus,$\varepsilon$is bijective. Furthermore,$\varepsilon$is a monoid homomorphism. Thus,$\varepsilon$is a bijective monoid homomorphism. Therefore,$\varepsilon$is a monoid isomorphism. 
This proves Proposition 8.1a \textbf{(b)}.$\square] \textbf{Proposition 8.1b.} We have% \begin{align*} \mathcal{H} & =\operatorname*{End}\nolimits_{\mathcal{VA}}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] \right) \cong\operatorname*{End}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ G\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}% \left( \mathbb{F}_{p}^{n}\right) \right] \right) \\ & \cong\operatorname*{End}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ G\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ G/B\right] \right) \end{align*} as\mathbb{Z}_{\left( p\right) }$-algebras. More precisely, the following holds: \textbf{(a)} Recall the definition of$\mathcal{E}_{F}\left( C\right) $in Proposition 8.1a (where$\mathcal{C}$and$\mathcal{D}$are two categories,$C\in\mathcal{C}$is an object, and$F:\mathcal{C}\rightarrow\mathcal{D}$is a functor). Applying this to$\mathcal{C}=\mathcal{V}$,$\mathcal{D}% =\mathcal{A}$,$C=\mathbb{F}_{p}^{n}$and$F=\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] $, we obtain a set$\mathcal{E}% _{\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] }\left( \mathbb{F}_{p}^{n}\right) $. 
Proposition 8.1a \textbf{(a)} (applied to$\mathcal{C}=\mathcal{V}$,$\mathcal{D}=\mathcal{A}$,$C=\mathbb{F}_{p}^{n}$and$F=\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] $) shows that this set$\mathcal{E}_{\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] }\left( \mathbb{F}_{p}^{n}\right) $is a submonoid of$\operatorname*{End}\nolimits_{\mathcal{A}}\left( \left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] \right) \left( \mathbb{F}_{p}^{n}\right) \right) $, and that there is a monoid homomorphism$\varepsilon:\operatorname*{End}\nolimits_{\left[ \mathcal{V}% ,\mathcal{A}\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] \right) \rightarrow\mathcal{E}_{\mathbb{Z}% _{\left( p\right) }\left[ \operatorname*{Flag}\right] }\left( \mathbb{F}_{p}^{n}\right) $. Consider this$\varepsilon$. Then,$\varepsilon$is a$\mathbb{Z}_{\left( p\right) }$-algebra isomorphism$\operatorname*{End}\nolimits_{\mathcal{VA}}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] \right) \rightarrow \operatorname*{End}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ G\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \right] \right) $. Thus,% $\operatorname*{End}\nolimits_{\mathcal{VA}}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] \right) \cong% \operatorname*{End}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ G\right] }\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \right] \right)$ as$\mathbb{Z}_{\left( p\right) }$-algebras. \textbf{(b)} Consider the complete flag$\underline{E}=\left( E_{0}% \sigma\left( i+1\right) $\textquotedblright\ by \textquotedblleft If$\sigma\left( i\right) <\sigma\left( i+1\right) $\textquotedblright. 
\item \textbf{Proof of Proposition 8.11:} Replace \textquotedblleft We choose any reduced word\textquotedblright\ by \textquotedblleft In this case, we choose any reduced word\textquotedblright. \item \textbf{Proof of Proposition 8.11:} Before the long equation that begins with \textquotedblleft$T_{\sigma}^{\prime}T_{i}^{\prime}=T_{\tau}^{\prime }\left( T_{i}^{\prime}\right) ^{2}$\textquotedblright, I would add \textquotedblleft$T_{\sigma}^{\prime}=T_{\tau}^{\prime}T_{i}^{\prime}$and thus\textquotedblright. \item \textbf{Proof of Proposition 8.11:} I would replace \textquotedblleft%$pT_{\tau}^{\prime}+\left( p-1\right) T_{\tau s_{i}}^{\prime}\in A$\textquotedblright\ by \textquotedblleft$pT_{\tau}^{\prime}+\left( p-1\right) \underbrace{T_{\tau}^{\prime}T_{i}^{\prime}}_{=T_{\sigma}^{\prime }}=pT_{\tau}^{\prime}+\left( p-1\right) T_{\sigma}^{\prime}\in A$\textquotedblright. \item \textbf{Proof of Proposition 8.11:} I would replace \textquotedblleft%$1\in A$\textquotedblright\ by \textquotedblleft$1=T_{\operatorname*{id}% }^{\prime}\in A$\textquotedblright. \item \textbf{Proof of Proposition 8.11:} Replace \textquotedblleft injectve\textquotedblright\ by \textquotedblleft injective\textquotedblright. \item \textbf{\S 9:} I suggest using the LaTeX syntax \texttt{% %TCIMACRO{\TEXTsymbol{\backslash}}% %BeginExpansion$\backslash$% %EndExpansion operatorname\{St\}} instead of \texttt{% %TCIMACRO{\TEXTsymbol{\backslash}}% %BeginExpansion$\backslash$% %EndExpansion text\{St\}} in order to achieve the \textquotedblleft$\operatorname*{St}% $\textquotedblright\ subscripts. Otherwise, these subscripts are italicized whenever they appear inside propositions (because text in propositions is italicized). 
\item \textbf{\S 9:} At the very beginning of \S 9, I would add the following
lemma (which is tacitly used in the definition of$\omega$):
\textbf{Lemma 9.0a.} We have$\left\vert G/U\right\vert \equiv\left(
-1\right) ^{n}\operatorname{mod}p$and thus$\left\vert G/U\right\vert
^{-1}\in\mathbb{Z}_{\left( p\right) }$.
[\textit{Proof of Lemma 9.0a.} Lemma 8.0a \textbf{(b)} (applied
to$V=\mathbb{F}_{p}^{n}$) yields$\left\vert \operatorname*{Flag}\left(
\mathbb{F}_{p}^{n}\right) \right\vert \equiv1\operatorname{mod}p$and$\left\vert
\operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \right\vert
^{-1}\in\mathbb{Z}_{\left( p\right) }$. Proposition 8.1b \textbf{(b)} shows
that there is a natural isomorphism$G/B\rightarrow\operatorname*{Flag}\left(
\mathbb{F}_{p}^{n}\right) $of$G$-sets. Hence,$\left\vert G/B\right\vert
=\left\vert \operatorname*{Flag}%
\left( \mathbb{F}_{p}^{n}\right) \right\vert \equiv1\operatorname{mod}p$.
But from$\left\vert G/U\right\vert =\left\vert G\right\vert /\left\vert
U\right\vert $and$\left\vert G/B\right\vert =\left\vert G\right\vert
/\left\vert B\right\vert $, we obtain%
\begin{align}
\dfrac{\left\vert G/U\right\vert }{\left\vert G/B\right\vert } &
=\dfrac{\left\vert G\right\vert /\left\vert U\right\vert }{\left\vert
G\right\vert /\left\vert B\right\vert }=\dfrac{\left\vert B\right\vert
}{\left\vert U\right\vert }=\dfrac{\left( p-1\right) ^{n}p^{n\left(
n-1\right) /2}}{p^{n\left( n-1\right) /2}}\nonumber\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left\vert B\right\vert =\left(
p-1\right) ^{n}p^{n\left( n-1\right) /2}\text{ and }\left\vert U\right\vert
=p^{n\left( n-1\right) /2}\right) \nonumber\\
& =\left( p-1\right) ^{n}.
\label{pf.l9.0a.1}%
\end{align}
Thus,%
$\left\vert G/U\right\vert =\left( \underbrace{p-1}_{\equiv
-1\operatorname{mod}p}\right) ^{n}\cdot\underbrace{\left\vert G/B\right\vert
}_{\equiv1\operatorname{mod}p}\equiv\left( -1\right) ^{n}\operatorname{mod}%
p.$ Thus,$\left\vert G/U\right\vert $is coprime to$p$(since$\left( -1\right)
^{n}$is coprime to$p$). Hence,$\left\vert G/U\right\vert
^{-1}\in\mathbb{Z}_{\left( p\right) }$. This proves Lemma 9.0a.$\square$]
\item \textbf{\S 9:} I believe some more work is needed in order to justify
the claim that \textquotedblleft$\operatorname*{End}\left( \mathbb{Z}%
_{\left( p\right) }\left[ \operatorname*{Base}\right] \right)
=\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}%
$\textquotedblright\ (again, an isomorphism, not a literal equality). Here is
how I would prove this claim:
\textbf{Lemma 9.0b.} Let$\tau:G\rightarrow\operatorname*{Base}\left(
\mathbb{F}_{p}^{n}\right) $be the map sending each$g\in G$to the
basis$\left( ge_{1},ge_{2},\ldots,ge_{n}\right) \in\operatorname*{Base}\left(
\mathbb{F}_{p}^{n}\right) $. This map$\tau$is well-defined and bijective.
[\textit{Proof of Lemma 9.0b.} If$g\in G$, then$\left( ge_{1},ge_{2}%
,\ldots,ge_{n}\right) \in\operatorname*{Base}\left( \mathbb{F}_{p}%
^{n}\right) $\ \ \ \ \footnote{\textit{Proof.} Let$g\in G$. Hence,$g$is an
automorphism of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$.
But$\left( e_{1},e_{2},\ldots,e_{n}\right) $is a basis of the$\mathbb{F}_{p}%
$-vector space$\mathbb{F}_{p}^{n}$. Thus, the image of this basis$\left(
e_{1},e_{2},\ldots,e_{n}\right) $under$g$must also be a basis of
the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$(since$g$is an
automorphism of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$).
In other words,$\left( ge_{1},ge_{2},\ldots,ge_{n}\right) $must be a basis of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$(since the image of the basis$\left( e_{1},e_{2},\ldots,e_{n}\right) $under$g$is$\left( ge_{1},ge_{2},\ldots,ge_{n}\right) $). In other words,$\left( ge_{1}% ,ge_{2},\ldots,ge_{n}\right) \in\operatorname*{Base}\left( \mathbb{F}% _{p}^{n}\right) $. Qed.}. Hence, the map$\tau$is well-defined. It remains to prove that this map$\tau$is bijective. The map$\tau$is injective\footnote{\textit{Proof.} Let$g\in G$and$h\in G$be such that$\tau\left( g\right) =\tau\left( h\right) $. We shall show that$g=h$. \par The definition of$\tau$yields$\tau\left( g\right) =\left( ge_{1}% ,ge_{2},\ldots,ge_{n}\right) $and$\tau\left( h\right) =\left( he_{1},he_{2},\ldots,he_{n}\right) $. Thus,$\left( ge_{1},ge_{2}% ,\ldots,ge_{n}\right) =\tau\left( g\right) =\tau\left( h\right) =\left( he_{1},he_{2},\ldots,he_{n}\right) $. In other words,$ge_{i}=he_{i}$for each$i\in\left\{ 1,2,\ldots,n\right\} $. But$g$and$h$are elements of$G=\operatorname*{GL}\nolimits_{n}\left( \mathbb{F}_{p}\right) $. Thus,$g$and$h$are$\mathbb{F}_{p}$-linear maps. These two$\mathbb{F}_{p}$-linear maps are equal to each other on each entry of the basis$\left( e_{1}% ,e_{2},\ldots,e_{n}\right) $of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$(since$ge_{i}=he_{i}$for each$i\in\left\{ 1,2,\ldots,n\right\} $). Hence, these two maps must be identical. In other words,$g=h$. \par Now, forget that we fixed$g$and$h$. We thus have shown that if$g\in G$and$h\in G$are such that$\tau\left( g\right) =\tau\left( h\right) $, then$g=h$. In other words, the map$\tau$is injective. Qed.} and surjective\footnote{\textit{Proof.} Let$b\in\operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) $. Thus,$b$is a basis of the$\mathbb{F}_{p}% $-vector space$\mathbb{F}_{p}^{n}$. Hence,$b$is a list of$\dim\left( \mathbb{F}_{p}^{n}\right) =n$elements of$\mathbb{F}_{p}^{n}$. 
Write$b$in the form$\left( b_{1},b_{2},\ldots,b_{n}\right) $. (This is possible, since$b$is a list of$n$elements of$\mathbb{F}_{p}^{n}$). Thus,$\left( b_{1},b_{2},\ldots,b_{n}\right) $is a basis of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$(since$\left( b_{1},b_{2},\ldots,b_{n}\right) =b\in\operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) $). \par Let$g:\mathbb{F}_{p}^{n}\rightarrow\mathbb{F}_{p}^{n}$be the unique$\mathbb{F}_{p}$-linear map that sends each$e_{i}$(with$i\in\left\{ 1,2,\ldots,n\right\} $) to$b_{i}$. (This is well-defined, since$\left( e_{1},e_{2},\ldots,e_{n}\right) $is a basis of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$.) Then,$ge_{i}=b_{i}$for each$i\in\left\{ 1,2,\ldots,n\right\} $. Hence, the map$g$sends the basis$\left( e_{1},e_{2},\ldots,e_{n}\right) $of$\mathbb{F}_{p}^{n}$to the list$\left( b_{1},b_{2},\ldots,b_{n}\right) $. Therefore, the map$g$sends a basis of$\mathbb{F}_{p}^{n}$to a basis of$\mathbb{F}_{p}^{n}$(since both lists$\left( e_{1},e_{2},\ldots,e_{n}\right) $and$\left( b_{1}% ,b_{2},\ldots,b_{n}\right) $are bases of$\mathbb{F}_{p}^{n}$). Thus,$g$is an isomorphism of$\mathbb{F}_{p}$-vector spaces between$\mathbb{F}_{p}^{n}$and$\mathbb{F}_{p}^{n}$. In other words,$g$is an automorphism of the$\mathbb{F}_{p}$-vector space$\mathbb{F}_{p}^{n}$. Thus,$g\in \operatorname*{Aut}\left( \mathbb{F}_{p}^{n}\right) =\operatorname*{GL}% \nolimits_{n}\left( \mathbb{F}_{p}\right) =G$. The definition of$\tau$yields$\tau\left( g\right) =\left( ge_{1},ge_{2},\ldots,ge_{n}\right) =\left( b_{1},b_{2},\ldots,b_{n}\right) $(since$ge_{i}=b_{i}$for each$i\in\left\{ 1,2,\ldots,n\right\} $). Thus,$\tau\left( g\right) =\left( b_{1},b_{2},\ldots,b_{n}\right) =b$. Hence,$b=\tau\left( \underbrace{g}% _{\in G}\right) \in\tau\left( G\right) $. \par Now, forget that we fixed$b$. We thus have proven that$b\in\tau\left( G\right) $for each$b\in\operatorname*{Base}\left( \mathbb{F}_{p}% ^{n}\right) $. 
In other words,$\operatorname*{Base}\left( \mathbb{F}%
_{p}^{n}\right) \subseteq\tau\left( G\right) $. In other words, the
map$\tau$is surjective. Qed.}. Hence, the map$\tau$is bijective. This
completes the proof of Lemma 9.0b.$\square$]
\textbf{Lemma 9.0c.} Recall that for every$g\in G$, we have defined a natural
transformation$g^{\ast}:\operatorname*{Base}\Longrightarrow
\operatorname*{Base}$, and thus we also obtain a natural
transformation$\mathbb{Z}_{\left( p\right) }\left[ g^{\ast}\right]
:\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right]
\Longrightarrow\mathbb{Z}%
_{\left( p\right) }\left[ \operatorname*{Base}\right] $. The latter natural
transformation is an element of the ring$\operatorname*{End}\left(
\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $.
(By abuse of notation, we can denote this natural
transformation$\mathbb{Z}_{\left( p\right) }\left[ g^{\ast}\right]
:\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right]
\Longrightarrow\mathbb{Z}%
_{\left( p\right) }\left[ \operatorname*{Base}\right] $by$g^{\ast}$again; but
we shall not do so in this lemma, because this would risk confusing it with
the natural transformation$g^{\ast}:\operatorname*{Base}%
\Longrightarrow\operatorname*{Base}$.)
Let$\gamma:\mathbb{Z}_{\left( p\right) }\left[ G\right] \rightarrow
\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[
\operatorname*{Base}\right] \right) $be the$\mathbb{Z}_{\left( p\right)
}$-linear map that sends each$\left[ g\right] $(with$g\in G$)
to$\mathbb{Z}%
_{\left( p\right) }\left[ g^{\ast}\right] \in\operatorname*{End}\left(
\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $.
(This is well-defined, since the family$\left( \left[ g\right] \right)
_{g\in G}$is a basis of the$\mathbb{Z}_{\left( p\right)
}$-module$\mathbb{Z}_{\left( p\right) }\left[ G\right] $.)
This map$\gamma$is a$\mathbb{Z}_{\left( p\right) }$-algebra isomorphism$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}% }\rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $. Thus,$\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \cong\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}$as$\mathbb{Z}_{\left( p\right) }$-algebras. [\textit{Proof of Lemma 9.0c.} Consider the map$\tau:G\rightarrow \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) $defined in Lemma 9.0b. Lemma 9.0b shows that this map$\tau$is well-defined and bijective. Hence,$\tau$is an isomorphism of sets. Thus, it induces an isomorphism$\mathbb{Z}_{\left( p\right) }\left[ \tau\right] :\mathbb{Z}_{\left( p\right) }\left[ G\right] \rightarrow\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] $of$\mathbb{Z}_{\left( p\right) }$-modules. Let$\mathbf{e}\in\operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) $be the basis$\left( e_{1},e_{2},\ldots,e_{n}\right) $of$\mathbb{F}_{p}^{n}$. Then,% \begin{equation} g^{\ast}\mathbf{e}=\tau\left( g\right) \ \ \ \ \ \ \ \ \ \ \text{for every }g\in G \label{pf.l9.0c.1}% \end{equation} \footnote{\textit{Proof of (\ref{pf.l9.0c.1}):} Let$g\in G$. Then,$\tau\left( g\right) =\left( ge_{1},ge_{2},\ldots,ge_{n}\right) $(by the definition of$\tau$). Comparing this with$g^{\ast}\mathbf{e}=\left( ge_{1},ge_{2},\ldots,ge_{n}\right) $(by Lemma 8.7a \textbf{(a)}), we obtain$g^{\ast}\mathbf{e}=\tau\left( g\right) $. This proves (\ref{pf.l9.0c.1}).}. 
For every$u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $, the element$\gamma\left( u\right) \in\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $can be applied to the element$\left[ \mathbf{e}\right] \in\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \left( \mathbb{F}_{p}^{n}\right) $, and the result is a new element$\gamma\left( u\right) \cdot\left[ \mathbf{e}\right] $of$\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \left( \mathbb{F}_{p}^{n}\right) =\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] $. We have \begin{equation} \gamma\left( u\right) \cdot\left[ \mathbf{e}\right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) \ \ \ \ \ \ \ \ \ \ \text{for every }u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] \label{pf.l9.0c.2}% \end{equation} \footnote{\textit{Proof of (\ref{pf.l9.0c.2}):} Let$u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $. We must prove the equality (\ref{pf.l9.0c.2}). \par Both$\gamma\left( u\right) $and$\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) $depend$\mathbb{Z}_{\left( p\right) }$-linearly on$u$. Hence, the equality (\ref{pf.l9.0c.2}) is$\mathbb{Z}_{\left( p\right) }$-linear in$u$. Thus, for the proof of this equality, we can WLOG assume that$u$belongs to the basis$\left( \left[ g\right] \right) _{g\in G}$of the$\mathbb{Z}_{\left( p\right) }$-module$\mathbb{Z}_{\left( p\right) }\left[ G\right] $. Assume this. Hence,$u=\left[ g\right] $for some$g\in G$. Consider this$g$. \par Now,$\gamma\left( \underbrace{u}_{=\left[ g\right] }\right) =\gamma\left( \left[ g\right] \right) =\mathbb{Z}_{\left( p\right) }\left[ g^{\ast}\right] $(by the definition of$\gamma$). 
Hence,$\underbrace{\gamma\left( u\right) }_{=\mathbb{Z}_{\left( p\right) }\left[ g^{\ast}\right] }\cdot\left[ \mathbf{e}\right] =\left( \mathbb{Z}_{\left( p\right) }\left[ g^{\ast}\right] \right) \left[ \mathbf{e}\right] =\left[ \underbrace{g^{\ast}\mathbf{e}}_{\substack{=\tau \left( g\right) \\\text{(by (\ref{pf.l9.0c.1}))}}}\right] =\left[ \tau\left( g\right) \right] $. \par On the other hand,$\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( \underbrace{u}_{=\left[ g\right] }\right) =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( \left[ g\right] \right) =\left[ \tau\left( g\right) \right] $(by the definition of$\mathbb{Z}_{\left( p\right) }\left[ \tau\right] $). Comparing this with$\gamma\left( u\right) \cdot\left[ \mathbf{e}\right] =\left[ \tau\left( g\right) \right] $, we obtain$\gamma\left( u\right) \cdot\left[ \mathbf{e}\right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) $. This proves (\ref{pf.l9.0c.2}).}. The map$\gamma$is injective\footnote{\textit{Proof.} Let$u\in \operatorname*{Ker}\gamma$. Thus,$u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $and$\gamma\left( u\right) =0$. But (\ref{pf.l9.0c.2}) yields$\gamma\left( u\right) \cdot\left[ \mathbf{e}\right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) $. Hence,$\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) =\underbrace{\gamma\left( u\right) }_{=0}% \cdot\left[ \mathbf{e}\right] =0$, so that$u\in\operatorname*{Ker}\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) $. \par But the map$\mathbb{Z}_{\left( p\right) }\left[ \tau\right] $is an isomorphism. Thus,$\mathbb{Z}_{\left( p\right) }\left[ \tau\right] $is injective, so that$\operatorname*{Ker}\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) =0$. Hence,$u\in\operatorname*{Ker}\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) =0$, so that$u=0$. 
\par Now, forget that we fixed$u$. We thus have shown that$u=0$for each$u\in\operatorname*{Ker}\gamma$. In other words,$\operatorname*{Ker}\gamma =0$. Hence, the map$\gamma$is injective (since$\gamma$is$\mathbb{Z}% _{\left( p\right) }$-linear). Qed.} and surjective\footnote{\textit{Proof.} Let$X$be the functor$\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \in\mathcal{VA}$. Thus,$X\left( \mathbb{F}% _{p}^{n}\right) =\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \left( \mathbb{F}_{p}^{n}\right) =\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] $. Hence,$\left[ \mathbf{e}\right] \in\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] =X\left( \mathbb{F}_{p}^{n}\right) $. \par Let$\alpha\in\operatorname*{End}\left( X\right) $. Then,$\alpha$can be applied to the element$\left[ \mathbf{e}\right] $of$X\left( \mathbb{F}_{p}^{n}\right) $. The result is an element$\alpha\left[ \mathbf{e}\right] \in X\left( \mathbb{F}_{p}^{n}\right) =\mathbb{Z}% _{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}% ^{n}\right) \right] $. \par The map$\mathbb{Z}_{\left( p\right) }\left[ \tau\right] :\mathbb{Z}% _{\left( p\right) }\left[ G\right] \rightarrow\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] $is an isomorphism, and thus is surjective. Hence,$\mathbb{Z}% _{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}% ^{n}\right) \right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( \mathbb{Z}_{\left( p\right) }\left[ G\right] \right) $. Thus,$\alpha\left[ \mathbf{e}\right] \in\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( \mathbb{F}_{p}^{n}\right) \right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( \mathbb{Z}_{\left( p\right) }\left[ G\right] \right) $. 
In other words, there exists an$u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $such that$\alpha\left[ \mathbf{e}\right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) $. Consider this$u$. \par We have$\alpha\in\operatorname*{End}X$and$\gamma\left( u\right) \in\operatorname*{End}\left( \underbrace{\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] }_{=X}\right) =\operatorname*{End}X$. Thus, both$\alpha$and$\gamma\left( u\right) $are elements of$\operatorname*{End}X$. In other words, both$\alpha$and$\gamma\left( u\right) $are natural transformations$X\Longrightarrow X$. Also,$\alpha\left[ \mathbf{e}\right] =\left( \mathbb{Z}_{\left( p\right) }\left[ \tau\right] \right) \left( u\right) =\gamma\left( u\right) \cdot\left[ \mathbf{e}\right] $(by (\ref{pf.l9.0c.2})). Hence, Lemma 8.7b (applied to$Y=X$and$\beta=\gamma\left( u\right) $) yields$\alpha =\gamma\left( \underbrace{u}_{\in\mathbb{Z}_{\left( p\right) }\left[ G\right] }\right) \in\gamma\left( \mathbb{Z}_{\left( p\right) }\left[ G\right] \right) $. \par Now, forget that we fixed$\alpha$. We thus have proven that$\alpha\in \gamma\left( \mathbb{Z}_{\left( p\right) }\left[ G\right] \right) $for each$\alpha\in\operatorname*{End}\left( X\right) $. In other words,$\operatorname*{End}\left( X\right) \subseteq\gamma\left( \mathbb{Z}% _{\left( p\right) }\left[ G\right] \right) $. Since$X=\mathbb{Z}% _{\left( p\right) }\left[ \operatorname*{Base}\right] $, this rewrites as$\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \subseteq\gamma\left( \mathbb{Z}% _{\left( p\right) }\left[ G\right] \right) $. In other words, the map$\gamma$is surjective. Qed.}. Hence, the map$\gamma$is bijective. Also,$\gamma$is a$\mathbb{Z}_{\left( p\right) }$-linear map$\mathbb{Z}% _{\left( p\right) }\left[ G\right] \rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $. 
In other words,$\gamma$is a$\mathbb{Z}_{\left( p\right) }$-linear map$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}% }\rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $(since$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}=\mathbb{Z}_{\left( p\right) }\left[ G\right] $as a$\mathbb{Z}_{\left( p\right) }$-module). We have$\gamma\left( 1\right) =\operatorname*{id}\nolimits_{\mathbb{Z}% _{\left( p\right) }\left[ \operatorname*{Base}\right] }$% \ \ \ \ \footnote{\textit{Proof.} The definition of$\gamma$yields$\gamma\left( 1\right) =\mathbb{Z}_{\left( p\right) }\left[ \underbrace{1^{\ast}}_{=\operatorname*{id}\nolimits_{\operatorname*{Base}}% }\right] =\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{id}% \nolimits_{\operatorname*{Base}}\right] =\operatorname*{id}% \nolimits_{\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] }$(by Remark 8.1f \textbf{(a)}, applied to$P=\operatorname*{Base}$). Qed.}. Also, recall that any$g\in G$and$h\in G$satisfy \begin{equation} \left( gh\right) ^{\ast}=h^{\ast}g^{\ast}. \label{pf.l9.0c.6}% \end{equation} Now, any$v\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $and$u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $satisfy% \begin{equation} \gamma\left( uv\right) =\gamma\left( v\right) \circ\gamma\left( u\right) \label{pf.l9.0c.7}% \end{equation} \footnote{\textit{Proof of (\ref{pf.l9.0c.7}):} Let$v\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $and$u\in\mathbb{Z}_{\left( p\right) }\left[ G\right] $. We must prove the equality (\ref{pf.l9.0c.7}). This equality is$\mathbb{Z}_{\left( p\right) }$-linear in each of$u$and$v$(since$\gamma$is a$\mathbb{Z}_{\left( p\right) }$-linear map). Hence, for the proof of this equality, we can WLOG assume that both$u$and$v$belong to the basis$\left( \left[ g\right] \right) _{g\in G}$of the$\mathbb{Z}% _{\left( p\right) }$-module$\mathbb{Z}_{\left( p\right) }\left[ G\right] $. Assume this. 
Thus,$u=\left[ g\right] $and$v=\left[ h\right] $for some elements$g\in
G$and$h\in G$. Consider these$g$and$h$. We have%
\begin{align*}
\gamma\left( \underbrace{u}_{=\left[ g\right] }\underbrace{v}_{=\left[
h\right] }\right) & =\gamma\left( \underbrace{\left[ g\right] \left[
h\right] }_{=\left[ gh\right] }\right) =\gamma\left( \left[ gh\right]
\right) \\
& =\mathbb{Z}_{\left( p\right) }\left[ \underbrace{\left( gh\right)
^{\ast}}_{\substack{=h^{\ast}g^{\ast}\\\text{(by (\ref{pf.l9.0c.6}))}%
}}\right] \ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }%
\gamma\right) \\
& =\mathbb{Z}_{\left( p\right) }\left[ h^{\ast}g^{\ast}\right] .
\end{align*}
Comparing this with%
\begin{align*}
\gamma\left( \underbrace{v}_{=\left[ h\right] }\right) \circ\gamma\left(
\underbrace{u}_{=\left[ g\right] }\right) & =\underbrace{\gamma\left(
\left[ h\right] \right) }_{\substack{=\mathbb{Z}_{\left( p\right) }\left[
h^{\ast}\right] \\\text{(by the definition of }\gamma\text{)}}%
}\circ\underbrace{\gamma\left( \left[ g\right] \right) }%
_{\substack{=\mathbb{Z}_{\left( p\right) }\left[ g^{\ast}\right]
\\\text{(by the definition of }\gamma\text{)}}}=\mathbb{Z}_{\left( p\right)
}\left[ h^{\ast}\right] \circ\mathbb{Z}_{\left( p\right) }\left[ g^{\ast
}\right] \\
& =\mathbb{Z}_{\left( p\right) }\left[ h^{\ast}\circ g^{\ast}\right] \\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{by Remark 8.1f \textbf{(b)}, applied to }P=\operatorname*{Base}\text{,
}Q=\operatorname*{Base}\text{, }R=\operatorname*{Base}\text{,}\\
\alpha_{1}=g^{\ast}\text{ and }\alpha_{2}=h^{\ast}%
\end{array}
\right) \\
& =\mathbb{Z}_{\left( p\right) }\left[ h^{\ast}g^{\ast}\right] ,
\end{align*}
we obtain$\gamma\left( uv\right) =\gamma\left( v\right) \circ
\gamma\left( u\right) $. This proves (\ref{pf.l9.0c.7}).}.
In other words,$\gamma$is multiplicative when viewed as a map$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}\rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $(because$uv$is the product of$v$and$u$in the$\mathbb{Z}_{\left( p\right) }$-algebra$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}$). Combining this with$\gamma\left( 1\right) =\operatorname*{id}\nolimits_{\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] }$, we conclude that$\gamma$is a$\mathbb{Z}% _{\left( p\right) }$-algebra homomorphism$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}\rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $(because$\gamma$is a$\mathbb{Z}_{\left( p\right) }$-linear map$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}% }\rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $). Since$\gamma$is bijective, we thus conclude that$\gamma$is a$\mathbb{Z}_{\left( p\right) }$-algebra isomorphism$\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}\rightarrow\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) $. Thus,$\operatorname*{End}\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \cong\mathbb{Z}_{\left( p\right) }\left[ G\right] ^{\operatorname*{op}}$as$\mathbb{Z}_{\left( p\right) }% $-algebras. This proves Lemma 9.0c.$\square$] \item \textbf{Proposition 9.2:} Replace \textquotedblleft The map\textquotedblright\ by \textquotedblleft The$\mathbb{Z}_{\left( p\right) }$-linear map\textquotedblright. \item \textbf{Proof of Proposition 9.2:} After \textquotedblleft in the$i$'th space\textquotedblright, add \textquotedblleft(whenever$\underline{v}% \in\operatorname*{Base}\left( V\right) $)\textquotedblright. 
\item \textbf{Proof of Proposition 9.2:} Replace \textquotedblleft a reduced
word$s_{i_{1}}\cdots s_{i_{r}}$for$\Sigma$\textquotedblright\ by
\textquotedblleft a reduced word$s_{i_{1}}\cdots s_{i_{r}}$for$\sigma
$\textquotedblright.
\item \textbf{Proof of Proposition 9.2:} Replace every appearance of
\textquotedblleft$\widehat{\xi}$\textquotedblright\ in this proof by
\textquotedblleft$\xi$\textquotedblright.
\item \textbf{Proof of Proposition 9.2:} After \textquotedblleft$\left(
-1\right) ^{r}\mu=\widehat{\xi}\left( T_{\sigma}\right) \mu$%
\textquotedblright, add \textquotedblleft(since$\left( -1\right)
^{r}=\operatorname*{sgn}\left( \sigma\right) =\xi\left( T_{\sigma}\right)
$)\textquotedblright.
\item \textbf{Proof of Proposition 9.2:} After \textquotedblleft It follows
that$\xi\left( ab\right) \mu=\xi\left( a\right) \xi\left( b\right) \mu
$\textquotedblright, add \textquotedblleft(since$ab\mu=\xi\left( ab\right)
\mu$and thus$\xi\left( ab\right) \mu=a\underbrace{b\mu}_{=\xi\left(
b\right) \mu}=a\xi\left( b\right) \mu=\xi\left( b\right) \underbrace{a\mu
}_{=\xi\left( a\right) \mu}=\xi\left( b\right) \xi\left( a\right)
\mu=\xi\left( a\right) \xi\left( b\right) \mu$)\textquotedblright.
\item \textbf{Proof of Proposition 9.2:} You write: \textquotedblleft(This
could also have been deduced from Proposition 8.11.)\textquotedblright. A few
details about this deduction would be useful. Namely, here is how it works:
[\textit{Proof of the fact that }$\xi$ \textit{is a ring map:} Clearly, we
have%
\begin{align*}
\left( -1\right) ^{2} & =p+\left( p-1\right) \left( -1\right) ,\\
\left( -1\right) \left( -1\right) \left( -1\right) & =\left( -1\right)
\left( -1\right) \left( -1\right) ,\\
\left( -1\right) \left( -1\right) & =\left( -1\right) \left( -1\right) .
\end{align*}
Thus, the relations in Proposition 8.11 remain valid if each$T_{k}$in them is
replaced by$-1$.
Hence, Proposition 8.11 shows that there exists a unique$\mathbb{Z}_{\left(
p\right) }$-algebra homomorphism$\eta:\mathcal{H}%
\rightarrow\mathbb{Z}_{\left( p\right) }$that sends each$T_{k}$to$-1$.
Consider this$\eta$. Now, if$\sigma\in\Sigma_{n}$, then we can fix any
reduced word$s_{i_{1}}s_{i_{2}}\cdots s_{i_{r}}$for$\sigma$, and then we
find%
\begin{align*}
\eta\left( \underbrace{T_{\sigma}}_{\substack{=T_{i_{1}}T_{i_{2}}\cdots
T_{i_{r}}\\\text{(by Corollary 8.10)}}}\right) & =\eta\left( T_{i_{1}%
}T_{i_{2}}\cdots T_{i_{r}}\right) =\eta\left( T_{i_{1}}\right) \eta\left(
T_{i_{2}}\right) \cdots\eta\left( T_{i_{r}}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\eta\text{ is a }\mathbb{Z}%
_{\left( p\right) }\text{-algebra homomorphism}\right) \\
& =\underbrace{\left( -1\right) \left( -1\right) \cdots\left( -1\right)
}_{r\text{ times}}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\eta\left( T_{i_{j}}\right)
=-1\text{ for each }j\text{ (by the definition of }\eta\text{)}\right) \\
& =\left( -1\right) ^{r}=\operatorname*{sgn}\left( \sigma\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\sigma=s_{i_{1}}s_{i_{2}}\cdots
s_{i_{r}}\text{ and thus }\operatorname*{sgn}\left( \sigma\right) =\left(
-1\right) ^{r}\right) \\
& =\xi\left( T_{\sigma}\right) .
\end{align*}
Thus, we have found that$\eta\left( T_{\sigma}\right) =\xi\left(
T_{\sigma}\right) $for each$\sigma\in\Sigma_{n}$. In other words, the
maps$\eta$and$\xi$are equal to each other on the basis$\left( T_{\sigma
}\right) _{\sigma\in\Sigma_{n}}$of the$\mathbb{Z}_{\left( p\right) }%
$-module$\mathcal{H}$. Hence, these two maps$\eta$and$\xi$must be identical
(since they are both$\mathbb{Z}_{\left( p\right) }$-linear). In other
words,$\xi=\eta$. Thus,$\xi$is a ring homomorphism (since$\eta$is a ring
homomorphism).$\square$]
\item \textbf{Proof of Proposition 9.3:} This proof has several flaws.
In particular, the expression \textquotedblleft$\left\langle \sigma^{\ast}\pi
^{t}\left[ \underline{U}\right] ,\left[ \underline{W}\right] \right\rangle
$\textquotedblright\ makes no sense, and the formula$\pi\sigma^{\ast}\pi
^{t}=\left\vert B/U\right\vert p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma
}$is false. Let me show a correct (and more detailed) proof:
[\textit{Proof of Proposition 9.3.} Fix$\sigma\in\Sigma_{n}$.
Let$V\in\mathcal{V}$, and let$\underline{W}\in\operatorname*{Flag}\left(
V\right) $. For each$\underline{U}\in\operatorname*{Flag}\left( V\right)
$, we have%
\begin{align*}
& \left\langle \pi\sigma^{\ast}\pi^{t}\left[ \underline{W}\right] ,\left[
\underline{U}\right] \right\rangle \\
& =\left\langle \pi\sigma^{\ast}\sum_{\substack{\mathbf{w}\in
\operatorname*{Base}\left( V\right) ;\\\pi\left( \mathbf{w}\right)
=\underline{W}}}\left[ \mathbf{w}\right] ,\left[ \underline{U}\right]
\right\rangle \ \ \ \ \ \ \ \ \ \ \left( \text{since }\pi^{t}\left[
\underline{W}\right] =\sum_{\substack{\mathbf{w}\in\operatorname*{Base}%
\left( V\right) ;\\\pi\left( \mathbf{w}\right) =\underline{W}}}\left[
\mathbf{w}\right] \right) \\
& =\sum_{\substack{\mathbf{w}\in\operatorname*{Base}\left( V\right)
;\\\pi\left( \mathbf{w}\right) =\underline{W}}}\left\langle \underbrace{\pi
\sigma^{\ast}\left[ \mathbf{w}\right] }_{=\left[ \pi\left( \sigma^{\ast
}\mathbf{w}\right) \right] },\left[ \underline{U}\right] \right\rangle
=\sum_{\substack{\mathbf{w}\in\operatorname*{Base}\left( V\right)
;\\\pi\left( \mathbf{w}\right) =\underline{W}}}\underbrace{\left\langle
\left[ \pi\left( \sigma^{\ast}\mathbf{w}\right) \right] ,\left[
\underline{U}\right] \right\rangle }_{=\delta_{\pi\left( \sigma^{\ast
}\mathbf{w}\right) ,\underline{U}}}=\sum_{\substack{\mathbf{w}\in
\operatorname*{Base}\left( V\right) ;\\\pi\left( \mathbf{w}\right)
=\underline{W}}}\delta_{\pi\left( \sigma^{\ast}\mathbf{w}\right)
,\underline{U}}\\
& =\left( \text{the number of all }\mathbf{w}\in\operatorname*{Base}\left(
V\right) \text{ such that }\pi\left( \mathbf{w}\right) =\underline{W}\text{ and }\pi\left( \sigma^{\ast}\mathbf{w}\right) =\underline{U}\right) \\ & =\left( \text{the number of all }\left( v_{1},v_{2},\ldots,v_{n}\right) \in\operatorname*{Base}\left( V\right) \text{ such }\right. \\ & \ \ \ \ \ \ \ \ \ \ \left. \text{that }\pi\left( v_{1},v_{2},\ldots ,v_{n}\right) =\underline{W}\text{ and }\pi\left( \underbrace{\sigma^{\ast }\left( v_{1},v_{2},\ldots,v_{n}\right) }_{=\left( v_{\sigma\left( 1\right) },v_{\sigma\left( 2\right) },\ldots,v_{\sigma\left( n\right) }\right) }\right) =\underline{U}\right) \\ & \ \ \ \ \ \ \ \ \ \ \left( \begin{array} [c]{c}% \text{here, we have substituted }\left( v_{1},v_{2},\ldots,v_{n}\right) \text{ for the index }\mathbf{w}\text{,}\\ \text{since each element of }\operatorname*{Base}\left( V\right) \text{ is an }n\text{-tuple}% \end{array} \right) \\ & =\left( \text{the number of all }\left( v_{1},v_{2},\ldots,v_{n}\right) \in\operatorname*{Base}\left( V\right) \text{ such }\right. \\ & \ \ \ \ \ \ \ \ \ \ \left. \text{that }\underbrace{\pi\left( v_{1}% ,v_{2},\ldots,v_{n}\right) =\underline{W}}_{\Longleftrightarrow\ \left( W_{i}=\operatorname*{span}\left\{ v_{1},v_{2},\ldots,v_{i}\right\} \text{ for all }i\right) }\text{ and }\underbrace{\pi\left( v_{\sigma\left( 1\right) },v_{\sigma\left( 2\right) },\ldots,v_{\sigma\left( n\right) }\right) =\underline{U}}_{\Longleftrightarrow\ \left( U_{i}% =\operatorname*{span}\left\{ v_{\sigma\left( 1\right) },v_{\sigma\left( 2\right) },\ldots,v_{\sigma\left( i\right) }\right\} \text{ for all }i\right) }\right) \\ & =\left( \text{the number of all }\left( v_{1},v_{2},\ldots,v_{n}\right) \in\operatorname*{Base}\left( V\right) \text{ such }\right. \\ & \ \ \ \ \ \ \ \ \ \ \left. \text{that }\left( W_{i}=\operatorname*{span}% \left\{ v_{1},v_{2},\ldots,v_{i}\right\} \text{ for all }i\right) \right) \\ & \ \ \ \ \ \ \ \ \ \ \left. 
\text{and }\left( U_{i}=\operatorname*{span}% \left\{ v_{\sigma\left( 1\right) },v_{\sigma\left( 2\right) }% ,\ldots,v_{\sigma\left( i\right) }\right\} \text{ for all }i\right) \right) \\ & =\left( \text{the number of all bases }\left( v_{1},v_{2},\ldots ,v_{n}\right) \text{ of }V\text{ such that for all }i\right. \\ & \ \ \ \ \ \ \ \ \ \ \left. \text{we have }U_{i}=\operatorname*{span}% \left\{ v_{\sigma\left( 1\right) },v_{\sigma\left( 2\right) }% ,\ldots,v_{\sigma\left( i\right) }\right\} \text{ and }W_{i}% =\operatorname*{span}\left\{ v_{1},v_{2},\ldots,v_{i}\right\} \right) \\ & =% \begin{cases} \left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }, & \text{if }\delta\left( \left[ \underline{U}\right] ,\left[ \underline{W}\right] \right) =\sigma;\\ 0, & \text{otherwise}% \end{cases} \end{align*} (by Corollary 5.5). Hence,% \begin{align*} \pi\sigma^{\ast}\pi^{t}\left[ \underline{W}\right] & =\sum_{\underline{U}% \in\operatorname*{Flag}\left( V\right) }% \begin{cases} \left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }, & \text{if }\delta\left( \left[ \underline{U}\right] ,\left[ \underline{W}\right] \right) =\sigma;\\ 0, & \text{otherwise}% \end{cases} \left[ \underline{U}\right] \\ & =\sum_{\substack{\underline{U}\in\operatorname*{Flag}\left( V\right) ;\\\delta\left( \left[ \underline{U}\right] ,\left[ \underline{W}\right] \right) =\sigma}}\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }\left[ \underline{U}\right] \\ & =\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }% \underbrace{\sum_{\substack{\underline{U}\in\operatorname*{Flag}\left( V\right) ;\\\delta\left( \left[ \underline{U}\right] ,\left[ \underline{W}\right] \right) =\sigma}}}_{\substack{=\sum _{\substack{\underline{U}\in\operatorname*{Flag}\left( V\right) ;\\\delta\left( \left[ \underline{W}\right] ,\left[ \underline{U}\right] \right) =\sigma^{-1}}}\\\text{(because for each }\underline{U}\in \operatorname*{Flag}\left( V\right) \text{,}\\\text{the condition }\left( 
\delta\left( \left[ \underline{U}\right] ,\left[ \underline{W}\right] \right) =\sigma\right) \text{ is}\\\text{equivalent to }\left( \delta\left( \left[ \underline{W}\right] ,\left[ \underline{U}\right] \right) =\sigma^{-1}\right) \\\text{(since }\delta\left( \left[ \underline{W}\right] ,\left[ \underline{U}\right] \right) =\delta\left( \left[ \underline{U}\right] ,\left[ \underline{W}\right] \right) ^{-1}\text{))}}}\left[ \underline{U}\right] \\
& =\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }%
\sum_{\substack{\underline{U}\in\operatorname*{Flag}\left( V\right) ;\\\delta\left( \left[ \underline{W}\right] ,\left[ \underline{U}\right] \right) =\sigma^{-1}}}\left[ \underline{U}\right] .
\end{align*}
Comparing this with%
$\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }%
\underbrace{T_{\sigma^{-1}}\left[ \underline{W}\right] }_{=\sum _{\substack{\underline{U}\in\operatorname*{Flag}\left( V\right) ;\\\delta\left( \left[ \underline{W}\right] ,\left[ \underline{U}\right] \right) =\sigma^{-1}}}\left[ \underline{U}\right] }=\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }\sum_{\substack{\underline{U}%
\in\operatorname*{Flag}\left( V\right) ;\\\delta\left( \left[ \underline{W}\right] ,\left[ \underline{U}\right] \right) =\sigma^{-1}%
}}\left[ \underline{U}\right] ,$ we obtain $\pi\sigma^{\ast}\pi^{t}\left[ \underline{W}\right] =\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma^{-1}}\left[ \underline{W}\right] $. Now, forget that we fixed $\underline{W}$. We thus have shown that $\pi \sigma^{\ast}\pi^{t}\left[ \underline{W}\right] =\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma^{-1}}\left[ \underline{W}%
\right] $ for each $\underline{W}\in\operatorname*{Flag}\left( V\right) $.
In other words, the two maps $\pi\sigma^{\ast}\pi^{t}$ and $\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma^{-1}}$ are equal to each other on the basis $\left( \left[ \underline{W}\right] \right) _{W\in\operatorname*{Flag}\left( V\right) }$ of the $\mathbb{Z}%
_{\left( p\right) }$-module $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\left( V\right) \right] $. Since these two maps are $\mathbb{Z}_{\left( p\right) }$-linear, we can thus conclude that they are identical. In other words, $\pi\sigma^{\ast}\pi^{t}=\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma^{-1}}$. Now, forget that we fixed $\sigma$. We thus have shown that
\begin{equation}
\pi\sigma^{\ast}\pi^{t}=\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}%
\rho\right) }T_{\sigma^{-1}}\ \ \ \ \ \ \ \ \ \ \text{for each }\sigma \in\Sigma_{n}. \label{pf.p9.2.1}%
\end{equation}
On the other hand, every $\sigma\in\Sigma_{n}$ satisfies%
\begin{equation}
l\left( \sigma\rho\right) =l\left( \sigma^{-1}\rho\right) \label{pf.p9.2.2}%
\end{equation}
\footnote{\textit{Proof of (\ref{pf.p9.2.2}):} Let $\sigma\in\Sigma_{n}$. Then, Corollary 2.19 \textbf{(a)} yields $l\left( \sigma^{-1}\rho\right) =n\left( n-1\right) /2-l\left( \sigma\right) $. But Corollary 2.19 \textbf{(b)} yields $l\left( \sigma\rho\right) =n\left( n-1\right) /2-l\left( \sigma\right) $. Thus, $l\left( \sigma^{-1}\rho\right) =n\left( n-1\right) /2-l\left( \sigma\right) =l\left( \sigma\rho\right) $. This proves (\ref{pf.p9.2.2}).}.
Now, \begin{align*} e & =\pi\omega\pi^{t}\\ & =\pi\left\vert G/U\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}% \operatorname*{sgn}\left( \sigma\right) \sigma^{\ast}\pi^{t}% \ \ \ \ \ \ \ \ \ \ \left( \text{since }\omega=\left\vert G/U\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}\operatorname*{sgn}\left( \sigma\right) \sigma^{\ast}\right) \\ & =\left\vert G/U\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}% \underbrace{\operatorname*{sgn}\left( \sigma\right) }_{=\operatorname*{sgn}% \left( \sigma^{-1}\right) }\underbrace{\pi\sigma^{\ast}\pi^{t}% }_{\substack{=\left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma^{-1}}\\\text{(by (\ref{pf.p9.2.1}))}}}\\ & =\left\vert G/U\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}% \operatorname*{sgn}\left( \sigma^{-1}\right) \left( p-1\right) ^{n}p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma^{-1}}\\ & =\left\vert G/U\right\vert ^{-1}\underbrace{\left( p-1\right) ^{n}% }_{\substack{=\dfrac{\left\vert G/U\right\vert }{\left\vert G/B\right\vert }\\\text{(by (\ref{pf.l9.0a.1}))}}}\underbrace{\sum_{\sigma\in\Sigma_{n}% }\operatorname*{sgn}\left( \sigma^{-1}\right) p^{l\left( \sigma^{-1}% \rho\right) }T_{\sigma^{-1}}}_{\substack{=\sum_{\sigma\in\Sigma_{n}% }\operatorname*{sgn}\left( \sigma\right) p^{l\left( \sigma\rho\right) }T_{\sigma}\\\text{(here, we have substituted }\sigma\text{ for }\sigma ^{-1}\\\text{in the sum, since the map }\Sigma_{n}\rightarrow\Sigma _{n},\ \sigma\mapsto\sigma^{-1}\\\text{is a bijection)}}}\\ & =\underbrace{\left\vert G/U\right\vert ^{-1}\dfrac{\left\vert G/U\right\vert }{\left\vert G/B\right\vert }}_{=\left\vert G/B\right\vert ^{-1}}\sum_{\sigma\in\Sigma_{n}}\operatorname*{sgn}\left( \sigma\right) \underbrace{p^{l\left( \sigma\rho\right) }}_{\substack{=p^{l\left( \sigma^{-1}\rho\right) }\\\text{(since }l\left( \sigma\rho\right) =l\left( \sigma^{-1}\rho\right) \\\text{(by (\ref{pf.p9.2.2})))}}}T_{\sigma}\\ & =\left\vert G/B\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}% \operatorname*{sgn}\left( \sigma\right) 
p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma}.
\end{align*}
This proves Proposition 9.3. $\square$]
\item \textbf{Proof of Proposition 9.4:} Replace \textquotedblleft First, we have\textquotedblright\ by \textquotedblleft Proposition 9.3 yields\textquotedblright.
\item \textbf{Proof of Proposition 9.4:} The first chain of equalities in the proof needs some justification (e.g., why do we have $\sum_{\sigma}p^{l\left( \sigma^{-1}\rho\right) }=\left\vert \coprod_{\sigma}X\left( \sigma^{-1}%
\rho\right) \right\vert $, and why is $\left\vert G/B\right\vert ^{-1}\left\vert \coprod_{\tau}X\left( \tau\right) \right\vert =1$?). I would actually suggest the following alternative argument: Proposition 8.1b \textbf{(b)} shows that there is a natural isomorphism $G/B\rightarrow\operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) $ of $G$-sets. Hence, $\left\vert G/B\right\vert =\left\vert \operatorname*{Flag}%
\left( \mathbb{F}_{p}^{n}\right) \right\vert $. But every $V\in\mathcal{V}$ satisfies $\left\vert \operatorname*{Flag}\left( V\right) \right\vert =\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }$ (by Lemma 8.0a \textbf{(b)}). Applying this to $V=\mathbb{F}_{p}^{n}$, we obtain $\left\vert \operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \right\vert =\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }$. The map $\Sigma _{n}\rightarrow\Sigma_{n},\ \sigma\mapsto\sigma^{-1}\rho$ is a bijection (since $\Sigma_{n}$ is a group). Hence, we can substitute $\sigma^{-1}\rho$ for $\sigma$ in the sum $\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }$. We thus obtain $\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }=\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma^{-1}\rho\right) }$. Hence,%
\begin{equation}
\left\vert G/B\right\vert =\left\vert \operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \right\vert =\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma\right) }=\sum_{\sigma\in\Sigma_{n}}p^{l\left( \sigma^{-1}\rho\right) }.
\label{pf.p9.4.1}%
\end{equation}
Now, Proposition 9.3 yields%
$e=\left\vert G/B\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}\operatorname*{sgn}%
\left( \sigma\right) p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma}.$ Applying the map $\xi$ to both sides of this equality, we find%
\begin{align*}
\xi\left( e\right) & =\xi\left( \left\vert G/B\right\vert ^{-1}%
\sum_{\sigma\in\Sigma_{n}}\operatorname*{sgn}\left( \sigma\right) p^{l\left( \sigma^{-1}\rho\right) }T_{\sigma}\right) \\
& =\left\vert G/B\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}%
\operatorname*{sgn}\left( \sigma\right) p^{l\left( \sigma^{-1}\rho\right) }\underbrace{\xi\left( T_{\sigma}\right) }_{\substack{=\operatorname*{sgn}%
\left( \sigma\right) \\\text{(by the definition of }\xi\text{)}}}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since the map }\xi\text{ is }%
\mathbb{Z}_{\left( p\right) }\text{-linear}\right) \\
& =\left\vert G/B\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}%
\operatorname*{sgn}\left( \sigma\right) p^{l\left( \sigma^{-1}\rho\right) }\operatorname*{sgn}\left( \sigma\right) =\left\vert G/B\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}\underbrace{\left( \operatorname*{sgn}\left( \sigma\right) \right) ^{2}}_{=1}p^{l\left( \sigma^{-1}\rho\right) }\\
& =\left\vert G/B\right\vert ^{-1}\sum_{\sigma\in\Sigma_{n}}%
\underbrace{\left( \operatorname*{sgn}\left( \sigma\right) \right) ^{2}%
}_{=1}p^{l\left( \sigma^{-1}\rho\right) }\\
& =\left\vert G/B\right\vert ^{-1}\underbrace{\sum_{\sigma\in\Sigma_{n}%
}p^{l\left( \sigma^{-1}\rho\right) }}_{\substack{=\left\vert G/B\right\vert \\\text{(by (\ref{pf.p9.4.1}))}}}=\left\vert G/B\right\vert ^{-1}\left\vert G/B\right\vert =1.
\end{align*}
\item \textbf{Proof of Proposition 9.4:} Replace \textquotedblleft$\widehat{\xi}\left( e\right) \mu$\textquotedblright\ by \textquotedblleft$\xi\left( e\right) \mu$\textquotedblright.
\item \textbf{Proof of Proposition 9.4:} I would replace \textquotedblleft As$e_{\operatorname*{St}}=\pi^{t}\mu$we see that$\pi^{t}\left( M_{\operatorname*{St}}\right) =\operatorname{image}\left( \pi^{t}\mu\right) =M_{\operatorname*{St}}^{\prime}$\textquotedblright\ by the more detailed argument \textquotedblleft As$M_{\operatorname*{St}}=\operatorname{image}% \left( \mu\right) $, we see that$\pi^{t}\left( M_{\operatorname*{St}% }\right) =\operatorname{image}\left( \underbrace{\pi^{t}\mu}% _{=e_{\operatorname*{St}}}\right) =\operatorname{image}\left( e_{\operatorname*{St}}\right) =M_{\operatorname*{St}}^{\prime}$% \textquotedblright. \item \textbf{Proof of Proposition 9.4:} I would replace \textquotedblleft Thus, if we let$\beta$be the restriction of$\pi^{t}$to$M_{\operatorname*{St}}$, we see that$\beta$gives an epimorphism$M_{\operatorname*{St}}\rightarrow M_{\operatorname*{St}}^{\prime}% $\textquotedblright\ by \textquotedblleft Thus,$\pi^{t}$restricts to an epimorphism$\beta:M_{\operatorname*{St}}\rightarrow M_{\operatorname*{St}% }^{\prime}$\textquotedblright. (This is both shorter and also explains unambiguously what the codomain of$\beta$is.) \item \textbf{Proof of Proposition 9.4:} After \textquotedblleft As the map$\mu\pi^{t}=e$restricts to$1$on$M_{\operatorname*{St}}$\textquotedblright, I would add \textquotedblleft(because$e$is idempotent, and$M_{\operatorname*{St}}$is its image)\textquotedblright. \item \textbf{Proof of Proposition 9.5:} I don't understand the first sentence of this proof: Why does the Yoneda isomorphism exist? (I only know Yoneda isomorphisms for functors to$\operatorname*{Set}$, not for functors to$\mathcal{A}$; but even if I were to write down the obvious generalization, there remains the question why$\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] $is a Hom-functor.) And supposing that the Yoneda isomorphism exists, why does it yield that$\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] $is projective? 
What I do see is that the functor $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] $ is \textquotedblleft pointwise projective\textquotedblright, in the sense that the image of each object of $\mathcal{V}$ under this functor is a projective $\mathbb{Z}_{\left( p\right) }$-module. (This is obvious, because the image of an object $V\in\mathcal{V}$ under the functor $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] $ is the $\mathbb{Z}_{\left( p\right) }$-module $\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] \right) \left( V\right) =\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\left( V\right) \right] $, which is free and therefore projective.) Therefore, the functor $M_{\operatorname*{St}}$ is \textquotedblleft pointwise projective\textquotedblright\ as well (since, as you point out, $M_{\operatorname*{St}}\left( V\right) $ is a direct summand in $\left( \mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}%
\right] \right) \left( V\right) $).
\item \textbf{Proof of Proposition 9.5:} After \textquotedblleft the image of $e_{\operatorname*{St}}$, which is a summand in $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Base}\right] $\textquotedblright, I suggest adding \textquotedblleft(since $e_{\operatorname*{St}}$ is idempotent)\textquotedblright.
\item \textbf{Proof of Proposition 9.5:} After \textquotedblleft$M_{\operatorname*{St}}$ is also the image of a self-adjoint idempotent on $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] $\textquotedblright, I suggest adding \textquotedblleft(namely, of $e$)\textquotedblright.
\item \textbf{Proof of Proposition 9.5:} After \textquotedblleft the rank of $M_{\operatorname*{St}}$ is the trace of $e$\textquotedblright, I suggest adding \textquotedblleft(since $M_{\operatorname*{St}}$ is the image of the idempotent endomorphism $e$)\textquotedblright.
\item \textbf{Proof of Proposition 9.5:} After \textquotedblleft the map $T_{\sigma}$ is the identity\textquotedblright, I would add \textquotedblleft(by Corollary 4.6)\textquotedblright.
\item \textbf{Proof of Proposition 9.5:} I would suggest replacing \textquotedblleft with trace $\left\vert \operatorname*{Flag}\right\vert =\left\vert G/B\right\vert $\textquotedblright\ by \textquotedblleft with trace%
\begin{align*}
\left\vert \operatorname*{Flag}\left( V\right) \right\vert & =\left\vert \operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \right\vert \ \ \ \ \ \ \ \ \ \ \left( \text{since }V\cong\mathbb{F}_{p}^{n}\text{ as }\mathbb{F}_{p}\text{-vector spaces}\right) \\
& =\left\vert G/B\right\vert \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since Proposition 8.1b \textbf{(b)} shows that there is a}\\
\text{natural isomorphism }G/B\rightarrow\operatorname*{Flag}\left( \mathbb{F}_{p}^{n}\right) \text{ of }G\text{-sets}%
\end{array}
\right)
\end{align*}
\textquotedblright.
\item \textbf{Proof of Proposition 9.5:} I suggest replacing \textquotedblleft Next, note that $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}%
\right] =M_{\operatorname*{St}}\oplus N$\textquotedblright\ by \textquotedblleft But $e$ is idempotent; thus, $\mathbb{Z}_{\left( p\right) }\left[ \operatorname*{Flag}\right] =\underbrace{\operatorname{image}\left( e\right) }_{=M_{\operatorname*{St}}}\oplus\underbrace{\operatorname{image}%
\left( 1-e\right) }_{=N}=M_{\operatorname*{St}}\oplus N$\textquotedblright.
\end{itemize}
\end{document}