\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{framed}
\usepackage{comment}
\usepackage{color}
\usepackage{needspace}
\usepackage{tabls}
\usepackage[breaklinks=true]{hyperref}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{CSTFile=LaTeX article (bright).cst}
%TCIDATA{Created=Sat Mar 27 17:33:36 2004}
%TCIDATA{LastRevised=Monday, February 15, 2021 16:46:45}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\newcommand{\id}{\operatorname*{id}}
\newcommand{\gr}{\operatorname*{gr}}
\newcommand{\combine}{\operatorname*{combine}}
\newcommand{\dirsum}{\operatorname*{dirsum}}
\newcommand{\Hom}{\operatorname*{Hom}}
\newcommand{\arinj}{\ar@{_{(}->}}
\newcommand{\arsurj}{\ar@{->>}}
\newcommand{\arelem}{\ar@{|->}}
\newcommand{\xycs}{\xymatrixcolsep}
\newcommand{\parbreak}{\medskip \rule{14cm}{0.3mm} \medskip}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\newenvironment{convention}[1][Convention]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\NOEXPAND{\parbreak}{\medskip \parbreak \medskip}
\fi
\setlength\tablinesep{3pt}
\setlength\arraylinesep{3pt}
\setlength\extrarulesep{3pt}
\setlength\textheight{22.5cm}
\setlength\textwidth{14.8cm}
\begin{document}
\begin{center}
\textbf{La d\'{e}composition en poids des alg\`{e}bres de Hopf}

\textit{Fr\'{e}d\'{e}ric Patras}

Ann. Inst. Fourier, Grenoble \textbf{43}, 4 (1993), pp. 1067--1087

\url{http://aif.cedram.org/cgi-bin/fitem?id=AIF_1993__43_4_1067_0} (published version)

\textbf{Errata and remarks} (by \textit{Darij Grinberg})

\bigskip
\end{center}
The following are remarks I have made while reading the above-cited paper by
Fr\'{e}d\'{e}ric Patras. I think it is an interesting and rather readable text
(despite some minor typos and tersely written proofs).
Some of the below remarks are just quick corrections of minor mistakes (at
least as far as I can tell; I can neither guarantee that these ``mistakes''
really are mistakes, nor that my ``corrections'' are correct!). Some others
are detailed expositions of certain proofs which have been only vaguely
sketched in Patras's paper. Finally, some others give alternative proofs for
results in Patras's paper (sometimes inserting additional results into
Patras's paper, to be used as lemmata later on).
Different remarks are separated by horizontal lines, like this:
\parbreak
\textbf{Page 1068:} I think ``aux endomorphismes $\psi^{k}$'' should be ``aux
endomorphismes $\Psi^{k}$'' here.
\parbreak
\textbf{Page 1069, Definition 1.1:} There is nothing wrong here, but I think
it would be helpful to notice that what Patras calls ``alg\`{e}bre de Hopf''
is \textit{not} the same as what modern-day algebraists call a Hopf algebra.
What Patras calls ``alg\`{e}bre de Hopf'' is a kind of super-version of a
graded bialgebra (not necessarily having an antipode!); in contrast, what
modern-day algebraists call a Hopf algebra is just a bialgebra with antipode.
(Nevertheless, I am going to use the words ``Hopf algebra'' for what Patras
calls ``alg\`{e}bre de Hopf'' in the following.)
\parbreak
\textbf{Page 1070, fifth line of this page:} Here, Patras writes:
``Une big\`{e}bre gradu\'{e}e ou une alg\`{e}bre de Hopf est \textit{connexe}
si $H_{0}\cong K$.''
This definition is good when $K$ is a field, but in the general case when $K$
is a commutative ring, it is not a reasonable definition of ``connected''.
Since Patras, in his paper, always works over a field $K$, this is not a
problem for him, but I still prefer the following (in my opinion, better)
definition of ``connected'': A graded bialgebra or Hopf algebra $H$ is
\textit{connected} if and only if the map $\epsilon\mid_{H_{0}}:H_{0}%
\rightarrow K$ is an isomorphism.
Note that, when $K$ is a field, this definition is equivalent to Patras's
definition, because we have the following equivalence of assertions:%
\begin{align*}
& \ \left( H_{0}\cong K\text{ as }K\text{-vector spaces}\right) \\
& \Longleftrightarrow\ \left( \dim\left( H_{0}\right) =1\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{where }\dim V\text{ denotes the dimension
of any }K\text{-vector space }V\right) \\
& \Longleftrightarrow\ \left( \dim\left( \operatorname*{Ker}\left(
\epsilon\mid_{H_{0}}\right) \right) =0\right) \\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since we know that the map }\epsilon\mid_{H_{0}}:H_{0}\rightarrow
K\text{ is surjective (because}\\
\left( \epsilon\mid_{H_{0}}\right) \left( 1\right) =\epsilon\left(
1\right) =1\text{ (by the axioms of a bialgebra, since }H\text{ is a
bialgebra)),}\\
\text{and thus (by the isomorphism theorem) }K\cong H_{0}\diagup
\operatorname*{Ker}\left( \epsilon\mid_{H_{0}}\right) \text{, so that}\\
\dim K=\dim\left( H_{0}\diagup\operatorname*{Ker}\left( \epsilon\mid_{H_{0}%
}\right) \right) =\dim H_{0}-\dim\left( \operatorname*{Ker}\left(
\epsilon\mid_{H_{0}}\right) \right) \text{,}\\
\text{so that }\dim H_{0}=\underbrace{\dim K}_{=1}+\dim\left(
\operatorname*{Ker}\left( \epsilon\mid_{H_{0}}\right) \right)
=1+\dim\left( \operatorname*{Ker}\left( \epsilon\mid_{H_{0}}\right)
\right) \text{,}\\
\text{and therefore the equation }\dim\left( H_{0}\right) =1\text{ is
equivalent to }\dim\left( \operatorname*{Ker}\left( \epsilon\mid_{H_{0}%
}\right) \right) =0
\end{array}
\right) \\
& \Longleftrightarrow\ \left( \operatorname*{Ker}\left( \epsilon\mid
_{H_{0}}\right) =0\right) \ \Longleftrightarrow\ \left( \epsilon\mid
_{H_{0}}\text{ is injective}\right) \ \Longleftrightarrow\ \left(
\epsilon\mid_{H_{0}}\text{ is bijective}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since we know that the map }\epsilon\mid_{H_{0}}:H_{0}\rightarrow
K\text{ is surjective (because}\\
\left( \epsilon\mid_{H_{0}}\right) \left( 1\right) =\epsilon\left(
1\right) =1\text{ (by the axioms of a bialgebra, since }H\text{ is a
bialgebra)),}\\
\text{and thus this map }\epsilon\mid_{H_{0}}\text{ is injective if and only
if it is bijective}%
\end{array}
\right) \\
& \Longleftrightarrow\ \left( \epsilon\mid_{H_{0}}\text{ is an
isomorphism}\right) .
\end{align*}
\parbreak
\textbf{Page 1070, two lines above Definition 1.2:} Here, Patras writes:
``[...] l'ensemble $\mathcal{L}\left( H\right) $ des endomorphismes
lin\'{e}aires de $H$ [...]''.
I don't think that $\mathcal{L}\left( H\right) $ denotes the set of all
linear endomorphisms of $H$ throughout the text. It seems to me that
$\mathcal{L}\left( H\right) $ indeed denotes the set of all linear
endomorphisms of $H$ when $H$ is just a bialgebra (not graded); however, when
$H$ is a graded bialgebra or an ``alg\`{e}bre de Hopf'' (I would translate
this by ``Hopf algebra'', but as I said, this does not mean what people
nowadays mean by a ``Hopf algebra''), $\mathcal{L}\left( H\right) $ denotes
the set of all \textit{graded}\footnote{A linear map $f:V\rightarrow W$
between graded vector spaces $V$ and $W$ is said to be \textit{graded} (or
\textit{compatible with the grading}) if, for every $n\in\mathbb{N}$, it
satisfies $f\left( V_{n}\right) \subseteq W_{n}$.} linear endomorphisms of
$H$.
Note that I might be wrong about this, and $\mathcal{L}\left( H\right) $
might indeed mean the set of \textit{all} linear endomorphisms of $H$
throughout the text. In this case, however, the homomorphism $\rho_{n}$
defined on page 1074 (``Notons $\rho_{n}$ l'homomorphisme de restriction de
$\mathcal{L}\left( H\right) $ dans $\mathcal{L}\left( H\right) _{n}$.'')
is not a simple restriction homomorphism (i. e., it is not just given by
$\rho_{n}\left( f\right) =f\mid_{\bigoplus\limits_{i=0}^{n}H_{i}}$ for every
$f\in\mathcal{L}\left( H\right) $), but instead requires a more subtle
definition: It must then be defined by%
\[
\left( \rho_{n}\left( f\right) =\sum_{i=0}^{n}p_{i}\circ f\circ
p_{i}\ \ \ \ \ \ \ \ \ \ \text{for all }f\in\mathcal{L}\left( H\right)
\right) ,
\]
where $p_{i}:H\rightarrow H$ denotes the map which sends every element of $H$
to its $i$-th graded component (seen again as an element of $H$).
\parbreak
\textbf{Page 1071, proof of Proposition 1.4:} Here, Patras writes: ``La
deuxi\`{e}me partie de la proposition se ram\`{e}ne \`{a} \'{e}tablir
l'\'{e}galit\'{e} :%
\[
\Delta^{\left[ k\right] }\circ\Pi^{\left[ l\right] }=\Pi_{\left(
k\right) }^{\left[ l\right] }\circ\left( \Delta^{\left[ k\right]
}\right) ^{\otimes l},
\]
qui est une cons\'{e}quence \`{a} peu pr\`{e}s imm\'{e}diate des axiomes de
structure des big\`{e}bres commutatives.'' This is not totally precise. The
identity $\Delta^{\left[ k\right] }\circ\Pi^{\left[ l\right] }%
=\Pi_{\left( k\right) }^{\left[ l\right] }\circ\left( \Delta^{\left[
k\right] }\right) ^{\otimes l}$ is true in any bialgebra, not only in
commutative ones (it follows from the axioms of a bialgebra by a double
induction over $k$ and $l$). However, deriving the ``deuxi\`{e}me partie de la
proposition'' from this identity requires the bialgebra to be commutative.
Here are the details of this derivation: We have $\left( \Delta^{\left[
k\right] }\right) ^{\otimes l}\circ\Delta^{\left[ l\right] }%
=\Delta^{\left[ lk\right] }$ (this holds for any coalgebra, and can be
proven by induction using the coassociativity and counity axioms of a
coalgebra\footnote{This proof can be found in [P3] (Lemma II.8 of [P3], to be
precise).}) and $\Pi^{\left[ k\right] }\circ\Pi_{\left( k\right)
}^{\left[ l\right] }=\Pi^{\left[ lk\right] }$ (this holds for any
commutative algebra, and is easy to see - but doesn't generally hold for
noncommutative algebras!), so that%
\begin{align*}
& \underbrace{\Psi^{k}}_{=I^{\ast k}=\Pi^{\left[ k\right] }\circ I^{\otimes
k}\circ\Delta^{\left[ k\right] }=\Pi^{\left[ k\right] }\circ
\Delta^{\left[ k\right] }}\circ\underbrace{\Psi^{l}}_{=I^{\ast l}%
=\Pi^{\left[ l\right] }\circ I^{\otimes l}\circ\Delta^{\left[ l\right]
}=\Pi^{\left[ l\right] }\circ\Delta^{\left[ l\right] }}\\
& =\Pi^{\left[ k\right] }\circ\underbrace{\Delta^{\left[ k\right] }%
\circ\Pi^{\left[ l\right] }}_{=\Pi_{\left( k\right) }^{\left[ l\right]
}\circ\left( \Delta^{\left[ k\right] }\right) ^{\otimes l}}\circ
\Delta^{\left[ l\right] }=\underbrace{\Pi^{\left[ k\right] }\circ
\Pi_{\left( k\right) }^{\left[ l\right] }}_{=\Pi^{\left[ lk\right] }%
}\circ\underbrace{\left( \Delta^{\left[ k\right] }\right) ^{\otimes
l}\circ\Delta^{\left[ l\right] }}_{=\Delta^{\left[ lk\right] }}%
=\Pi^{\left[ lk\right] }\circ\Delta^{\left[ lk\right] }\\
& =\Pi^{\left[ lk\right] }\circ I^{\otimes lk}\circ\Delta^{\left[
lk\right] }=I^{\ast lk}=\Psi^{lk},
\end{align*}
and this proves the second part of Proposition 1.4.
\parbreak
\textbf{Page 1072:} A typo: ``Notons $\Phi^{k}$ le $n$-i\`{e}me
endomorphisme'' should be ``Notons $\Phi^{k}$ le $k$-i\`{e}me endomorphisme''.
\parbreak
\textbf{Page 1073, proof of Proposition 2.3:} There is nothing wrong to be
corrected here, but I don't find the proof of this proposition as obvious as
Patras does, so let me write down this proof here:
\begin{proof}
[Proof of Proposition 2.3.]We can prove that any commuting $x\in M$ and $y\in
M$ satisfy $\log_{k}x+\log_{k}y=\log_{k}\left( xy\right) $ (this follows
from the well-known fact that $\log\left( 1+X\right) +\log\left(
1+Y\right) =\log\left( \left( 1+X\right) \left( 1+Y\right) \right) $ in
the ring $K\left[ \left[ X,Y\right] \right] $ of formal power series,
using the fact that the $K$-representation $A$ is unipotent of rank $k$).
Using this fact, we can prove (by induction over $n$) that every $x\in M$ and
$n\in\mathbb{N}$ satisfy $n\log_{k}x=\log_{k}\left( x^{n}\right) $.
However, for every $x\in M$, we have%
\[
\left( \rho\circ\Phi^{n}\right) \left( x\right) =\rho\left(
\underbrace{\Phi^{n}\left( x\right) }_{=x^{n}}\right) =\rho\left(
x^{n}\right)
\]
and%
\begin{align*}
\left( \sum\limits_{i=0}^{k-1}n^{i}\cdot\varepsilon^{i}\right) \left(
x\right) & =\sum\limits_{i=0}^{k-1}n^{i}\cdot\underbrace{\varepsilon
^{i}\left( x\right) }_{=\dfrac{\left( \log_{k}x\right) ^{i}}{i!}}%
=\sum\limits_{i=0}^{k-1}n^{i}\cdot\dfrac{\left( \log_{k}x\right) ^{i}}{i!}\\
& =\sum\limits_{i=0}^{k-1}\dfrac{\left( n\log_{k}x\right) ^{i}}{i!}%
=\exp_{k}\left( \underbrace{n\log_{k}x}_{=\log_{k}\left( x^{n}\right)
}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\exp_{k}\left(
n\log_{k}x\right) \right) \\
& =\exp_{k}\left( \log_{k}\left( x^{n}\right) \right)
=\underbrace{\left( \exp_{k}\circ\log_{k}\right) }_{\substack{=\rho
\\\text{(by Lemma 2.1)}}}\left( x^{n}\right) =\rho\left( x^{n}\right) .
\end{align*}
Hence, for every $x\in M$, we have $\left( \rho\circ\Phi^{n}\right) \left(
x\right) =\rho\left( x^{n}\right) =\left( \sum\limits_{i=0}^{k-1}n^{i}%
\cdot\varepsilon^{i}\right) \left( x\right) $. Thus, $\rho\circ\Phi
^{n}=\sum\limits_{i=0}^{k-1}n^{i}\cdot\varepsilon^{i}$. This proves
Proposition 2.3.
\end{proof}
\parbreak
\textbf{Page 1074, proof of Lemma 3.1:} Let me add that the same argument
which Patras used to prove Lemma 3.1 can be used to prove a more general statement:
\begin{quote}
\textbf{Lemma 3.11.} Let $\rho_{n}^{\prime}:\operatorname*{Hom}\nolimits_{K}%
\left( H,H\right) \rightarrow\operatorname*{Hom}\nolimits_{K}\left(
\bigoplus\limits_{i=0}^{n}H_{i},H\right) $ be the map which takes every
linear map $g\in\operatorname*{Hom}\nolimits_{K}\left( H,H\right) $ to the
restriction of $g$ to $\bigoplus\limits_{i=0}^{n}H_{i}$.
For every map $f\in\operatorname*{Hom}\nolimits_{K}\left( H,H\right) $
satisfying $f\left( 1\right) =1$, we have
\[
\left( \rho_{n}^{\prime}\left( f-1\right) \right) ^{\ast\left(
n+1\right) }=0
\]
(where $1$ denotes the unity of the $K$-algebra $\mathcal{L}\left( H\right)
$, i. e., the map $\eta\circ\epsilon$).
\end{quote}
\begin{proof}
[Proof of Lemma 3.11.]Copy the proof of Lemma 3.1, replacing every occurrence
of $\Psi^{k}$ by $f$, and replacing every occurrence of $\rho_{n}$ by $\rho
_{n}^{\prime}$. This gives a proof of Lemma 3.11.
\end{proof}
Note that we replaced $\rho_{n}$ by $\rho_{n}^{\prime}$ in the statement of
Lemma 3.11 because we didn't want to require $f$ to be graded. If
$f\in\operatorname*{Hom}\nolimits_{K}\left( H,H\right) $ is a graded map,
then $\rho_{n}\left( f\right) $ is ``more or less the same'' as $\rho
_{n}^{\prime}\left( f\right) $ (the only difference between the maps
$\rho_{n}\left( f\right) $ and $\rho_{n}^{\prime}\left( f\right) $ is that
the codomain of $\rho_{n}\left( f\right) $ is $\bigoplus\limits_{i=0}%
^{n}H_{i}$, whereas the codomain of $\rho_{n}^{\prime}\left( f\right) $ is
the whole $H$). However, if $f$ is not a graded map, $\rho_{n}\left(
f\right) $ is either not defined or not identical to $\rho_{n}^{\prime
}\left( f\right) $ (depending on how $\rho_{n}$ is defined: see my remark
about ``Page 1070, two lines above Definition 1.2'' above).
Here is a very useful consequence of Lemma 3.11:
\begin{quote}
\textbf{Lemma 3.12.} Let $f\in\operatorname*{Hom}\nolimits_{K}\left(
H,H\right) $ be a map satisfying $f\left( 1\right) =1$. Then, for every
$x\in H$, the infinite sum $\sum\limits_{n=1}^{\infty}\left( -1\right)
^{n+1}\dfrac{\left( f-1\right) ^{\ast n}}{n}\left( x\right) $ has only
finitely many nonzero terms.
\end{quote}
\begin{proof}
[Proof of Lemma 3.12.]Let $x\in H$. Since $x\in H=\bigoplus\limits_{i\in
\mathbb{N}}H_{i}=\bigcup\limits_{j\in\mathbb{N}}\left( \bigoplus
\limits_{i=0}^{j}H_{i}\right) $, there exists some $j\in\mathbb{N}$ such that
$x\in\bigoplus\limits_{i=0}^{j}H_{i}$. Consider this $j$.
Recall that $\rho_{j}^{\prime}:\operatorname*{Hom}\nolimits_{K}\left(
H,H\right) \rightarrow\operatorname*{Hom}\nolimits_{K}\left( \bigoplus
\limits_{i=0}^{j}H_{i},H\right) $ is the map which takes every linear map
$g\in\operatorname*{Hom}\nolimits_{K}\left( H,H\right) $ to the restriction
of $g$ to $\bigoplus\limits_{i=0}^{j}H_{i}$. Hence, for every $n\in\mathbb{N}%
$, the map $\rho_{j}^{\prime}\left( \left( f-1\right) ^{\ast n}\right) $
is the restriction of the map $\left( f-1\right) ^{\ast n}$ to
$\bigoplus\limits_{i=0}^{j}H_{i}$. Since $x\in\bigoplus\limits_{i=0}^{j}H_{i}%
$, this yields that $\left( \rho_{j}^{\prime}\left( \left( f-1\right)
^{\ast n}\right) \right) \left( x\right) =\left( f-1\right) ^{\ast
n}\left( x\right) $ for every $n\in\mathbb{N}$. But we also have $\rho
_{j}^{\prime}\left( \left( f-1\right) ^{\ast n}\right) =\left( \rho
_{j}^{\prime}\left( f-1\right) \right) ^{\ast n}$ for every $n\in
\mathbb{N}$ (since $\rho_{j}^{\prime}$ is a $K$-algebra homomorphism).
But Lemma 3.11 (applied to $j$ instead of $n$) yields $\left( \rho
_{j}^{\prime}\left( f-1\right) \right) ^{\ast\left( j+1\right) }=0$.
Hence, every integer $n\geq j+1$ satisfies%
\begin{align*}
\left( \rho_{j}^{\prime}\left( f-1\right) \right) ^{\ast n} & =\left(
\rho_{j}^{\prime}\left( f-1\right) \right) ^{\ast\left( \left(
j+1\right) +\left( n-\left( j+1\right) \right) \right) }\\
& =\underbrace{\left( \rho_{j}^{\prime}\left( f-1\right) \right)
^{\ast\left( j+1\right) }}_{=0}\ast\left( \rho_{j}^{\prime}\left(
f-1\right) \right) ^{\ast\left( n-\left( j+1\right) \right) }=0.
\end{align*}
Thus, every integer $n\geq j+1$ satisfies
\begin{align*}
& \left( -1\right) ^{n+1}\dfrac{\left( f-1\right) ^{\ast n}}{n}\left(
x\right) \\
& =\left( -1\right) ^{n+1}\dfrac{\left( f-1\right) ^{\ast n}\left(
x\right) }{n}=\left( -1\right) ^{n+1}\dfrac{\left( \rho_{j}^{\prime
}\left( \left( f-1\right) ^{\ast n}\right) \right) \left( x\right) }%
{n}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( \rho_{j}^{\prime}\left(
\left( f-1\right) ^{\ast n}\right) \right) \left( x\right) =\left(
f-1\right) ^{\ast n}\left( x\right) \right) \\
& =\left( -1\right) ^{n+1}\dfrac{\left( \rho_{j}^{\prime}\left(
f-1\right) \right) ^{\ast n}\left( x\right) }{n}%
\ \ \ \ \ \ \ \ \ \ \left( \text{since }\rho_{j}^{\prime}\left( \left(
f-1\right) ^{\ast n}\right) =\left( \rho_{j}^{\prime}\left( f-1\right)
\right) ^{\ast n}\right) \\
& =\left( -1\right) ^{n+1}\dfrac{0}{n}\ \ \ \ \ \ \ \ \ \ \left(
\text{since }\left( \rho_{j}^{\prime}\left( f-1\right) \right) ^{\ast
n}=0\text{ (due to }n\geq j+1\text{)}\right) \\
& =0.
\end{align*}
This proves Lemma 3.12.
\end{proof}
\parbreak
\textbf{Page 1074:} Two lines above Proposition 3.2, Patras writes:
``$\varepsilon_{n}^{i}$ est donc un morphisme de $E$ dans $\mathcal{L}\left(
H\right) _{n}$''. It would be helpful to emphasize that ``morphisme'' means a
morphism of sets here, not a morphism of monoids (unless I am missing something!).
\parbreak
\textbf{Page 1074:} Four lines above Proposition 3.2, Patras writes: ``Nous
noterons dans la suite $\varepsilon_{n}^{i}$, $1\leq i\leq n$, [...]''. I
think that considering the $\varepsilon_{n}^{i}$ only for $1\leq i\leq n$ (but
not for $i=0$) is a bad decision, since it leads to several minor mistakes
afterwards. For example, the first identity on page 1077,%
\[
\rho_{n}\left( \Psi^{\zeta}\right) =\rho_{n}\left( \sum_{i=1}^{n}\zeta
^{i}\cdot e^{i}\right) ,
\]
is not completely correct, since the sum on the right hand side is missing an $i=0$
term, but as long as $e^{0}$ is not defined, this does not make much sense.
For another example, Definition 3.7 does not uniquely define $\Psi^{\zeta}$,
because $H$ is not the direct sum of all $H_{n}^{\left( i\right) }$ unless
we allow $i$ to be $0$.
I think the simplest way to clean up this mess is to define the maps
$\varepsilon_{n}^{i}$ for all $0\leq i\leq n$ in the same way as they are
defined for all $1\leq i\leq n$ in the text. This yields that $\varepsilon
_{n}^{0}\left( x\right) =\dfrac{\left( \log_{1}x\right) ^{\ast0}}%
{0!}=\dfrac{1}{1}=1$ (where $1$ denotes the unity of $\mathcal{L}\left(
H\right) $; this is the map $\eta\circ\epsilon$ (not the map $I$)). Thus, in
particular, $\varepsilon_{n}^{0}\left( I\right) =1$, so that
\begin{align*}
e_{n}^{0} & =\varepsilon_{n}^{0}\left( I\right) \mid_{H_{n}}%
\ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }e_{n}^{0}\right) \\
& =1\mid_{H_{n}}=%
\begin{cases}
1, & \text{if }n=0;\\
0, & \text{if }n\neq0
\end{cases}
\ \ .
\end{align*}
Hence, for $n\neq0$, we have $e_{n}^{0}=0$. This is why we don't have to care
about $e_{n}^{0}$ when $n\neq0$. However, for $n=0$, we have $e_{0}^{0}=1$.
Now we have the following (in my opinion, slightly better) version of
Proposition 3.2:
\begin{quote}
\textbf{Proposition 3.2'.} For every $n\in\mathbb{N}$ (including the case
$n=0$), we have $\Psi_{n}^{k}=\sum\limits_{i=0}^{n}k^{i}\cdot e_{n}^{i}$.
\end{quote}
\begin{proof}
[Proof of Proposition 3.2'.]Let $n\in\mathbb{N}$. Then, by the definition of
$\Psi_{n}^{k}$, we have%
\begin{align*}
\Psi_{n}^{k} & =\Psi^{k}\mid_{H_{n}}=\underbrace{\left( \Psi^{k}%
\mid_{_{\bigoplus\limits_{i=0}^{n}H_{i}}}\right) }_{=\rho_{n}\left( \Psi
^{k}\right) }\mid_{H_{n}}=\rho_{n}\left( \underbrace{\Psi^{k}}_{=\Phi
^{k}\left( I\right) }\right) \mid_{H_{n}}\\
& =\rho_{n}\left( \Phi^{k}\left( I\right) \right) \mid_{H_{n}}=\left(
\rho_{n}\circ\Phi^{k}\right) \left( I\right) \mid_{H_{n}}=\left(
\sum_{i=0}^{n}k^{i}\cdot\varepsilon_{n}^{i}\right) \left( I\right)
\mid_{H_{n}}\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since }\rho_{n}\circ\Phi^{k}=\sum\limits_{i=0}^{n}k^{i}\cdot
\varepsilon_{n}^{i}\text{ by Proposition 2.3 (applied to}\\
E\text{, }\mathcal{L}\left( H\right) _{n}\text{, }\rho_{n}\text{,
}n+1\text{, }k\text{ instead of }M\text{, }A\text{, }\rho\text{, }k\text{ and
}n\text{)}%
\end{array}
\right) \\
& =\sum_{i=0}^{n}k^{i}\cdot\underbrace{\varepsilon_{n}^{i}\left( I\right)
\mid_{H_{n}}}_{\substack{=e_{n}^{i}\\\text{(because this is}\\\text{how }%
e_{n}^{i}\text{ was defined)}}}=\sum_{i=0}^{n}k^{i}\cdot e_{n}^{i}.
\end{align*}
This proves Proposition 3.2'.
\end{proof}
Proposition 3.2 is merely an obvious consequence of Proposition 3.2':
\begin{proof}
[Proof of Proposition 3.2.]For every $n>0$, we have%
\begin{align*}
\Psi_{n}^{k} & =\sum_{i=0}^{n}k^{i}\cdot e_{n}^{i}%
\ \ \ \ \ \ \ \ \ \ \left( \text{by Proposition 3.2'}\right) \\
& =k^{0}\cdot\underbrace{e_{n}^{0}}_{=0\text{ (since }n\neq0\text{)}}%
+\sum_{i=1}^{n}k^{i}\cdot e_{n}^{i}=\sum_{i=1}^{n}k^{i}\cdot e_{n}^{i}.
\end{align*}
This proves Proposition 3.2.
\end{proof}
\parbreak
\textbf{Page 1075, Definition 3.3:} It would not harm to add here that
$e_{n}^{i}$ is understood to be $0$ if $i>n$. (Otherwise, $e_{n}^{i}$ would
not be defined at all for $i>n$.)
\parbreak
\textbf{Page 1075, proof of Proposition 3.4:} Patras' proof of Proposition 3.4
confines itself to one sentence: ``Les deux identit\'{e}s r\'{e}sultent
respectivement de 1.4 et 3.2, et de la d\'{e}finition 2.2 des projecteurs de
poids $i$.''
I don't think this is enough, however indirectly ``r\'{e}sultent'' is meant. It
is indeed easy to conclude $e^{i}\ast e^{j}=\dbinom{i+j}{i}\cdot e^{i+j}$ from
1.4; however, concluding $e^{i}\circ e^{j}=\delta_{j}^{i}\cdot e^{i}$ from 3.2
is rather difficult. Here is how I would prove Proposition 3.4:
First, a rather standard combinatorial identity which we won't prove:
\begin{quote}
\textbf{Theorem 0.1.} Let $N\in\mathbb{N}$. Then, the equalities%
\begin{equation}
\sum_{k=0}^{N}\left( -1\right) ^{k}\dbinom{N}{k}k^{\ell}%
=0\ \ \ \ \ \ \ \ \ \ \text{for every }\ell\in\left\{ 0,1,...,N-1\right\}
\label{0.1-1}%
\end{equation}
and%
\begin{equation}
\sum_{k=0}^{N}\left( -1\right) ^{k}\dbinom{N}{k}k^{N}=\left( -1\right)
^{N}N! \label{0.1-2}%
\end{equation}
are satisfied in $\mathbb{Z}$.
\end{quote}
(This Theorem 0.1 is, for example, the result of applying Theorem 1 of [DG1]
to $R=\mathbb{Z}$.)
This has, as a consequence, a kind of ``polynomials that are zero at all
nonnegative integers must be identically zero'' result for torsionfree abelian groups:
\begin{quote}
\textbf{Theorem 0.2.} Let $R$ be a torsionfree abelian group. Let
$n\in\mathbb{N}$. Let $\left( \alpha_{0},\alpha_{1},...,\alpha_{n}\right) $
and $\left( \beta_{0},\beta_{1},...,\beta_{n}\right) $ be two $\left(
n+1\right) $-tuples of elements of $R$ such that every $k\in\mathbb{N}$
satisfies $\sum\limits_{m=0}^{n}k^{m}\alpha_{m}=\sum\limits_{m=0}^{n}%
k^{m}\beta_{m}$. Then, $\alpha_{m}=\beta_{m}$ for every $m\in\left\{
0,1,...,n\right\} $.
\end{quote}
\begin{proof}
[Proof of Theorem 0.2.]We are going to prove that for every $\ell\in\left\{
0,1,...,n\right\} $, we have%
\begin{equation}
\alpha_{n-\ell}=\beta_{n-\ell}. \label{0.2.pf.1}%
\end{equation}
\textit{Proof of (\ref{0.2.pf.1}).} We will prove (\ref{0.2.pf.1}) by strong
induction over $\ell$. A strong induction does not need an induction base, so
let us start with the induction step:
\textit{Induction step:} Let $L\in\left\{ 0,1,...,n\right\} $ be arbitrary.
Assume that (\ref{0.2.pf.1}) is already proven for all $\ell\in\left\{
0,1,...,n\right\} $ satisfying $\ell<L$.
% NOTE(review): a large span of the source text appears to be missing at this
% point (the remainder of the proof of Theorem 0.2, and the beginning of the
% material leading up to Corollary 1.18). The surviving text resumes
% mid-sentence below:
(since $n>0$), so that $\epsilon\left( x\right) =0$.
We are going to prove (\ref{1.18}) by induction over $\ell$:
\textit{Induction base:} We have
\[
\underbrace{\Psi^{0}}_{=\eta\circ\epsilon}\left( x\right) =\left( \eta
\circ\epsilon\right) \left( x\right) =\eta\underbrace{\left(
\epsilon\left( x\right) \right) }_{=0}=\eta\left( 0\right) =0\equiv
0x\operatorname{mod}\left( \sum_{i=1}^{n-1}H_{i}H_{n-i}\right) \cap H_{n}.
\]
Thus, (\ref{1.18}) holds for $\ell=0$. This completes the induction base.
\textit{Induction step:} Let $L\in\mathbb{N}$ be arbitrary. Assume that
(\ref{1.18}) holds for $\ell=L$. We now must show that (\ref{1.18}) holds for
$\ell=L+1$.
Since $\Psi^{L}$ and $\Psi^{L+1}$ are graded maps, we have $\Psi^{L}\left(
H_{n}\right) \subseteq H_{n}$ and $\Psi^{L+1}\left( H_{n}\right) \subseteq
H_{n}$.
By the definition of $\Psi^{L}$, we have $\Psi^{L}=I^{\ast L}$. Also,
Corollary 1.18 \textbf{(a)} (applied to $\ell=L$) yields $\Psi^{L}\left(
1\right) =1$ and $\left( \Psi^{L}-I\right) \left( H_{0}\right) =0$.
By the definition of $\Psi^{L+1}$, we have%
\[
\Psi^{L+1}=I^{\ast\left( L+1\right) }=I\ast\underbrace{I^{\ast L}}%
_{=\Psi^{L}}=I\ast\Psi^{L}=\mu\circ\left( I\otimes\Psi^{L}\right)
\circ\Delta
\]
(by the definition of convolution). Thus,%
\begin{align*}
\Psi^{L+1}\left( x\right) & =\left( \mu\circ\left( I\otimes\Psi
^{L}\right) \circ\Delta\right) \left( x\right) =\mu\left( \left(
I\otimes\Psi^{L}\right) \underbrace{\left( \Delta\left( x\right) \right)
}_{\substack{\in x\otimes1+1\otimes x+\sum\limits_{k=1}^{n-1}H_{k}\otimes
H_{n-k}\\\text{(by Lemma 1.17)}}}\right) \\
& \in\mu\underbrace{\left( \left( I\otimes\Psi^{L}\right) \left(
x\otimes1+1\otimes x+\sum\limits_{k=1}^{n-1}H_{k}\otimes H_{n-k}\right)
\right) }_{\substack{=\left( I\otimes\Psi^{L}\right) \left( x\otimes
1\right) +\left( I\otimes\Psi^{L}\right) \left( 1\otimes x\right)
+\sum\limits_{k=1}^{n-1}\left( I\otimes\Psi^{L}\right) \left( H_{k}\otimes
H_{n-k}\right) \\\text{(since }I\otimes\Psi^{L}\text{ is }K\text{-linear)}%
}}\\
& =\mu\left( \underbrace{\left( I\otimes\Psi^{L}\right) \left(
x\otimes1\right) }_{=I\left( x\right) \otimes\Psi^{L}\left( 1\right)
}+\underbrace{\left( I\otimes\Psi^{L}\right) \left( 1\otimes x\right)
}_{=I\left( 1\right) \otimes\Psi^{L}\left( x\right) }+\sum\limits_{k=1}%
^{n-1}\underbrace{\left( I\otimes\Psi^{L}\right) \left( H_{k}\otimes
H_{n-k}\right) }_{=I\left( H_{k}\right) \otimes\Psi^{L}\left(
H_{n-k}\right) }\right) \\
& =\mu\left( I\left( x\right) \otimes\Psi^{L}\left( 1\right) +I\left(
1\right) \otimes\Psi^{L}\left( x\right) +\sum\limits_{k=1}^{n-1}I\left(
H_{k}\right) \otimes\Psi^{L}\left( H_{n-k}\right) \right) \\
& =\underbrace{I\left( x\right) }_{=x}\cdot\underbrace{\Psi^{L}\left(
1\right) }_{=1}+\underbrace{I\left( 1\right) }_{=1}\cdot\Psi^{L}\left(
x\right) +\sum\limits_{k=1}^{n-1}\underbrace{I\left( H_{k}\right) }%
_{=H_{k}}\cdot\underbrace{\Psi^{L}\left( H_{n-k}\right) }%
_{\substack{\subseteq H_{n-k}\\\text{(since }\Psi^{L}\text{ is a graded map)}%
}}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\mu\text{ is the multiplication
map}\right) \\
& \subseteq x+\Psi^{L}\left( x\right) +\sum\limits_{k=1}^{n-1}H_{k}%
H_{n-k}=x+\Psi^{L}\left( x\right) +\sum\limits_{i=1}^{n-1}H_{i}H_{n-i}%
\end{align*}
(here, we renamed the index $k$ as $i$ in the sum), so that $\Psi^{L+1}\left(
x\right) -x-\Psi^{L}\left( x\right) \in\sum\limits_{i=1}^{n-1}H_{i}H_{n-i}%
$. Combined with $\Psi^{L+1}\left( x\right) -x-\Psi^{L}\left( x\right) \in
H_{n}$ (this is because $x\in H_{n}$ and thus $\Psi^{L}\left( x\right)
\in\Psi^{L}\left( H_{n}\right) \subseteq H_{n}$ and $\Psi^{L+1}\left(
x\right) \in\Psi^{L+1}\left( H_{n}\right) \subseteq H_{n}$), this yields%
\[
\Psi^{L+1}\left( x\right) -x-\Psi^{L}\left( x\right) \in\left(
\sum\limits_{i=1}^{n-1}H_{i}H_{n-i}\right) \cap H_{n}.
\]
In other words,%
\[
\Psi^{L+1}\left( x\right) \equiv x+\underbrace{\Psi^{L}\left( x\right) }_{\substack{\equiv
Lx\operatorname{mod}\left( \sum\limits_{i=1}^{n-1}H_{i}H_{n-i}\right) \cap
H_{n}\\\text{(because (\ref{1.18}) holds for }\ell=L\text{)}}}\equiv
x+Lx=\left( L+1\right) x\operatorname{mod}\left( \sum\limits_{i=1}%
^{n-1}H_{i}H_{n-i}\right) \cap H_{n}.
\]
In other words, (\ref{1.18}) holds for $\ell=L+1$. Thus, the induction step is
done. The induction proof of (\ref{1.18}) is therefore complete.
So now we know that (\ref{1.18}) holds for every $\ell$. In other words,
Corollary 1.18 \textbf{(b)} is proven.
\end{proof}
Finally, a consequence of Proposition 1.4:
\begin{quote}
\textbf{Corollary 1.19.} Let $H$ be a bialgebra, a graded bialgebra or a Hopf
algebra. Assume that $H$ is commutative or cocommutative. Then the
characteristic operations (defined in Definition 1.2) satisfy $\left(
\Psi^{k}\right) ^{s}=\Psi^{k^{s}}$ (where $\left( \Psi^{k}\right) ^{s}$
means $\underbrace{\Psi^{k}\circ\Psi^{k}\circ...\circ\Psi^{k}}_{s\text{
times}}$) for all $k\in\mathbb{N}$ and $s\in\mathbb{N}$.
\end{quote}
\begin{proof}
[Proof of Corollary 1.19.]Fix some $k\in\mathbb{N}$. We will prove $\left(
\Psi^{k}\right) ^{s}=\Psi^{k^{s}}$ by induction over $s$:
\textit{Induction base:} We have $\left( \Psi^{k}\right) ^{0}=I=I^{\ast
1}=\Psi^{1}$ (because $\Psi^{1}$ was defined as $I^{\ast1}$) and $\Psi^{k^{0}%
}=\Psi^{1}$. Thus, $\left( \Psi^{k}\right) ^{0}=\Psi^{1}=\Psi^{k^{0}}$. In
other words, $\left( \Psi^{k}\right) ^{s}=\Psi^{k^{s}}$ holds for $s=0$.
This completes the induction base.
\textit{Induction step:} Let $S\in\mathbb{N}$. Assume that $\left( \Psi
^{k}\right) ^{s}=\Psi^{k^{s}}$ holds for $s=S$. We must then prove that
$\left( \Psi^{k}\right) ^{s}=\Psi^{k^{s}}$ also holds for $s=S+1$.
Since $\left( \Psi^{k}\right) ^{s}=\Psi^{k^{s}}$ holds for $s=S$, we have
$\left( \Psi^{k}\right) ^{S}=\Psi^{k^{S}}$. Applying Proposition 1.4 to
$l=k^{S}$, we get $\Psi^{k}\circ\Psi^{k^{S}}=\Psi^{k\cdot k^{S}}=\Psi
^{k^{S+1}}$ (since $k\cdot k^{S}=k^{S+1}$). Hence, $\left( \Psi^{k}\right)
^{S+1}=\Psi^{k}\circ\underbrace{\left( \Psi^{k}\right) ^{S}}_{=\Psi^{k^{S}}%
}=\Psi^{k}\circ\Psi^{k^{S}}=\Psi^{k^{S+1}}$. In other words, $\left( \Psi
^{k}\right) ^{s}=\Psi^{k^{s}}$ is proven to hold for $s=S+1$. This completes
the induction step. Thus, the induction proof of $\left( \Psi^{k}\right)
^{s}=\Psi^{k^{s}}$ is complete. In other words, Corollary 1.19 is proven.
\end{proof}
Now to the actual proof of Lemma 6.3:
\begin{proof}
[Proof of Lemma 6.3.]We WLOG assume that $H$ is a graded bialgebra. (The case
when $H$ is a Hopf algebra is analogous.)
We WLOG assume that $H$ is commutative. (The case when $H$ is cocommutative
can be obtained from the case when $H$ is commutative by dualization using
Proposition 3.9.)
Fix some $k\in\mathbb{Z}$ such that $k\not \equiv 0\operatorname{mod}p$. Thus,
$k$ is not divisible by $p$. Thus, by Fermat's Little Theorem, $p\mid
k^{p-1}-1$ (since $p$ is prime), so that
\begin{equation}
\left( k^{p-1}-1\right) \left( x\right) =0\ \ \ \ \ \ \ \ \ \ \text{for
every }x\in H \label{6.3.pf.0}%
\end{equation}
(since $K$ has characteristic $p$).
We must prove that every positive integer $n$ satisfies%
\begin{equation}
\rho_{n}\left( \left( \Psi^{k^{p^{n-1}}}\right) ^{p-1}\right) =\rho
_{n}\left( I\right) . \label{6.3.pf.1}%
\end{equation}
In fact, we will prove (\ref{6.3.pf.1}) by induction over $n$:
\textit{Induction base:} We have $p^{1-1}=p^{0}=1$, so that $\Psi^{k^{p^{1-1}%
}}=\Psi^{k^{1}}=\Psi^{k}$, so that $\left( \Psi^{k^{p^{1-1}}}\right)
^{p-1}=\left( \Psi^{k}\right) ^{p-1}=\Psi^{k^{p-1}}$ (by Corollary 1.19,
applied to $s=p-1$).
Every $x\in H_{1}$ satisfies%
\[
\Psi^{k^{p-1}}\left( x\right) \equiv k^{p-1}x\operatorname{mod}\left(
\sum_{i=1}^{1-1}H_{i}H_{1-i}\right) \cap H_{1}%
\]
(by Corollary 1.18 \textbf{(b)}, applied to $\ell=k^{p-1}$ and $n=1$). Since
$\underbrace{\left( \sum\limits_{i=1}^{1-1}H_{i}H_{1-i}\right) }_{=\left(
\text{empty sum}\right) =0}\cap H_{1}=0\cap H_{1}=0$, this becomes
$\Psi^{k^{p-1}}\left( x\right) \equiv k^{p-1}x\operatorname{mod}0$. Hence,
every $x\in H_{1}$ satisfies $\Psi^{k^{p-1}}\left( x\right) \equiv
k^{p-1}x\operatorname{mod}0$, so that%
\[
\Psi^{k^{p-1}}\left( x\right) =k^{p-1}x=x+\underbrace{\left( k^{p-1}%
x-x\right) }_{\substack{=\left( k^{p-1}-1\right) x=0\\\text{(by
(\ref{6.3.pf.0}))}}}=x.
\]
Thus, every $x\in H_{1}$ satisfies $\left( \Psi^{k^{p-1}}-I\right) \left(
x\right) =\underbrace{\Psi^{k^{p-1}}\left( x\right) }_{=x}%
-\underbrace{I\left( x\right) }_{=x}=x-x=0$ and thus $x\in
\operatorname*{Ker}\left( \Psi^{k^{p-1}}-I\right) $. Hence, $H_{1}%
\subseteq\operatorname*{Ker}\left( \Psi^{k^{p-1}}-I\right) $. In other
words, $\left( \Psi^{k^{p-1}}-I\right) \left( H_{1}\right) =0$.
On the other hand, Corollary 1.18 \textbf{(a)} (applied to $\ell=k^{p-1}$)
yields $\Psi^{k^{p-1}}\left( 1\right) =1$ and $\left( \Psi^{k^{p-1}%
}-I\right) \left( H_{0}\right) =0$.
Now, since $\rho_{1}\left( \Psi^{k^{p-1}}-I\right) $ is the restriction of
the map $\Psi^{k^{p-1}}-I$ to $\bigoplus\limits_{i=0}^{1}H_{i}$, we have
\begin{align*}
\operatorname{Im}\left( \rho_{1}\left( \Psi^{k^{p-1}}-I\right) \right) &
=\left( \Psi^{k^{p-1}}-I\right) \underbrace{\left( \bigoplus\limits_{i=0}%
^{1}H_{i}\right) }_{\substack{=H_{0}\oplus H_{1}=H_{0}+H_{1}\\\text{(since
direct sums are sums)}}}=\left( \Psi^{k^{p-1}}-I\right) \left( H_{0}%
+H_{1}\right) \\
& =\underbrace{\left( \Psi^{k^{p-1}}-I\right) \left( H_{0}\right) }%
_{=0}+\underbrace{\left( \Psi^{k^{p-1}}-I\right) \left( H_{1}\right)
}_{=0}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\Psi^{k^{p-1}}-I\text{ is
}K\text{-linear}\right) \\
& =0+0=0,
\end{align*}
so that $\rho_{1}\left( \Psi^{k^{p-1}}-I\right) =0$. Thus,%
\begin{align*}
0 & =\rho_{1}\left( \Psi^{k^{p-1}}-I\right) =\rho_{1}\underbrace{\left(
\Psi^{k^{p-1}}\right) }_{=\left( \Psi^{k^{p^{1-1}}}\right) ^{p-1}}-\rho
_{1}\left( I\right) \ \ \ \ \ \ \ \ \ \ \left( \text{since }\rho_{1}\text{
is }K\text{-linear}\right) \\
& =\rho_{1}\left( \left( \Psi^{k^{p^{1-1}}}\right) ^{p-1}\right)
-\rho_{1}\left( I\right) ,
\end{align*}
so that $\rho_{1}\left( \left( \Psi^{k^{p^{1-1}}}\right) ^{p-1}\right)
=\rho_{1}\left( I\right) $. In other words, (\ref{6.3.pf.1}) holds for
$n=1$. This completes the induction base.
\textit{Induction step:} Let $N$ be a positive integer. Assume that
(\ref{6.3.pf.1}) holds for $n=N$. We must then prove that (\ref{6.3.pf.1})
also holds for $n=N+1$.
Since (\ref{6.3.pf.1}) holds for $n=N$, we have%
\[
\rho_{N}\left( \left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}\right) =\rho
_{N}\left( I\right) .
\]
Every $x\in\bigoplus\limits_{i=0}^{N}H_{i}$ satisfies $\left( \Psi
^{k^{p^{N-1}}}\right) ^{p-1}\left( x\right) =\left( \rho_{N}\left(
\left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}\right) \right) \left( x\right) $
(since $\rho_{N}\left( \left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}\right) $ is
the restriction of the map $\left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}$ to
$\bigoplus\limits_{i=0}^{N}H_{i}$) and $I\left( x\right) =\left( \rho
_{N}\left( I\right) \right) \left( x\right) $ (since $\rho_{N}\left(
I\right) $ is the restriction of the map $I$ to $\bigoplus\limits_{i=0}%
^{N}H_{i}$). Thus,%
\begin{equation}
\left(
\begin{array}
[c]{c}%
\text{every }x\in\bigoplus\limits_{i=0}^{N}H_{i}\text{ satisfies}\\
\left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}\left( x\right)
=\underbrace{\left( \rho_{N}\left( \left( \Psi^{k^{p^{N-1}}}\right)
^{p-1}\right) \right) }_{=\rho_{N}\left( I\right) }\left( x\right)
=\left( \rho_{N}\left( I\right) \right) \left( x\right) =I\left(
x\right) =x.
\end{array}
\right) . \label{6.3.pf.2a}%
\end{equation}
Let $E$ be the $K$-vector subspace $\bigoplus\limits_{i=0}^{N}H_{i}$ of $H$.
Then, (\ref{6.3.pf.2a}) rewrites as follows:%
\begin{equation}
\text{Every }x\in E\text{ satisfies }\left( \Psi^{k^{p^{N-1}}}\right)
^{p-1}\left( x\right) =x. \label{6.3.pf.2b}%
\end{equation}
On the other hand,
\begin{equation}
\text{every }j\in\left\{ 0,1,...,N\right\} \text{ satisfies }H_{j}\subseteq
E \label{6.3.pf.2c}%
\end{equation}
(because for every $j\in\left\{ 0,1,...,N\right\} $, the space $H_{j}$ is an
addend of the direct sum $\bigoplus\limits_{i=0}^{N}H_{i}$, and thus is
contained in $\bigoplus\limits_{i=0}^{N}H_{i}=E$).
We are now going to show that%
\begin{equation}
\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right) \left( H_{n}\right)
=0\ \ \ \ \ \ \ \ \ \ \text{for every }n\in\left\{ 0,1,...,N+1\right\} .
\label{6.3.pf.3}%
\end{equation}
\textit{Proof of (\ref{6.3.pf.3}).} Let $n\in\left\{ 0,1,...,N+1\right\} $
be arbitrary. Then, $n\leq N+1$, so that $n-1\leq N$.
Corollary 1.18 \textbf{(a)} (applied to $\ell=k^{p^{N}\left( p-1\right) }$)
yields $\Psi^{k^{p^{N}\left( p-1\right) }}\left( 1\right) =1$ and
\newline$\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right) \left(
H_{0}\right) =0$. Hence, (\ref{6.3.pf.3}) is already proven when $n=0$. Thus,
for the rest of the proof, we can WLOG assume that $n\neq0$. Assume this.
Then, $n$ is a positive integer.
Since $\Psi^{k^{p^{N-1}}}$ is a graded map, it satisfies $\Psi^{k^{p^{N-1}}%
}\left( H_{n}\right) \subseteq H_{n}$. Hence, it restricts to a $K$-linear
map $\beta:H_{n}\rightarrow H_{n}$ which satisfies $\left( \beta\left(
x\right) =\Psi^{k^{p^{N-1}}}\left( x\right) \text{ for every }x\in
H_{n}\right) $. Consider this $\beta$. Then,%
\begin{equation}
\beta^{\ell}\left( x\right) =\left( \Psi^{k^{p^{N-1}}}\right) ^{\ell
}\left( x\right) \ \ \ \ \ \ \ \ \ \ \text{for every }\ell\in\mathbb{N}%
\text{ and }x\in H_{n}. \label{6.3.pf.5}%
\end{equation}
\footnote{\textit{Proof of (\ref{6.3.pf.5}).} We will prove (\ref{6.3.pf.5})
by induction over $\ell$:
\par
\textit{Induction base:} We have $\underbrace{\beta^{0}}_{=\operatorname*{id}%
\nolimits_{H_{n}}}\left( x\right) =\operatorname*{id}\nolimits_{H_{n}%
}\left( x\right) =x=\underbrace{\operatorname*{id}}_{=\left( \Psi
^{k^{p^{N-1}}}\right) ^{0}}\left( x\right) =\left( \Psi^{k^{p^{N-1}}%
}\right) ^{0}\left( x\right) $. Thus, (\ref{6.3.pf.5}) holds for $\ell=0$.
The induction base is now complete.
\par
\textit{Induction step:} Let $L\in\mathbb{N}$. Assume that (\ref{6.3.pf.5})
holds for $\ell=L$. Now we must show that (\ref{6.3.pf.5}) also holds for
$\ell=L+1$.
\par
Let $x\in H_{n}$. We can apply (\ref{6.3.pf.5}) to $L$ and $\beta\left(
x\right) $ instead of $\ell$ and $x$ (since we assumed that (\ref{6.3.pf.5})
holds for $\ell=L$). This gives us $\beta^{L}\left( \beta\left( x\right)
\right) =\left( \Psi^{k^{p^{N-1}}}\right) ^{L}\left( \beta\left(
x\right) \right) $. Thus,%
\[
\beta^{L+1}\left( x\right) =\beta^{L}\left( \beta\left( x\right) \right)
=\left( \Psi^{k^{p^{N-1}}}\right) ^{L}\underbrace{\left( \beta\left(
x\right) \right) }_{=\Psi^{k^{p^{N-1}}}\left( x\right) }=\left(
\Psi^{k^{p^{N-1}}}\right) ^{L}\left( \Psi^{k^{p^{N-1}}}\left( x\right)
\right) =\left( \Psi^{k^{p^{N-1}}}\right) ^{L+1}\left( x\right) .
\]
Thus, we have proven that $\beta^{L+1}\left( x\right) =\left(
\Psi^{k^{p^{N-1}}}\right) ^{L+1}\left( x\right) $ for every $x\in H_{n}$.
In other words, (\ref{6.3.pf.5}) holds for $\ell=L+1$. This completes the
induction step. Thus, the induction proof of (\ref{6.3.pf.5}) is complete.}
Let $C$ be the $K$-vector space $H_{n}$. Let $D$ be the $K$-vector subspace
$\left( \sum\limits_{i=1}^{n-1}H_{i}H_{n-i}\right) \cap H_{n}$ of $C$. Since
$C=H_{n}$, the map $\beta$ is a map $C\rightarrow C$ (since $\beta$ is a map
$H_{n}\rightarrow H_{n}$).
Our goal is to apply Lemma 6.6. In order to do so, we must show that the
conditions (\ref{6.6.1}) and (\ref{6.6.2}) of Lemma 6.6 are satisfied. Let us
first prove that the condition (\ref{6.6.2}) is satisfied:
Since $k$ is not divisible by $p$, and since $p$ is prime, we have
$k^{p-1}\equiv1\operatorname{mod}p$ (by Fermat's Little Theorem). Thus,
$\left( k^{p-1}\right) ^{1+p+p^{2}+...+p^{N-2}}\equiv1^{1+p+p^{2}%
+...+p^{N-2}}=1\operatorname{mod}p$. Since $\left( k^{p-1}\right)
^{1+p+p^{2}+...+p^{N-2}}=k^{\left( p-1\right) \left( 1+p+p^{2}%
+...+p^{N-2}\right) }=k^{p^{N-1}-1}$ (because $\left( p-1\right) \left(
1+p+p^{2}+...+p^{N-2}\right) =p^{N-1}-1$), this becomes $k^{p^{N-1}-1}%
\equiv1\operatorname{mod}p$, so that $k^{p^{N-1}}=k\underbrace{k^{p^{N-1}-1}%
}_{\equiv1\operatorname{mod}p}\equiv k\operatorname{mod}p$. Hence, $p\mid
k^{p^{N-1}}-k$. Thus, $\left( k^{p^{N-1}}-k\right) x=0$ for every $x\in C$
(since $K$ has characteristic $p$).
Now, every $x\in C$ satisfies%
\begin{align*}
\beta\left( x\right) & =\Psi^{k^{p^{N-1}}}\left( x\right) \\
& \equiv k^{p^{N-1}}x\ \ \ \ \ \ \ \ \ \ \left( \text{by Corollary 1.18
\textbf{(b)} (applied to }\ell=k^{p^{N-1}}\text{), since }x\in C=H_{n}\right)
\\
& =kx+\underbrace{\left( k^{p^{N-1}}x-kx\right) }_{=\left( k^{p^{N-1}%
}-k\right) x=0}=kx\operatorname{mod}\left( \sum_{i=1}^{n-1}H_{i}%
H_{n-i}\right) \cap H_{n}.
\end{align*}
Since $\left( \sum\limits_{i=1}^{n-1}H_{i}H_{n-i}\right) \cap H_{n}=D$, this
rewrites as follows:%
\begin{equation}
\text{Every }x\in C\text{ satisfies }\beta\left( x\right) \equiv
kx\operatorname{mod}D. \label{6.3.pf.4}%
\end{equation}
Now to checking the condition (\ref{6.6.1}).
By Proposition 1.4, the characteristic operations of $H$ are algebra
homomorphisms (since $H$ is commutative); in particular, $\Psi^{k^{p^{N-1}%
\left( p-1\right) }}$ is an algebra homomorphism. Since $\left(
\Psi^{k^{p^{N-1}}}\right) ^{p-1}=\Psi^{k^{p^{N-1}\left( p-1\right) }}$ (by
Corollary 1.19, applied to $k^{p^{N-1}}$ and $p-1$ instead of $k$ and $s$),
this yields that $\left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}$ is a $K$-algebra homomorphism.
Now, let $x\in D$ be arbitrary. (Note that we are requiring $x\in D$ now, not
only $x\in C$.) Then,%
\begin{align*}
x & \in D=\left( \sum\limits_{i=1}^{n-1}H_{i}H_{n-i}\right) \cap
H_{n}\subseteq\sum\limits_{i=1}^{n-1}\underbrace{H_{i}}_{\substack{\subseteq
E\\\text{(since }i\leq n-1\leq N\text{,}\\\text{so that }i\in\left\{
0,1,...,N\right\} \text{,}\\\text{so that }H_{i}\subseteq E\\\text{(by
(\ref{6.3.pf.2c}), applied to }j=i\text{))}}}\ \ \underbrace{H_{n-i}%
}_{\substack{\subseteq E\\\text{(since }i\geq1\text{, thus }n-i\leq n-1\leq
N\text{,}\\\text{so that }n-i\in\left\{ 0,1,...,N\right\} \text{,}\\\text{so
that }H_{n-i}\subseteq E\\\text{(by (\ref{6.3.pf.2c}), applied to
}j=n-i\text{))}}}\\
& \subseteq\sum\limits_{i=1}^{n-1}EE\subseteq EE\ \ \ \ \ \ \ \ \ \ \left(
\text{since }EE\text{ is a }K\text{-vector space}\right) \\
& =\left( \text{the set of all }K\text{-linear combinations of elements of
the form }ee^{\prime}\text{ with }e\in E\text{ and }e^{\prime}\in E\right) .
\end{align*}
Hence, $x$ is a $K$-linear combination of elements of the form $ee^{\prime}$
with $e\in E$ and $e^{\prime}\in E$. In other words, we can write $x$ in the
form $x=\sum\limits_{i=1}^{I}\lambda_{i}e_{i}e_{i}^{\prime}$ for some
$I\in\mathbb{N}$, some elements $\lambda_{1}$, $\lambda_{2}$, $...$,
$\lambda_{I}$ of $K$, some elements $e_{1}$, $e_{2}$, $...$, $e_{I}$ of $E$,
and some elements $e_{1}^{\prime}$, $e_{2}^{\prime}$, $...$, $e_{I}^{\prime}$
of $E$. Consider this $I$, these $\lambda_{1}$, $\lambda_{2}$, $...$,
$\lambda_{I}$, these $e_{1}$, $e_{2}$, $...$, $e_{I}$, and these
$e_{1}^{\prime}$, $e_{2}^{\prime}$, $...$, $e_{I}^{\prime}$. Then,%
\begin{align*}
\beta^{p-1}\left( x\right) & =\left( \Psi^{k^{p^{N-1}}}\right)
^{p-1}\left( x\right) \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{6.3.pf.5})
(applied to }\ell=p-1\text{), since }x\in D\subseteq C=H_{n}\right) \\
& =\left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}\left( \sum\limits_{i=1}%
^{I}\lambda_{i}e_{i}e_{i}^{\prime}\right) \ \ \ \ \ \ \ \ \ \ \left(
\text{since }x=\sum\limits_{i=1}^{I}\lambda_{i}e_{i}e_{i}^{\prime}\right) \\
& =\sum\limits_{i=1}^{I}\lambda_{i}\underbrace{\left( \Psi^{k^{p^{N-1}}%
}\right) ^{p-1}\left( e_{i}\right) }_{\substack{=e_{i}\\\text{(by
(\ref{6.3.pf.2b}), applied to }e_{i}\text{ instead of }x\text{)}%
}}\underbrace{\left( \Psi^{k^{p^{N-1}}}\right) ^{p-1}\left( e_{i}^{\prime
}\right) }_{\substack{=e_{i}^{\prime}\\\text{(by (\ref{6.3.pf.2b}), applied
to }e_{i}^{\prime}\text{ instead of }x\text{)}}}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( \Psi^{k^{p^{N-1}}}\right)
^{p-1}\text{ is a }K\text{-algebra homomorphism}\right) \\
& =\sum\limits_{i=1}^{I}\lambda_{i}e_{i}e_{i}^{\prime}=x.
\end{align*}
Now forget that we fixed $x$. We have thus shown that every $x\in D$ satisfies
$\beta^{p-1}\left( x\right) =x$. Combined with (\ref{6.3.pf.4}), this shows
that all conditions of Lemma 6.6 are satisfied. Hence, we can apply Lemma 6.6,
and obtain $\left( \beta^{p-1}\right) ^{p}=\operatorname*{id}\nolimits_{C}$.
Thus, $\operatorname*{id}\nolimits_{C}=\left( \beta^{p-1}\right) ^{p}%
=\beta^{\left( p-1\right) p}$. Hence, every $x\in C$ satisfies%
\begin{align*}
x & =\underbrace{\operatorname*{id}\nolimits_{C}}_{=\beta^{\left(
p-1\right) p}}\left( x\right) =\beta^{\left( p-1\right) p}\left(
x\right) =\left( \Psi^{k^{p^{N-1}}}\right) ^{\left( p-1\right) p}\left(
x\right) \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{6.3.pf.5}), applied to
}\ell=\left( p-1\right) p\right) \\
& =\Psi^{k^{p^{N-1}\left( p-1\right) p}}\left( x\right)
\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since Corollary 1.19}\\
\text{(applied to }k^{p^{N-1}}\text{ and }\left( p-1\right) p\text{ instead
of }k\text{ and }s\text{)}\\
\text{yields }\left( \Psi^{k^{p^{N-1}}}\right) ^{\left( p-1\right) p}%
=\Psi^{k^{p^{N-1}\left( p-1\right) p}}%
\end{array}
\right) \\
& =\Psi^{k^{p^{N}\left( p-1\right) }}\left( x\right)
\ \ \ \ \ \ \ \ \ \ \left( \text{since }p^{N-1}\left( p-1\right)
p=\underbrace{p^{N-1}p}_{=p^{N}}\left( p-1\right) =p^{N}\left( p-1\right)
\right)
\end{align*}
and thus%
\[
\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right) \left( x\right)
=\underbrace{\Psi^{k^{p^{N}\left( p-1\right) }}\left( x\right) }%
_{=x}-\underbrace{I\left( x\right) }_{=x}=x-x=0,
\]
so that $x\in\operatorname*{Ker}\left( \Psi^{k^{p^{N}\left( p-1\right) }%
}-I\right) $. In other words, $C\subseteq\operatorname*{Ker}\left(
\Psi^{k^{p^{N}\left( p-1\right) }}-I\right) $. Hence, $\left(
\Psi^{k^{p^{N}\left( p-1\right) }}-I\right) \left( C\right) =0$. Since
$C=H_{n}$, this becomes $\left( \Psi^{k^{p^{N}\left( p-1\right) }%
}-I\right) \left( H_{n}\right) =0$. This proves (\ref{6.3.pf.3}).
Now, $\rho_{N+1}\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right) $ is
the restriction of the map $\Psi^{k^{p^{N}\left( p-1\right) }}-I$ to
$\bigoplus\limits_{i=0}^{N+1}H_{i}$. Hence,
\begin{align*}
& \operatorname{Im}\left( \rho_{N+1}\left( \Psi^{k^{p^{N}\left(
p-1\right) }}-I\right) \right) \\
& =\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right) \underbrace{\left(
\bigoplus\limits_{i=0}^{N+1}H_{i}\right) }_{\substack{=\sum\limits_{i=0}%
^{N+1}H_{i}\\\text{(since direct sums are sums)}}}=\left( \Psi^{k^{p^{N}%
\left( p-1\right) }}-I\right) \sum\limits_{i=0}^{N+1}H_{i}\\
& =\sum\limits_{i=0}^{N+1}\underbrace{\left( \Psi^{k^{p^{N}\left(
p-1\right) }}-I\right) \left( H_{i}\right) }_{=0\text{ (by (\ref{6.3.pf.3}%
), applied to }n=i\text{)}}\ \ \ \ \ \ \ \ \ \ \left( \text{since }%
\Psi^{k^{p^{N}\left( p-1\right) }}-I\text{ is }K\text{-linear}\right) \\
& =\sum\limits_{i=0}^{N+1}0=0,
\end{align*}
so that $\rho_{N+1}\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right) =0$.
Since $\rho_{N+1}\left( \Psi^{k^{p^{N}\left( p-1\right) }}-I\right)
=\rho_{N+1}\left( \Psi^{k^{p^{N}\left( p-1\right) }}\right) -\rho
_{N+1}\left( I\right) $ (because $\rho_{N+1}$ is $K$-linear), this becomes
$\rho_{N+1}\left( \Psi^{k^{p^{N}\left( p-1\right) }}\right) -\rho
_{N+1}\left( I\right) =0$, so that $\rho_{N+1}\left( \Psi^{k^{p^{N}\left(
p-1\right) }}\right) =\rho_{N+1}\left( I\right) $.
Since $\left( \Psi^{k^{p^{N+1-1}}}\right) ^{p-1}=\left( \Psi^{k^{p^{N}}%
}\right) ^{p-1}=\Psi^{k^{p^{N}\left( p-1\right) }}$ (by Corollary 1.19,
applied to $k^{p^{N}}$ and $p-1$ instead of $k$ and $s$), we have $\rho
_{N+1}\left( \left( \Psi^{k^{p^{N+1-1}}}\right) ^{p-1}\right) =\rho
_{N+1}\left( \Psi^{k^{p^{N}\left( p-1\right) }}\right) =\rho_{N+1}\left(
I\right) $. Thus, (\ref{6.3.pf.1}) holds for $n=N+1$. This completes the
induction step. Thus, the induction proof of (\ref{6.3.pf.1}) is complete,
and with it the proof of Lemma 6.3.
\end{proof}
\parbreak
\textbf{Page 1086, Proposition 6.4:} I think the condition that
\textquotedblleft$k\not \equiv 0\ \left[ p\right] ,$ $k\not \equiv
1\ \left[ p\right] $\textquotedblright\ has to be replaced by the (stronger)
condition that $k$ be a primitive root modulo $p$. Otherwise, the sum
$H^{\left( 1\right) }\oplus...\oplus H^{\left( p-1\right) }$ won't be a
well-defined direct sum anymore (since some of the addends will be equal).
\parbreak
\begin{center}
\textbf{Additional references}
\end{center}
[BF] Andrea Bonfiglioli and Roberta Fulci, \textit{A New Proof of the
Existence of Free Lie Algebras and an Application}, ISRN Algebra, Volume 2011
(2011), Article ID 247403.\newline%
\url{http://www.hindawi.com/journals/isrn.algebra/2011/247403/}
[C] Pierre Cartier, \textit{A primer of Hopf algebras}, IHES, September
2006.\newline\url{http://www.math.osu.edu/~kerler.2/VIGRE/InvResPres-Sp07/Cartier-IHES.pdf}
[DG1] \textit{6th QEDMO 2009, Problem 4 (the Cauchy identity)}, with Solution
by Darij Grinberg.\newline\url{http://www.cip.ifi.lmu.de/~grinberg/QEDMO6P4long.pdf}
[M] Dominique Manchon, \textit{Hopf algebras, from basics to applications to
renormalization}, Revised and updated version, May 2006,
arXiv:math/0408405v2.\newline\url{http://arxiv.org/abs/math/0408405v2}
\end{document}