\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{framed}
\usepackage{amsmath}
\usepackage{comment}
\usepackage{color}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{amsthm}
\usepackage{hyperref}% loaded last, as recommended (after fonts and amsthm)
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Wednesday, July 17, 2019 10:56:52}
%TCIDATA{SuppressPackageManagement}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\theoremstyle{definition}
\newtheorem{theo}{Theorem}[section]
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[theo]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{algorithm}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exmp}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand{\kk}{\mathbf{k}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\ev}{\operatorname{ev}}
\newcommand{\Comp}{\operatorname{Comp}}
\newcommand{\bk}{\mathbf{k}}
\newcommand{\Nplus}{\mathbb{N}_{+}}
\newcommand{\NN}{\mathbb{N}}
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\DeclareSymbolFont{bbold}{U}{bbold}{m}{n}
\DeclareSymbolFontAlphabet{\mathbbold}{bbold}
\setlength\textheight{22.5cm}
\setlength\textwidth{15cm}
\ihead{Errata to ``Introduction to Kac-Moody Lie algebras''}
\ohead{\today}
\begin{document}
\begin{center}
\textbf{Introduction to Kac-Moody Lie algebras}

\textit{Nicolas Perrin}

\url{http://www.hcm.uni-bonn.de/?id=961}

version of 2011

\textbf{Errata and addenda by Darij Grinberg}

\bigskip
\end{center}
%\setcounter{section}{}
\subsection{Errata}
I am not an expert in Lie theory; hence, please approach the corrections below
with a critical eye.
\begin{itemize}
\item \textbf{Definition 2.2.1:} Replace $x\otimes y-z\otimes x-\left[
x,y\right] $ by $x\otimes y-y\otimes x-\left[ x,y\right] $. (Could this be
due to the switched \textquotedblleft$y$\textquotedblright\ and
\textquotedblleft$z$\textquotedblright\ keys on the German keyboard layout?)
\item \textbf{Theorem 2.2.4:} The product $x_{1}^{a_{1}}\cdots x_{n}^{a_{n}}$
should be $e_{1}^{a_{1}}\cdots e_{n}^{a_{n}}$ here.
\item \textbf{Proposition 3.1.2:} Replace $\ell$ by $n$ (in ``size $\ell$'').
\item \textbf{Proof of Proposition 3.1.2:} ``These is easy'' should be ``This
is easy''.
\item \textbf{Proof of Proposition 3.1.2:} I don't understand the part of this
proof that begins with ``For this, we may assume that $A_{3}=0$ and $A_{4}%
=0$'' and ends with ``and the matrix $C$ is non degenerate''. Why can we
assume that $A_{3}=0$ and $A_{4}=0$ without changing things, and why do we
have the $\left( \operatorname*{Vect}\left( ...\right) \right) ^{\perp
}=\operatorname*{Vect}\left( ...\right) $ relations (particularly the second one)?
(Here is how I would show that the matrix $C$ is nondegenerate: Since
$\alpha_{1},\alpha_{2},...,\alpha_{n}$ are linearly independent and
$\left\langle \cdot,\cdot\right\rangle $ is a nondegenerate bilinear form, the
block matrix $\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
A_{3} & A_{4}\\
X_{1} & X_{2}%
\end{array}
\right) $ has rank $n$. But each row of the \textquotedblleft middle
part\textquotedblright\ (by this I mean the $\left(
\begin{array}
[c]{cc}%
A_{3} & A_{4}%
\end{array}
\right) $ part) of this matrix is a linear combination of the rows of the
\textquotedblleft upper part\textquotedblright\ (the $\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}%
\end{array}
\right) $ part) (because $\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
A_{3} & A_{4}%
\end{array}
\right) =\operatorname*{rank}A=\ell=\operatorname*{rank}A_{1}\leq
\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}%
\end{array}
\right) $). Hence, by performing row operations to the matrix $\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
A_{3} & A_{4}\\
X_{1} & X_{2}%
\end{array}
\right) $, we can replace the $\left(
\begin{array}
[c]{cc}%
A_{3} & A_{4}%
\end{array}
\right) $ part by zeroes.\footnote{Is this what you mean by \textquotedblleft
assume that $A_{3}=0$ and $A_{4}=0$\textquotedblright?} Since row operations
don't change the rank, this yields that $\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
0 & 0\\
X_{1} & X_{2}%
\end{array}
\right) =\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
A_{3} & A_{4}\\
X_{1} & X_{2}%
\end{array}
\right) $. Thus,%
\[
n=\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
A_{3} & A_{4}\\
X_{1} & X_{2}%
\end{array}
\right) =\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
0 & 0\\
X_{1} & X_{2}%
\end{array}
\right) =\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
X_{1} & X_{2}%
\end{array}
\right) .
\]
Now,%
\begin{align*}
\operatorname*{rank}C & =\operatorname*{rank}\left(
\begin{array}
[c]{ccc}%
A_{1} & A_{2} & 0\\
A_{3} & A_{4} & I_{n-\ell}\\
X_{1} & X_{2} & 0
\end{array}
\right) =\operatorname*{rank}\left(
\begin{array}
[c]{ccc}%
A_{1} & A_{2} & 0\\
X_{1} & X_{2} & 0\\
A_{3} & A_{4} & I_{n-\ell}%
\end{array}
\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since permutations of rows don't change
the rank of a matrix}\right) \\
& =\underbrace{\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
A_{1} & A_{2}\\
X_{1} & X_{2}%
\end{array}
\right) }_{=n}+\underbrace{\operatorname*{rank}\left( I_{n-\ell}\right)
}_{=n-\ell}\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since any block matrix of the form }\left(
\begin{array}
[c]{cc}%
U & 0\\
V & I_{m}%
\end{array}
\right) \\
\text{satisfies }\operatorname*{rank}\left(
\begin{array}
[c]{cc}%
U & 0\\
V & I_{m}%
\end{array}
\right) =\operatorname*{rank}U+m
\end{array}
\right) \\
& =n+n-\ell=2n-\ell,
\end{align*}
so that $C$ is nondegenerate, qed.)
\item \textbf{Definition 3.1.4:} You say: \textquotedblleft Any matrix can be
decomposed as a direct sum of indecomposable matrix\textquotedblright. Maybe
you should add \textquotedblleft(up to simultaneous permutation of rows and
columns)\textquotedblright\ here.
\item \textbf{Definition 3.1.5:} Maybe add \textquotedblleft for all
$h\in\mathfrak{h}$ and $h^{\prime}\in\mathfrak{h}$\textquotedblright\ to the
defining relations.
\item \textbf{Proof of Theorem 3.1.6:} It would be better to explicitly
distinguish between the vector space $\mathfrak{h}$ which belongs to the
realization of $A$, and the subspace $\mathfrak{h}$ of the Lie algebra
$\widetilde{\mathfrak{g}}\left( A\right) $. It is clear that there is a
canonical surjection from the former space to the latter space, but it is not
a priori clear that this surjection is a bijection (i.e., that the relations
given in Definition 3.1.5 don't force some elements of $\mathfrak{h}$ to
become zero). This does not become clear until the following argument in your proof:
\textquotedblleft Assume there is a relation $n_{-}+h+n_{+}=0$ with $n_{-}%
\in\widetilde{\mathfrak{n}}_{-}$, $h\in\mathfrak{h}$ and $n_{+}\in
\widetilde{\mathfrak{n}}_{+}$ [...] so that $h=0$\textquotedblright.
The $h$ at the beginning of this argument means an element of
$\widetilde{\mathfrak{g}}\left( A\right) $, whereas the $h$ in the end of
this argument means a corresponding element of the original vector space
$\mathfrak{h}$. Hence, this argument actually shows that if some element $h$
of our original vector space $\mathfrak{h}$ becomes $0$ in
$\widetilde{\mathfrak{g}}\left( A\right) $, then it must have been $0$ to
begin with. This justifies the identification of $\mathfrak{h}$ with the image
of $\mathfrak{h}\rightarrow\widetilde{\mathfrak{g}}\left( A\right) $. It
would help a lot if you make this explicit. (Maybe even in the theorem itself,
not just in the proof...)
\item \textbf{Proof of Theorem 3.1.6:} In the formula%
\begin{align*}
\left( he_{i}-e_{i}h\right) \left( a\otimes v_{j}\right) &
=\text{[...]}\\
& =\text{[...]}\\
& =\text{[...]}=\left\langle \alpha_{i},h\right\rangle e_{i}\left( a\otimes
v_{j}\right)
\end{align*}
(the parts that I have omitted are correct), replace both occurrences of
$a\otimes v_{j}$ by $v_{j}\otimes a$.
\item \textbf{Proof of Theorem 3.1.6:} When you say ``there is a surjective
map $U\left( \mathfrak{n}_{-}\right) \rightarrow T\left( V\right) $'', you
might want to add ``and this map is an algebra homomorphism'' (since otherwise
the next sentence is not clear).
\item \textbf{Theorem 3.1.6 and its Proof:} It seems that you write
$\mathfrak{n}_{+}$ for $\widetilde{\mathfrak{n}}_{+}$ (and, similarly,
$\mathfrak{n}_{-}$ for $\widetilde{\mathfrak{n}}_{-}$) several times here (e.
g., in part (iv) of the theorem).
\item \textbf{Proof of Theorem 3.1.6:} When you write $n_{-}\left( 1\right)
=\left\langle \alpha,h\right\rangle 1$, I think you mean $n_{-}\left(
1\right) =-\left\langle \alpha,h\right\rangle 1$.
\item \textbf{Proof of Theorem 3.1.6:} In the formula%
\[
\mathfrak{n}_{\pm}=\bigoplus\limits_{\alpha\in Q_{+},\ \alpha\neq
0}\widetilde{\mathfrak{g}}_{\pm},
\]
the $\widetilde{\mathfrak{g}}_{\pm}$ should be $\widetilde{\mathfrak{g}}%
_{\pm\alpha}$.
\item \textbf{Proof of Theorem 3.1.6:} When you say ``we have the inequality
$\dim\widetilde{\mathfrak{g}}_{\alpha}\leq n^{\left\vert \operatorname*{ht}%
\alpha\right\vert }$\ \ \ \ '', you might want to add ``for $\alpha\neq0$''.
\item \textbf{Definition 3.1.8:} Add a whitespace after ``(i)''.
\item \textbf{Definition 3.1.8:} Replace ``is it also contained'' by ``it is
also contained''.
\item \textbf{Between Definition 3.1.8 and Definition 3.1.9:} When you say
``We have the estimate $\dim\mathfrak{g}_{\alpha}<\ldots$''
[\ldots] whenever $t>0$ and lie in $\mathfrak{i}$ whenever $t=0$.
% NOTE(review): some text has been lost here -- everything between the ``<''
% of the estimate and the ``>'' of ``$t>0$'' was apparently swallowed (most
% likely mistaken for an HTML tag during a conversion step), and the lost
% passage may well have spanned several erratum items. Restore the missing
% text from the original source of this document.
\item \textbf{Proof of Lemma 3.2.7:} Replace ``$a=0$'' by ``$x=0$''.
\item \textbf{Proof of Proposition 3.2.6 (continued after proof of Lemma
3.2.7):} Replace ``Lemme 3.2.5'' by ``Proposition 3.2.5 (ii)''. In the formula
directly below this, replace $f_{i}^{a_{i,j}}$ by $f_{i}^{-a_{i,j}}$.
\item \textbf{Proof of Proposition 3.2.6 (continued after proof of Lemma
3.2.7):} I think that $-a_{i,j}\left( \operatorname*{ad}f_{i}\right)
^{-a_{i,j}}\left( f_{i}\right) $ should be $a_{j,i}\left(
\operatorname*{ad}f_{i}\right) ^{-a_{i,j}}\left( f_{i}\right) $ here. Now
you use the ``$a_{i,j}=0$ $\Longrightarrow$ $a_{j,i}=0$'' condition from
Definition 3.2.1 to see that this vanishes. (But I may very well be mistaken.)
\item \textbf{Proof of Lemma 3.2.8:} By ``block matrix'' you mean
``block-diagonal matrix''.
\item \textbf{Proposition 3.2.9:} In part (ii), replace $g^{\prime}\left(
A\right) $ by $\mathfrak{g}^{\prime}\left( A\right) $.
\item \textbf{Proposition 3.2.9:} The conclusion of part (iii) should not be
``$\mathfrak{g}^{\prime}\left( A\right) /\mathfrak{c}=0$'' but it should be
``$\mathfrak{g}^{\prime}\left( A\right) /\mathfrak{c}$ is simple''.
\item \textbf{Proof of Proposition 3.2.9:} You write: ``If for any $\alpha$ we
have $\mathfrak{i}\cap\mathfrak{g}_{\alpha}=0$ then $\mathfrak{i}%
\subset\mathfrak{h}$''. Here, it would be better to replace ``any'' by
``all'', since ``any'' could also mean ``some''. Also, again you should say
that you are only considering $\alpha\neq0$.
\item \textbf{Proof of Proposition 3.2.9:} You write: ``We can therefore take
$\alpha$ a root minimal''. By ``minimal'' you mean ``minimal among the roots
in $Q_{+}$'' (not all of $Q$).
\item \textbf{Proof of Proposition 3.2.9:} Replace ``colinear'' by ``collinear''.
\item \textbf{Proof of Proposition 3.2.9:} It would be better not to speak of
``the center'' here, but just say $\mathfrak{c}$, because ``the center'' might
also mean the center of $\mathfrak{g}^{\prime}\left( A\right) $ (and I am
not sure whether this is the same center).
\item \textbf{Proof of Proposition 3.2.9:} Replace ``where $n\in
\mathfrak{n}_{-}\oplus\mathfrak{n}_{+}$ and $h\in\mathfrak{h}$'' by ``where
$n\in\mathfrak{n}_{-}\oplus\mathfrak{n}_{+}$ and $h\in\mathfrak{h}^{\prime}$''.
\item \textbf{Proof of Proposition 3.2.9:} At the moment when you write ``By
minimality, this implies that $\gamma=\alpha_{i}$'', I am losing track of what
you are doing. However, it is not hard to complete the proof from here:
Since $\left[ f_{i},x\right] \in\mathfrak{i}$ and $\left[ f_{i},x\right]
_{\gamma-\alpha_{i}}\neq0$, we get a contradiction to the minimality of
$\gamma$ unless either $\left[ f_{i},x\right] _{0}\neq0$ or $\left[
f_{i},x\right] \in\mathfrak{c}$. So we conclude that either $\left[
f_{i},x\right] _{0}\neq0$ or $\left[ f_{i},x\right] \in\mathfrak{c}$. In
the former case, we must have $x_{\alpha_{i}}\neq0$ (since $\left[
f_{i},x_{\alpha_{i}}\right] =\left[ f_{i},x\right] _{0}\neq0$). In the
latter case, we must have $x_{\alpha_{i}}\neq0$ as well (since $\left[
f_{i},x\right] \in\mathfrak{c}\subseteq\mathfrak{h}$ and thus $\left[
f_{i},x\right] =\left[ f_{i},x\right] _{0}$, so that $\left[
f_{i},x_{\alpha_{i}}\right] =\left[ f_{i},x\right] _{0}\neq0$). Hence, in
both cases, we have $x_{\alpha_{i}}\neq0$. Thus, $x_{\alpha_{i}}$ is a nonzero
scalar multiple of $e_{i}$ (since $x_{\alpha_{i}}\in\mathfrak{g}_{\alpha_{i}}%
$). Hence, $\left[ f_{i},x_{\alpha_{i}}\right] $ is a nonzero scalar
multiple of $\left[ f_{i},e_{i}\right] =-\alpha_{i}^{\vee}$, therefore a
nonzero scalar multiple of $\alpha_{i}^{\vee}$. Since $\left[ f_{i}%
,x_{\alpha_{i}}\right] =\left[ f_{i},x\right] _{0}$, this shows that
$\left[ f_{i},x\right] _{0}$ is a nonzero scalar multiple of $\alpha
_{i}^{\vee}$. Since $\left[ f_{i},x\right] _{0}\in\mathfrak{i}$ (because
$\left[ f_{i},x\right] \in\mathfrak{i}$ and by Lemma 3.1.7), this yields
$\alpha_{i}^{\vee}\in\mathfrak{i}$. Since $\alpha_{i}^{\vee}\notin%
\mathfrak{c}$ (this is easy to prove using Proposition 3.1.12 and the fact
that $A$ is an indecomposable Cartan matrix), this yields that there exists an
element $h\in\mathfrak{i}\cap\mathfrak{h}$ not in $\mathfrak{c}$ (namely,
$h=\alpha_{i}^{\vee}$). As you already have shown above, this concludes the proof.
\item \textbf{Lemma 4.1.2:} In part (i), replace ``$x$, $y$ and $z$'' by ``$x$
and $y$''.
\item \textbf{Proof of Lemma 4.1.2:} You write: ``Applying it to the adjoint
representation gives the result.'' Why? If you apply the formula%
\[
\left( \operatorname*{ad}x\right) ^{k}\left[ y,z\right] =\sum
\limits_{i=0}^{k}\dbinom{k}{i}\left[ \left( \operatorname*{ad}x\right)
^{i}y,\left( \operatorname*{ad}x\right) ^{k-i}z\right]
\ \ \ \ \ \ \ \ \ \ \text{in }U\left( \mathfrak{g}\right)
\]
to the adjoint representation, you get%
\[
\operatorname*{ad}\left( \left( \operatorname*{ad}x\right) ^{k}\left[
y,z\right] \right) =\operatorname*{ad}\left( \sum\limits_{i=0}^{k}%
\dbinom{k}{i}\left[ \left( \operatorname*{ad}x\right) ^{i}y,\left(
\operatorname*{ad}x\right) ^{k-i}z\right] \right)
\ \ \ \ \ \ \ \ \ \ \text{in }\mathfrak{g},
\]
which does not immediately yield $\left( \operatorname*{ad}x\right)
^{k}\left[ y,z\right] =\sum\limits_{i=0}^{k}\dbinom{k}{i}\left[ \left(
\operatorname*{ad}x\right) ^{i}y,\left( \operatorname*{ad}x\right)
^{k-i}z\right] $ in $\mathfrak{g}$ unless we know that $\mathfrak{g}$ has
trivial center. Maybe you wanted to use Corollary 2.2.5 (i), but then you
wouldn't need the adjoint representation. Am I understanding something wrong?
\item \textbf{Proof of Corollary 4.1.3:} You write: ``In particular both parts
of the equality are well defined.'' Why is the left hand side well-defined?
\item \textbf{Lemma 4.1.4:} In part (ii), replace ``(resp. locally nilpotent
element)'' by ``(resp. locally nilpotent) element''.
\item \textbf{Proof of Lemma 4.1.5:} Replace ``$t\in C$'' by ``$t\in
\mathbb{C}$''.
\item \textbf{Proof of Lemma 4.1.5:} There are some opening brackets missing
and/or some closing brackets too much in certain equations in this proof. For
example: $\exp\left( \operatorname*{ad}y\right) )\left( x\right) )$.
\item \textbf{Corollary 4.1.7:} Replace ``and locally nilpotent'' by ``are
locally nilpotent''.
\item \textbf{Proposition 4.2.2:} Replace ``integral'' by ``integrable''.
\item \textbf{Proposition 4.2.2:} Replace ``$g_{\left( i\right) }$'' by
``$\mathfrak{g}_{\left( i\right) }$''.
\item \textbf{Proposition 6.1.2:} In part (i), replace ``symmetrisable
generalised Cartan matrix'' by ``a symmetrisable generalised Cartan matrix''.
\item \textbf{Proposition 6.1.2:} In part (ii), replace ``symmetric'' by ``symmetrisable''.
\item \textbf{Proposition 6.1.2:} In part (iii), replace ``symmetric
indecomposable'' by ``symmetrisable indecomposable''.
\item \textbf{Proof of Proposition 6.1.2:} Replace ``These solutions'' by
``These equations''.
\item \textbf{Proof of Proposition 6.1.2:} Replace ``Furthermore because all
the $a_{i_{j},i_{j+1}}$ are non negative'' by ``Furthermore because all the
$a_{i_{j},i_{j+1}}$ and $a_{i_{j+1},i_{j}}$ are negative''.
\item \textbf{Proof of Proposition 6.1.2:} Replace ``me may assume'' by ``we
may assume''.
\item \textbf{Proposition 6.1.3:} Replace ``for all sequence $i_{1}\cdots
i_{k}$'' by ``for all sequences $\left( i_{1},\cdots,i_{k}\right) $''.
\item Your use of American English vs. British English (``realization'' vs.
``realisation'') is inconsistent.
\item \textbf{Proposition 6.2.1:} The sentence ``Let $A$ be symmetrizable and
indecomposable.'' could be better placed at the very beginning of this
proposition, not inside part (i), because it concerns all three parts (i),
(ii) and (iii).
\item \textbf{Proposition 6.2.1:} In part (ii), replace ``resctriction'' by ``restriction''.
\item \textbf{Proof of Proposition 6.2.1:} Replace ``Let
$D=\operatorname*{Diag}\left( \epsilon_{i}\right) $ be a diagonal matrix''
by ``Let $D=\operatorname*{Diag}\left( \epsilon_{i}\right) $ be a
nondegenerate diagonal matrix''.
\item \textbf{Proof of Proposition 6.2.1:} When you write ``$\left(
\alpha_{i}^{\vee},\alpha_{j}^{\vee}\right) =\left\langle \alpha_{i}%
,\alpha_{j}^{\vee}\right\rangle \epsilon_{i}=\left\langle \alpha_{j}^{\vee
},\alpha_{i}\right\rangle \epsilon_{j}=\left( \alpha_{j}^{\vee},\alpha
_{i}^{\vee}\right) $'', you should replace $\left\langle \alpha_{j}^{\vee
},\alpha_{i}\right\rangle $ by $\left\langle \alpha_{j},\alpha_{i}^{\vee
}\right\rangle $.
\item \textbf{Proof of Proposition 6.2.1:} Replace ``$\left\langle \sum
_{i}c_{i}\epsilon_{i}\alpha_{i}^{\vee},h^{\prime}\right\rangle =0$'' by
``$\left\langle \sum_{i}c_{i}\epsilon_{i}\alpha_{i},h^{\prime}\right\rangle
=0$''.
\item \textbf{Proof of Proposition 6.2.1:} In your proof of $\left(
s_{i}\left( h\right) ,s_{i}\left( h^{\prime}\right) \right) =\left(
h,h^{\prime}\right) $, you should replace $\left\langle \alpha_{i},h^{\prime
}\right\rangle \left\langle \alpha_{i},h^{\prime}\right\rangle $ by
$\left\langle \alpha_{i},h\right\rangle \left\langle \alpha_{i},h^{\prime
}\right\rangle $. (This typo appears twice.) Also, replace $\left\langle
\alpha_{i},h^{\prime}\right\rangle \left\langle h^{\prime},\alpha
_{i}\right\rangle $ by $\left\langle \alpha_{i},h^{\prime}\right\rangle
\left\langle h,\alpha_{i}\right\rangle $.
\item \textbf{Proof of Proposition 6.2.1:} Replace ``Let us set $\epsilon
_{i}=\left( \left( \alpha_{i},\alpha_{i}^{\vee}\right) \right) /2$'' by
``Let us set $\epsilon_{i}=\left( \left( \alpha_{i}^{\vee},\alpha_{i}^{\vee
}\right) \right) /2$''.
\item \textbf{Remark 6.2.3:} I do not see why $\left( \alpha_{i},\alpha
_{i}\right) >0$ should hold unless we choose the $\epsilon_{i}$ positive in
the construction of the form $\left( \cdot,\cdot\right) $.
\item \textbf{Proof of Theorem 6.2.5:} Replace ``For $\alpha=\sum_{i}%
\alpha_{i}$'' by ``For $\alpha=\sum_{i}k_{i}\alpha_{i}$''.
\item \textbf{Proof of Theorem 6.2.5:} I think what you call $\left\vert
\alpha\right\vert $ here is what you called $\operatorname*{ht}\alpha$ in
Chapter 3.
\item \textbf{Proof of Theorem 6.2.5:} You say: ``this proves the invariance
since the other conditions all vanish''. This is not exactly the case (for
example, the condition $\left( \left[ e_{i},h\right] ,f_{j}\right)
=\left( e_{i},\left[ h,f_{j}\right] \right) $ does not vanish, nor does
the condition $\left( \left[ f_{j},e_{i}\right] ,h\right) =\left(
f_{j},\left[ e_{i},h\right] \right) $). Still it is probably fair to say
that the other conditions are similarly proven.
\item \textbf{Proof of Theorem 6.2.5:} You write: ``where all the elements
$a$, $b$, $c$ and $d$ as well as the brackets $\left[ \left[ a,b\right]
,c\right] $, $\left[ b,\left[ c,d\right] \right] $, $\left[ \left[
a,c\right] ,b\right] $, $\left[ a,\left[ b,c\right] \right] $, $\left[
a,c\right] $, $\left[ b,d\right] $, $\left[ \left[ b,c\right] ,d\right]
$ and $\left[ c,\left[ b,d\right] \right] $ are in $\mathfrak{g}\left(
N-1\right) $''. This condition is not enough (for the proof at least); you
also need $\left[ b,c\right] $ to lie in $\mathfrak{g}\left( N-1\right) $.
\item \textbf{Proof of Theorem 6.2.5:} Replace $\left( \left[ \left[
s_{j},t_{j}\right] ,u_{i}\right] ,v_{j}\right) $ by $\left( \left[
\left[ s_{j},t_{j}\right] ,u_{i}\right] ,v_{i}\right) $.
\item \textbf{Proof of Theorem 6.2.5:} You write: ``Then we have to define
$\left( x,y\right) $ and $\left( y,x\right) $ for $x\in\mathfrak{g}_{N}$
and $y\in\mathfrak{g}_{-N}$''. But you define only $\left( x,y\right) $.
This, of course, is easy to fix: just define $\left( y,x\right) $ to mean
$\left( x,y\right) $. As a consequence of this definition, we see by
induction that the form $\left( \cdot,\cdot\right) $ on $\mathfrak{g}\left(
N\right) \times\mathfrak{g}\left( N\right) $ is symmetric.
\item \textbf{Proof of Theorem 6.2.5:} You write:
``For the invariance, we still need to prove that for $x\in\mathfrak{g}_{N}$,
for $y\in\mathfrak{g}_{-N}$ and for all $h$ we have the relations%
\[
\left( x,\left[ h,y\right] \right) =\left( \left[ x,h\right] ,y\right)
\ \ \ \ \ \ \ \ \ \ \text{and}\ \ \ \ \ \ \ \ \ \ \left( \left[ x,y\right]
,h\right) =\left( x,\left[ y,h\right] \right) .
\]
''
This is not enough. First of all, I think you need also to prove the relation
$\left( h,\left[ x,y\right] \right) =\left( \left[ h,x\right]
,y\right) $ (but that's easy: it follows from $\left( \left[ x,y\right]
,h\right) =\left( x,\left[ y,h\right] \right) $ using the symmetry of
$\left( \cdot,\cdot\right) $ and the antisymmetry of $\left[ \cdot
,\cdot\right] $). Secondly, you also need to show that $\left( \left[
x,y\right] ,z\right) =\left( x,\left[ y,z\right] \right) $ holds
whenever one of the vectors $x,y,z$ lies in either $\mathfrak{g}_{N}$ or
$\mathfrak{g}_{-N}$ and the other two lie in $\mathfrak{g}\left( N-1\right)
$. It seems to me that the latter part is easy, but I am not sure whether it
immediately follows from the definition of $\left( x,y\right) $ as
$\sum\limits_{i}\left( \left[ x,u_{i}\right] ,v_{i}\right) =\sum
\limits_{j}\left( s_{j},\left[ t_{j},y\right] \right) $ (at least it does
not follow without some rewriting using the symmetry of $\left( \cdot
,\cdot\right) $ and the antisymmetry of $\left[ \cdot,\cdot\right] $; and
even then there are a lot of cases to consider).
\item \textbf{Proof of Theorem 6.2.5:} In your proof of $\left( x,\left[
h,y\right] \right) =\left( \left[ x,h\right] ,y\right) $ (for
$x\in\mathfrak{g}_{N}$, for $y\in\mathfrak{g}_{-N}$ and for all $h$), you
should replace all $\sum_{i}$ signs by $\sum_{j}$ signs.
\end{itemize}
\end{document}