\documentclass[12pt,final,notitlepage,onecolumn]{article}%
\usepackage[all,cmtip]{xy}
\usepackage{lscape}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{color}
\usepackage{comment}
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{graphicx}%
\setcounter{MaxMatrixCols}{30}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{CSTFile=LaTeX article (bright).cst}
%TCIDATA{Created=Sat Mar 27 17:33:36 2004}
%TCIDATA{LastRevised=Monday, May 01, 2017 00:26:26}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%TCIDATA{}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\definecolor{violet}{RGB}{143,0,255}
\definecolor{forestgreen}{RGB}{34, 100, 34}
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\voffset=-2.5cm
\hoffset=-2.5cm
\setlength\textheight{24cm}
\setlength\textwidth{15.5cm}
\begin{document}
\begin{center}
\textbf{Lecture Notes on Cherednik Algebras}
\textit{Pavel Etingof and Xiaoguang Ma}
\href{https://arxiv.org/abs/1001.0432v4}{arXiv:1001.0432v4 (version 4, 19 Apr
2010)}
\textbf{Errata and questions by Darij Grinberg}
\bigskip
\end{center}
These are corrections and comments to the ``Lecture Notes on Cherednik
Algebras'' by Pavel Etingof and Xiaoguang Ma. In their current form, they
cover only the first ca. 10 pages of the notes.
\section*{Section 1}
\begin{itemize}
\item \textbf{Page 4:} Replace ``irrieducible'' by ``irreducible''.
\item \textbf{Page 5:} Replace ``shperical'' by ``spherical''.
\end{itemize}
\section*{Section 2}
\begin{itemize}
\item \textbf{Page 6, Theorem 2.1:} I think the words ``rational
coefficients'', ``lower order terms'' and ``homogeneous'' need some more
explanations. Here is how I understand them; please correct me if I am getting
something wrong:
\textit{``rational coefficients''} means ``coefficients which are rational
functions in the variables $x_{1},x_{2},...,x_{n}$'' (not ``coefficients which
are rational numbers'' or ``coefficients which are polynomials over
$\mathbb{Q}$'').
\textit{``lower order terms''} means the following: Let $\mathbf{D}$ be the
$\mathbb{C}$-algebra of all partial differential operators in the variables
$x_{1},x_{2},...,x_{n}$ whose coefficients are rational functions in the
variables $x_{1},x_{2},...,x_{n}$. Define a $\mathbb{C}$-algebra filtration on
$\mathbf{D}$ by requiring that all rational functions in $x_{1},x_{2}%
,...,x_{n}$ are in filtration degree $0$, and all $\dfrac{\partial}{\partial
x_{j}}$ are in filtration degree $1$. Then,
\[
L_{j}=\sum\limits_{i=1}^{n}\left( \dfrac{\partial}{\partial x_{i}}\right)
^{j}+\text{lower order terms}%
\]
means that%
\[
L_{j}\equiv\sum\limits_{i=1}^{n}\left( \dfrac{\partial}{\partial x_{i}%
}\right) ^{j}\operatorname{mod}\left( \left( j-1\right) \text{-th filtered
part of }\mathbf{D}\right) .
\]
And the \textit{order} of a partial differential operator $E\in\mathbf{D}$
means the smallest $m\in\mathbb{N}$ such that $E$ lies in the $m$-th filtered
part of $\mathbf{D}$. Am I seeing this right?
Note that this $\mathbb{C}$-algebra filtration on $\mathbf{D}$ can be also
characterized differently: Let $\mathbf{D}_{\operatorname*{const}}$ denote the
$\mathbb{C}$-algebra of all partial differential operators in the variables
$x_{1},x_{2},...,x_{n}$ whose coefficients are constant. Let the unadorned
$\otimes$ sign denote $\otimes_{\mathbb{C}}$. Then, $\mathbf{D}=\mathbb{C}%
\left( x_{1},x_{2},...,x_{n}\right) \otimes\mathbf{D}_{\operatorname*{const}%
}$ as vector spaces. Since the algebra $\mathbf{D}_{\operatorname*{const}}$ is
canonically graded (by giving all $\dfrac{\partial}{\partial x_{j}}$ the
degree $1$) and the algebra $\mathbb{C}\left( x_{1},x_{2},...,x_{n}\right) $
is trivially graded (by giving its every element the degree $0$), the tensor
product $\mathbb{C}\left( x_{1},x_{2},...,x_{n}\right) \otimes
\mathbf{D}_{\operatorname*{const}}$ is also graded. Since $\mathbf{D}%
=\mathbb{C}\left( x_{1},x_{2},...,x_{n}\right) \otimes\mathbf{D}%
_{\operatorname*{const}}$ as vector spaces, this yields that the vector space
$\mathbf{D}$ is also graded (albeit this is not a grading of the $\mathbb{C}%
$\textbf{-algebra }$\mathbf{D}$, since generally $\mathbf{D}\neq
\mathbb{C}\left( x_{1},x_{2},...,x_{n}\right) \otimes\mathbf{D}%
_{\operatorname*{const}}$ as algebras), hence filtered. This filtration is
easily seen to be the same filtration on $\mathbf{D}$ as defined above.
Note that as vector spaces,%
\begin{align*}
& \left( j\text{-th filtered part of }\mathbf{D}\right) \diagup\left(
\left( j-1\right) \text{-th filtered part of }\mathbf{D}\right) \\
& \cong\left( j\text{-th graded part of }\mathbf{D}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since the filtration of }\mathbf{D}\text{
comes from a vector space grading on }\mathbf{D}\right) \\
& =\left( j\text{-th graded part of }\mathbb{C}\left( x_{1},x_{2}%
,...,x_{n}\right) \otimes\mathbf{D}_{\operatorname*{const}}\right) \\
& =\mathbb{C}\left( x_{1},x_{2},...,x_{n}\right) \otimes\left( j\text{-th
graded part of }\mathbf{D}_{\operatorname*{const}}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\mathbb{C}\left( x_{1}%
,x_{2},...,x_{n}\right) \text{ is concentrated in degree }0\right) .
\end{align*}
\textit{``homogeneous''} means the following: Let $\mathbf{D}_{\hom}$ be the
$\mathbb{C}$-subalgebra of the algebra $\mathbf{D}$ (defined above) generated
by all homogeneous rational functions in $x_{1},x_{2},...,x_{n}$ and the
derivations $\dfrac{\partial}{\partial x_{j}}$. This is a graded algebra,
where the degree of a homogeneous rational function is its usual degree, and
the degree of a derivation $\dfrac{\partial}{\partial x_{j}}$ is $-1$. Then,
when we say that a differential operator in $\mathbf{D}$ is
\textit{homogeneous of degree }$k$ (for some integer $k$), we mean that this
operator lies in $\mathbf{D}_{\hom}$ and has degree $k$.
\item \textbf{Page 6, four lines above Definition 2.3:} You speak of an
``inner product''. Maybe point out that it is supposed to be bilinear, not
sesquilinear (some people might be confused).
\item \textbf{Page 6, two lines above Definition 2.3:} You say ``equivalently,
$s$ is conjugate to $\operatorname*{diag}\left( -1,1,...,1\right) $''.
Conjugate where? in $\operatorname*{GL}\left( \mathfrak{h}\right) $ or in
$\operatorname*{O}\left( \mathfrak{h}\right) $ ? In this case, both are true
(as long as we suppose $s$ to lie in $\operatorname*{O}\left( \mathfrak{h}%
\right) $), but it would be better if you would point this out more explicitly.
\item \textbf{Page 6, Theorem 2.4, and many times after:} I think Theorem 2.4
is called the Chevalley-Shep\textbf{h}ard-Todd theorem, with two ``h'''s in
``Shephard'' (cf. \url{http://en.wikipedia.org/wiki/Geoffrey_Colin_Shephard} ).
\item \textbf{Page 6, one line below Theorem 2.4:} Maybe add ``if $G$ is a
complex reflection group'' into the sentence that comes directly after Theorem 2.4.
\item \textbf{Page 6, two lines below Theorem 2.4:} You write: ``The numbers
$d_{i}$ are uniquely determined''. You need to add here that you require
$d_{1}\leq d_{2}\leq...\leq d_{\dim\mathfrak{h}}$ (else, the ``$L_{1}=H$''
part of Theorem 2.9 makes no sense).
\item \textbf{Page 7, Example 2.5:} It is not clear what $p_{i}$ are, and why
you write $P_{i}\left( p_{1},...,p_{n}\right) $ (the $p_{i}$ are definitely
not polynomial variables, since they are algebraically dependent). Let me just
record the answer (which you explained in an email): You want $p_{i}%
=e_{i}-\dfrac{e_{1}+e_{2}+\cdots+e_{n}}{n}\in\mathfrak{h}$ (where $e_{1}%
,e_{2},...,e_{n}$ are the standard basis vectors of $\mathbb{C}^{n}$), and
instead of $P_{i}\left( p_{1},...,p_{n}\right) $ you simply want to write
$P_{i}$.
\item \textbf{Page 7, between Definition 2.6 and Example 2.7:} You write:
``Note that by Chevalley's theorem, a parabolic subgroup of a complex
(respectively, real) reflection group is itself a complex (respectively, real)
reflection group.'' What Chevalley's theorem do you mean? If you are applying
Theorem 2.4, isn't it quite an overkill? (Or is there really no simpler proof?)
\item \textbf{Page 7, between Example 2.7 and \S 2.4:} You write: ``and we can
define the open set $\mathfrak{h}_{\operatorname*{reg}}^{\ast G^{\prime}}$ of
all $\lambda\in\mathfrak{h}^{G^{\prime}}$ for which $G_{\lambda}=G^{\prime}%
$''. I think the ``$\mathfrak{h}^{G^{\prime}}$'' should be ``$\mathfrak{h}%
^{\ast G^{\prime}}$'' here.
\item \textbf{Page 7, first line of \S 2.4:} Replace ``Let $s\subset
\operatorname*{GL}\left( \mathfrak{h}\right) $'' by ``Let $s\in
\operatorname*{GL}\left( \mathfrak{h}\right) $''.
\item \textbf{Page 7, second line of \S 2.4:} You might want to point out that
a ``nontrivial eigenvalue'' of a reflection means an eigenvalue $\neq1$.
(Normally, in linear algebra, I tend to mean $\neq0$ by ``nontrivial''.)
\item \textbf{Page 7, one line above Definition 2.8:} What do you mean by a
``conjugation invariant function''? Invariant under conjugation by elements of
$W$, or by conjugation by any element of $\operatorname*{O}\left(
\mathfrak{h}\right) $ (or even $\operatorname*{GL}\left( \mathfrak{h}%
\right) $ ?) that happens to send an element of $\mathcal{S}$ to another
element of $\mathcal{S}$ ?
\item \textbf{Page 7, Definition 2.8:} This is hardly an error, but maybe it
would improve the exposition if you would define what $\Delta_{\mathfrak{h}}$
means. (It's just that I don't like algebra texts relying on geometry
preknowledge.) I assume we can define it by $\Delta_{\mathfrak{h}}%
=\sum\limits_{i=1}^{r}\partial_{y_{i}}^{2}$ for any orthonormal basis $\left(
y_{1},y_{2},...,y_{r}\right) $ of $\mathfrak{h}$ ?
\item \textbf{Page 7, one line above Theorem 2.9:} When you write
``$P_{1}\left( \mathbf{p}\right) =\mathbf{p}^{2}$'', it wouldn't hurt to
point out that $\mathbf{p}$ is a variable vector in $\mathfrak{h}^{\ast}$ (not
$\mathfrak{h}$), so ``$P_{1}\left( \mathbf{p}\right) =\mathbf{p}^{2}$''
describes $P_{1}$ as a polynomial function on $\mathfrak{h}^{\ast}$ (that is,
an element of $\mathbb{C}\left[ \mathfrak{h}^{\ast}\right] =S\mathfrak{h}$).
\item \textbf{Page 8, two lines above Remark 2.10:} You write: ``This theorem
is obviously a generalization of Theorem 1 about $W=\mathfrak{S}_{n}$.'' Given
that the representation $\mathbb{C}^{n}$ of $\mathfrak{S}_{n}$ is not
irreducible, while lifting the $L_{j}$ from the representation $\mathbb{C}%
^{n-1}$ of $\mathfrak{S}_{n}$ to $\mathbb{C}^{n}$ requires some work (as our
emails showed), I don't think the word ``obviously'' is justified here. See
below for a proposal how to improve this (by getting rid of the standing
assumption that $\mathfrak{h}$ be irreducible).
\item \textbf{Page 8, fourth line of \S 2.5:} You write: ``We normalize them
in such a way that $\left\langle \alpha_{s},\alpha_{s}^{\vee}\right\rangle
=2$.'' At this point, I had to think for a while about why this is possible
(i. e., why we don't have $\left\langle \alpha_{s},\alpha_{s}^{\vee
}\right\rangle =0$). This is quite easy to see by diagonalizing the matrix
$s$, but maybe you should make this an explicit exercise. (Remark 2.13, too,
could be an exercise.)
\item \textbf{Page 8, fifth line of \S 2.5:} Again, you speak of a ``function
invariant with respect to conjugation'', and it is not clear by what you allow
to conjugate. (I will henceforth assume that you allow conjugation by $G$.)
\item \textbf{Page 8, Definition 2.11:} Please say that $\mathbb{C}\left(
\mathfrak{h}\right) $ means the quotient field of $S\left( \mathfrak{h}%
^{\ast}\right) $. (I know that this follows from standard algebraic geometry
notation, but I didn't expect that you are using algebraic geometry notation.)
Also, please add ``Let $a\in\mathfrak{h}$.'' at the beginning of this Definition.
\item \textbf{Page 8, Proposition 2.14:} Beginning with part (i) of this
proposition, you seem to systematically write $\left( \cdot,\cdot\right) $
for the bilinear form on $\mathfrak{h}^{\ast}\times\mathfrak{h}$ that you
formerly denoted by $\left\langle \cdot,\cdot\right\rangle $. I don't like
this notation very much, because $\left( \cdot,\cdot\right) $ already means
two different bilinear forms (one on $\mathfrak{h}\times\mathfrak{h}$ and one
on $\mathfrak{h}^{\ast}\times\mathfrak{h}^{\ast}$) in the case when
$G\subseteq\operatorname*{O}\left( \mathfrak{h}\right) $, but it's okay
since one can always infer types. But you should point out the change in
notation, or else it appears as if you suddenly switched to the case
$G\subseteq\operatorname*{O}\left( \mathfrak{h}\right) $ !
\item \textbf{Page 9, proof of Theorem 2.15:} I think that%
\[
-\sum_{s\in\mathcal{S}}c_{s}\left( a,\alpha_{s}\right) \left( x,\alpha
_{s}^{\vee}\right) \left( b,\alpha_{s}\right) sD_{\alpha_{s}^{\vee}}%
\cdot\dfrac{1-\lambda_{s}^{-1}}{2}%
\]
should be%
\[
-\sum_{s\in\mathcal{S}}c_{s}\left( a,\alpha_{s}\right) \left( x,\alpha
_{s}^{\vee}\right) \left( b,\alpha_{s}\right) sD_{\alpha_{s}^{\vee}}%
\cdot\dfrac{1-\lambda_{s}}{2}%
\]
(leaving aside the fact that you are still using the notation $\left(
\cdot,\cdot\right) $ for what was formerly called $\left\langle \cdot
,\cdot\right\rangle $). To make sure that I haven't done any mistakes, let me
write up the details of this computation. (They are completely straightforward
and I don't think you should spell them out in the paper, but I am doing them
here so you can tell me where I am going wrong.)
It is clearly enough to prove that every $s\in\mathcal{S}$ satisfies%
\begin{equation}
\left[ s,D_{b}\right] =\left\langle b,\alpha_{s}\right\rangle sD_{\alpha
_{s}^{\vee}}\cdot\dfrac{1-\lambda_{s}}{2}. \label{p9.1}%
\end{equation}
First, we show that%
\begin{equation}
\text{every }b\in\mathfrak{h}\text{ satisfies }b-s^{-1}b=\dfrac{1-\lambda_{s}%
}{2}\left\langle b,\alpha_{s}\right\rangle \alpha_{s}^{\vee}. \label{p9.2}%
\end{equation}
(This is similar to Proposition 2.14 (i), but with $\mathfrak{h}$ instead of
$\mathfrak{h}^{\ast}$.)
\textit{Proof of (\ref{p9.2}):} WLOG, assume that $\mathfrak{h}=\mathbb{C}%
^{n}$, $s=\operatorname*{diag}\left( \lambda_{s},1,1,...,1\right) $,
$\alpha_{s}=e_{1}^{\ast}$ and $\alpha_{s}^{\vee}=2e_{1}$, where $\left(
e_{1},e_{2},...,e_{n}\right) $ is the standard basis of $\mathbb{C}^{n}$ and
$\left( e_{1}^{\ast},e_{2}^{\ast},...,e_{n}^{\ast}\right) $ is its dual
basis. (This situation can always be achieved by an appropriate change of
basis in $\mathfrak{h}$.) By linearity, it is enough to prove (\ref{p9.2}) in
the cases when $b=e_{i}$ for $i\in\left\{ 1,2,...,n\right\} $. So consider
this case. If $i>1$, then both sides of (\ref{p9.2}) are $0$, and thus
(\ref{p9.2}) holds. It remains to consider the case $i=1$. In this case, $b=e_{1}=\dfrac
{1}{2}\alpha_{s}^{\vee}$, so that%
\[
b-s^{-1}b=\dfrac{1}{2}\left( \alpha_{s}^{\vee}-\underbrace{s^{-1}\alpha
_{s}^{\vee}}_{=\lambda_{s}\alpha_{s}^{\vee}}\right) =\dfrac{1-\lambda_{s}}%
{2}\alpha_{s}^{\vee}%
\]
and%
\[
\dfrac{1-\lambda_{s}}{2}\left\langle \underbrace{b}_{=e_{1}}%
,\underbrace{\alpha_{s}}_{=e_{1}^{\ast}}\right\rangle \alpha_{s}^{\vee}%
=\dfrac{1-\lambda_{s}}{2}\underbrace{\left\langle e_{1},e_{1}^{\ast
}\right\rangle }_{=1}\alpha_{s}^{\vee}=\dfrac{1-\lambda_{s}}{2}\alpha
_{s}^{\vee}.
\]
Thus, (\ref{p9.2}) holds in the case $i=1$ as well, and thus (\ref{p9.2}) is proven.
\textit{Proof of (\ref{p9.1}):} We have%
\begin{align*}
\left[ s,D_{b}\right] & =sD_{b}-D_{b}s=s\left( D_{b}-\underbrace{s^{-1}%
D_{b}s}_{\substack{=D_{s^{-1}b}\\\text{(by Proposition 2.14 (ii))}}}\right) \\
& =s\underbrace{\left( D_{b}-D_{s^{-1}b}\right) }_{\substack{=D_{b-s^{-1}%
b}=D_{\dfrac{1-\lambda_{s}}{2}\left\langle b,\alpha_{s}\right\rangle
\alpha_{s}^{\vee}}\\\text{(by (\ref{p9.2}))}}}=sD_{\dfrac{1-\lambda_{s}}%
{2}\left\langle b,\alpha_{s}\right\rangle \alpha_{s}^{\vee}}\\
& =\left\langle b,\alpha_{s}\right\rangle sD_{\alpha_{s}^{\vee}}\cdot
\dfrac{1-\lambda_{s}}{2},\ \ \ \ \ \ \ \ \ \ \text{and (\ref{p9.1}) is
proven.}%
\end{align*}
\item \textbf{Page 9, proof of Theorem 2.15:} You write: ``since this algebra
acts faithfully on $\mathbb{C}\left( \mathfrak{h}\right) $'' (where ``this
algebra'' is the semidirect product $\mathbb{C}G\ltimes\mathcal{D}\left(
\mathfrak{h}_{\operatorname*{reg}}\right) $). I am wondering how you prove
this. I have a proof, but it is rather messy: First, the claim that
$\mathbb{C}G\ltimes\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}%
}\right) $ acts faithfully on $\mathbb{C}\left( \mathfrak{h}\right) $ can
be rewritten as follows: If $\left( D_{g}\right) _{g\in G}$ is a family of
differential operators indexed by elements of $G$ such that $\sum\limits_{g\in
G}gD_{g}$ is $0$ as an endomorphism of $\mathbb{C}\left( \mathfrak{h}\right)
$, then each $g\in G$ satisfies $D_{g}=0$. To prove this, we first notice that
we can WLOG assume that every $D_{g}$ has polynomial coefficients (because we
can move denominators to the left, moving them past derivations by means of
the quotient rule and moving them past the $g$'s by using the formula%
\[
g\circ f=\left( gf\right) g\ \ \ \ \ \ \ \ \ \ \text{for any }g\in G\text{
and }f\in\mathbb{C}\left( \mathfrak{h}\right)
\]
). Now, let $v\in\mathfrak{h}$ be a point which is not fixed by any $g\in
G\diagdown\left\{ \operatorname*{id}\right\} $. Recall that $\sum
\limits_{g\in G}gD_{g}$ is $0$ as an endomorphism of $\mathbb{C}\left(
\mathfrak{h}\right) $. In particular, $\sum\limits_{g\in G}gD_{g}$ acts as
$0$ on $\mathbb{C}\left[ \mathfrak{h}\right] $. Thus, for every
$p\in\mathbb{C}\left[ \mathfrak{h}\right] $, a certain $\mathbb{C}\left[
\mathfrak{h}\right] $-linear combination of the partial derivatives of $p$
(of various orders) taken at the points $gv$ for varying $g\in G$ is
identically $0$ (and the coefficients of this combination don't depend on
$p$). But since we can find a polynomial with any given set of finitely many
prescribed values of partial derivatives at finitely many points\footnote{This
follows from the Chinese remainder theorem, applied to the ring $\mathbb{C}%
\left[ \mathfrak{h}\right] $. In fact, by prescribing the values of finitely
many partial derivatives of a polynomial $p\in\mathbb{C}\left[ \mathfrak{h}%
\right] $ at some point $w\in\mathfrak{h}$, we put a condition on the residue
class of $p$ modulo a certain power of the maximal ideal $\mathfrak{m}%
_{w}\subseteq\mathbb{C}\left[ \mathfrak{h}\right] $ (where $\mathfrak{m}_{w}$ is the ideal of all
polynomials that vanish at $w$). Such a condition is always satisfiable. Thus,
if we prescribe the values of finitely many partial derivatives of a
polynomial $p\in\mathbb{C}\left[ \mathfrak{h}\right] $ at finitely many
points $w_{1},w_{2},...,w_{\ell}\in\mathfrak{h}$, we put conditions on the
residue classes of $p$ modulo powers of $\mathfrak{m}_{w_{1}}$, $\mathfrak{m}%
_{w_{2}}$, $...$, $\mathfrak{m}_{w_{\ell}}$. Each of these $\ell$ conditions
alone is satisfiable; thus, the conjunction of these $\ell$ conditions is also
satisfiable (because the Chinese remainder theorem says that $\left(
\mathbb{C}\left[ \mathfrak{h}\right] \right) \diagup\left( \mathfrak{m}%
_{w_{1}}^{\alpha_{1}}\mathfrak{m}_{w_{2}}^{\alpha_{2}}...\mathfrak{m}%
_{w_{\ell}}^{\alpha_{\ell}}\right) =\prod\limits_{i=1}^{\ell}\left(
\mathbb{C}\left[ \mathfrak{h}\right] \right) \diagup\mathfrak{m}_{w_{i}%
}^{\alpha_{i}}$, so that every $\ell$-tuple in $\prod\limits_{i=1}^{\ell
}\left( \mathbb{C}\left[ \mathfrak{h}\right] \right) \diagup
\mathfrak{m}_{w_{i}}^{\alpha_{i}}$ has a common representative in
$\mathbb{C}\left[ \mathfrak{h}\right] $), i. e., we can find a polynomial
with our given set of prescribed values.}, this yields that the $\mathbb{C}%
\left[ \mathfrak{h}\right] $-linear combination must be trivial at $v$; in
other words, $D_{g}$ is identically $0$ at $v$ for every $g\in G$. Since this
holds for every point $v\in\mathfrak{h}$ which is not fixed by any $g\in
G\diagdown\left\{ \operatorname*{id}\right\} $, and since the set of such
points is Zariski-dense in $\mathfrak{h}$, this yields that $D_{g}$ is
identically $0$ everywhere for every $g\in G$. This proves that each $g\in G$
satisfies $D_{g}=0$, qed.
\item \textbf{Page 9, \S 2.6, just before Proposition 2.16:} It would be nice
to explain when an element of $\mathbb{C}W\ltimes\mathcal{D}\left(
\mathfrak{h}_{\operatorname*{reg}}\right) $ or an operator on the space of
regular functions of $\mathfrak{h}_{\operatorname*{reg}}$ is said to be
$W$\textit{-invariant}. (Short answer: When it commutes with every $g\in W$.)
\item \textbf{Page 10, two lines above Corollary 2.17:} You write: ``the
algebra $\left( S\mathfrak{h}\right) ^{W}$ is free''. By ``free'', you mean
``free as a commutative algebra'', not ``free as an algebra''. (I know, this
is some nitpicking.)
\item \textbf{Page 10, Corollary 2.17:} In my opinion, you should explain what
$P_{j}\left( D_{y_{1}},...,D_{y_{r}}\right) $ means, because $P_{j}$ is just
an element of $S\mathfrak{h}$, and not a polynomial. (The meaning of
$P_{j}\left( D_{y_{1}},...,D_{y_{r}}\right) $ is the following: Since
$\left\{ y_{1},y_{2},...,y_{r}\right\} $ is a basis of $\mathfrak{h}$, we
can identify the symmetric algebra $S\mathfrak{h}$ with the ring of
polynomials in the $r$ variables $y_{1}$, $y_{2}$, $...$, $y_{r}$ over
$\mathbb{C}$. Thus, $P_{j}\in S\mathfrak{h}$ becomes a polynomial in the $r$
variables $y_{1}$, $y_{2}$, $...$, $y_{r}$. If we now substitute $D_{y_{1}}$,
$D_{y_{2}}$, $...$, $D_{y_{r}}$ for these variables $y_{1}$, $y_{2}$, $...$,
$y_{r}$ in $P_{j}$ (this is allowed because the Dunkl operators $D_{a}$
commute), we obtain an element of $\mathbb{C}W\ltimes\mathcal{D}\left(
\mathfrak{h}_{\operatorname*{reg}}\right) $. This element is what you denote
by $P_{j}\left( D_{y_{1}},...,D_{y_{r}}\right) $.)
\item \textbf{Page 10, proof of Corollary 2.17:} Replace ``$L_{j}$'' by
``$\overline{L}_{j}$'' twice in this proof.
\item \textbf{Page 10, proof of Corollary 2.17:} This proof would be more
readable if you would explain why the $P_{j}\left( D_{y_{1}},...,D_{y_{r}%
}\right) $ is $W$-invariant for all $j$. The \textit{proof} is not too immediate:
First, it is easy to see that the map%
\begin{align*}
\mathfrak{h} & \rightarrow\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) ,\\
a & \mapsto D_{a}%
\end{align*}
is $\mathbb{C}$-linear\footnote{This is because the map%
\begin{align*}
\mathfrak{h} & \rightarrow\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) ,\\
a & \mapsto\partial_{a}%
\end{align*}
is $\mathbb{C}$-linear, and because $\alpha_{s}$ is $\mathbb{C}$-linear for
every $s\in W$.}. Denote this map by $T$.
Since $\left\{ y_{1},y_{2},...,y_{r}\right\} $ is a basis of $\mathfrak{h}$,
we can identify the symmetric algebra $S\mathfrak{h}$ with the ring of
polynomials in the $r$ variables $y_{1}$, $y_{2}$, $...$, $y_{r}$ over
$\mathbb{C}$. Thus, every $P\in S\mathfrak{h}$ becomes a polynomial in the $r$
variables $y_{1}$, $y_{2}$, $...$, $y_{r}$. As a consequence, for every $P\in
S\mathfrak{h}$, we will denote by $P\left( D_{y_{1}},D_{y_{2}},...,D_{y_{r}%
}\right) $ the result of substituting $D_{y_{1}}$, $D_{y_{2}}$, $...$,
$D_{y_{r}}$ for these variables $y_{1}$, $y_{2}$, $...$, $y_{r}$ in $P$. When
$P\in\mathfrak{h}$, then $P\left( D_{y_{1}},D_{y_{2}},...,D_{y_{r}}\right) $
is a $\mathbb{C}$-linear combination of $D_{y_{1}}$, $D_{y_{2}}$, $...$,
$D_{y_{r}}$ (here, we regard $\mathfrak{h}$ as a subspace of $S\mathfrak{h}$,
so $P\in\mathfrak{h}$ yields $P\in S\mathfrak{h}$).
It is easy to see that
\begin{equation}
\text{every }a\in\mathfrak{h}\text{ satisfies }D_{a}=a\left( D_{y_{1}%
},D_{y_{2}},...,D_{y_{r}}\right) \label{p10.1}%
\end{equation}
(where $a\left( D_{y_{1}},D_{y_{2}},...,D_{y_{r}}\right) $ is to be
understood as just explained, with $a$ being regarded as an element of
$S\mathfrak{h}$). (In fact, the equation (\ref{p10.1}) is $\mathbb{C}$-linear
in $a$ (because of the $\mathbb{C}$-linearity of $T$), and thus in order to
prove it for all $a\in\mathfrak{h}$, it is enough to prove it in the case when
$a\in\left\{ y_{1},y_{2},...,y_{r}\right\} $ (since $\left\{ y_{1}%
,y_{2},...,y_{r}\right\} $ is a basis of $\mathfrak{h}$), but in this case it
is trivial.)
Now, for any $j\in\left\{ 1,2,...,\dim\mathfrak{h}\right\} $ and any $g\in
W$, we have%
\begin{align*}
& gP_{j}\left( D_{y_{1}},...,D_{y_{r}}\right) g^{-1}\\
& =P_{j}\left( gD_{y_{1}}g^{-1},...,gD_{y_{r}}g^{-1}\right)
\ \ \ \ \ \ \ \ \ \ \left( \text{since conjugation by }g\text{ is an algebra
automorphism}\right) \\
& =P_{j}\left( D_{gy_{1}},...,D_{gy_{r}}\right) \ \ \ \ \ \ \ \ \ \ \left(
\text{since }gD_{y_{i}}g^{-1}=D_{gy_{i}}\text{ for every }i\text{ due to
Proposition 2.14 (ii)}\right) \\
& =P_{j}\left( \left( gy_{1}\right) \left( D_{y_{1}},D_{y_{2}%
},...,D_{y_{r}}\right) ,...,\left( gy_{r}\right) \left( D_{y_{1}}%
,D_{y_{2}},...,D_{y_{r}}\right) \right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since (\ref{p10.1}) yields that
}D_{gy_{i}}=\left( gy_{i}\right) \left( D_{y_{1}},D_{y_{2}},...,D_{y_{r}%
}\right) \text{ for every }i\right) \\
& =\underbrace{\left( P_{j}\left( gy_{1},...,gy_{r}\right) \right)
}_{\substack{=gP_{j}=P_{j}\\\text{(since }P_{j}\in\left( S\mathfrak{h}%
\right) ^{W}\text{)}}}\left( D_{y_{1}},...,D_{y_{r}}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here (as explained above) }\left( P_{j}\left( gy_{1},...,gy_{r}%
\right) \right) \left( D_{y_{1}},...,D_{y_{r}}\right) \text{ means}\\
\text{ ``the polynomial }P_{j}\left( gy_{1},...,gy_{r}\right) \text{ with
}D_{y_{1}}\text{, }D_{y_{2}}\text{, }...\text{, }D_{y_{r}}\text{ substituted
for }y_{1}\text{, }y_{2}\text{, }...\text{, }y_{r}\text{''}%
\end{array}
\right) \\
& =P_{j}\left( D_{y_{1}},...,D_{y_{r}}\right) ,
\end{align*}
so that $P_{j}\left( D_{y_{1}},...,D_{y_{r}}\right) $ is $W$-invariant, qed.
The main idea of this proof, I think, was the $\mathbb{C}$-linearity of $T$.
While trivial, it is (in my opinion) unexpected for such a complicated map.
\item \textbf{Pages 7-10:} Here are various suggested changes to make the
proofs clearer (these changes should be made at the same time, as they depend
on one another):
-- In Theorem 2.9, add the claim that the $L_{j}$ are $W$-invariant.
(Otherwise, Theorem 2.1 doesn't directly follow from Theorem 2.9, because
Theorem 2.1 claims the $\mathfrak{S}_{n}$-invariance of the $L_{j}$.)
-- In Corollary 2.17, add the claim that the $\overline{L}_{j}$ are
$W$-invariant. (Otherwise, Theorem 2.9 with the added claim that the $L_{j}$
are $W$-invariant doesn't directly follow from Corollary 2.17.)
-- On page 9, you write:
``For any element $B\in\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $, define $m\left( B\right) $ to be the
differential operator $\mathbb{C}\left( \mathfrak{h}\right) ^{W}%
\rightarrow\mathbb{C}\left( \mathfrak{h}\right) $, defined by $B$. That is,
if $B=\sum_{g\in W}B_{g}g$, $B_{g}\in\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $, then $m\left( B\right) =\sum_{g\in W}%
B_{g}$.''
This is slightly confusing, since you later (in the proof of Corollary 2.17)
want $m\left( B\right) $ to be defined on the whole $\mathbb{C}\left(
\mathfrak{h}\right) $ rather than just on $\mathbb{C}\left( \mathfrak{h}%
\right) ^{W}$. In my opinion, you should replace the text I've just quoted by
the following:
``For any element $B\in\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $, define a differential operator $m\left(
B\right) \in\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $
by $m\left( B\right) =\sum\limits_{g\in W}B_{g}$, where $B$ is being written
in the form $B=\sum\limits_{g\in W}B_{g}g$ with $B_{g}\in\mathcal{D}\left(
\mathfrak{h}_{\operatorname*{reg}}\right) $. The differential operator $m\left( B\right) $
defined this way satisfies the following properties:
(i) If $f\in\mathbb{C}\left( \mathfrak{h}\right) ^{W}$, then $m\left(
B\right) f=Bf$.
(ii) If $B\in\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $ is $W$-invariant, then $m\left( B\right) $
is $W$-invariant as well\footnote{\textit{Proof.} Let $B\in\mathbb{C}%
W\ltimes\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $ be
$W$-invariant. Write $B$ in the form $B=\sum\limits_{g\in W}B_{g}g$ with
$B_{g}\in\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $.
Then, $m\left( B\right) =\sum\limits_{g\in W}B_{g}$. Let $h\in W$. Since $B$
is $W$-invariant, we have $hB=Bh$, so that%
\begin{align*}
\sum\limits_{g\in W}hB_{g}g & =h\underbrace{\sum\limits_{g\in W}B_{g}g}%
_{=B}=hB=\underbrace{B}_{=\sum\limits_{g\in W}B_{g}g}h=\sum\limits_{g\in
W}B_{g}gh=\sum\limits_{g\in W}B_{gh^{-1}}g\underbrace{h^{-1}h}%
_{=\operatorname*{id}}\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we substituted }gh^{-1}\text{ for }g\text{ in the sum}\\
\text{(since the map }W\rightarrow W,\ g\mapsto gh^{-1}\text{ is a bijection)}%
\end{array}
\right) \\
& =\sum\limits_{g\in W}B_{gh^{-1}}g.
\end{align*}
Compared to%
\begin{align*}
\sum\limits_{g\in W}hB_{g}\underbrace{g}_{=h^{-1}hg} & =\sum\limits_{g\in
W}hB_{g}h^{-1}hg=\sum\limits_{g\in W}hB_{g}h^{-1}hg=\sum\limits_{g\in
W}hB_{h^{-1}g}h^{-1}\underbrace{hh^{-1}}_{=\operatorname*{id}}g\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we substituted }h^{-1}g\text{ for }g\text{ in the sum}\\
\text{(since the map }W\rightarrow W,\ g\mapsto h^{-1}g\text{ is a bijection)}%
\end{array}
\right) \\
& =\sum\limits_{g\in W}hB_{h^{-1}g}h^{-1}g,
\end{align*}
this yields $\sum\limits_{g\in W}B_{gh^{-1}}g=\sum\limits_{g\in W}hB_{h^{-1}%
g}h^{-1}g$. Notice that every $g\in W$ satisfies $B_{gh^{-1}}\in
\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $ and
$hB_{h^{-1}g}h^{-1}\in\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}%
}\right) $.
\par
But any element of $\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $ can be \textbf{uniquely} written in the form
$\sum\limits_{g\in W}C_{g}g$ with $C_{g}\in\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $. Hence, if we have $\sum\limits_{g\in W}%
C_{g}g=\sum\limits_{g\in W}D_{g}g$ for some choice of $C_{g}\in\mathcal{D}%
\left( \mathfrak{h}_{\operatorname*{reg}}\right) $ and $D_{g}\in
\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $, then every
$g\in W$ satisfies $C_{g}=D_{g}$. Applied to $C_{g}=B_{gh^{-1}}$ and
$D_{g}=hB_{h^{-1}g}h^{-1}$, this yields that
\par%
\[
\text{every }g\in W\text{ satisfies }B_{gh^{-1}}=hB_{h^{-1}g}h^{-1}%
\]
(because $\sum\limits_{g\in W}B_{gh^{-1}}g=\sum\limits_{g\in W}hB_{h^{-1}%
g}h^{-1}g$). Hence,
\begin{align*}
\sum\limits_{g\in W}B_{gh^{-1}} & =\sum\limits_{g\in W}hB_{h^{-1}g}%
h^{-1}=\sum\limits_{g\in W}hB_{g}h^{-1}\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we substituted }g\text{ for }h^{-1}g\text{ in the sum}\\
\text{(since the map }W\rightarrow W,\ g\mapsto h^{-1}g\text{ is a bijection)}%
\end{array}
\right) \\
& =h\underbrace{\left( \sum\limits_{g\in W}B_{g}\right) }_{=m\left(
B\right) }h^{-1}=hm\left( B\right) h^{-1}.
\end{align*}
Compared to%
\begin{align*}
\sum\limits_{g\in W}B_{gh^{-1}} & =\sum\limits_{g\in W}B_{g}%
\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we substituted }g\text{ for }gh^{-1}\text{ in the sum}\\
\text{(since the map }W\rightarrow W,\ g\mapsto gh^{-1}\text{ is a bijection)}%
\end{array}
\right) \\
& =m\left( B\right) ,
\end{align*}
this yields $m\left( B\right) =hm\left( B\right) h^{-1}$, so that
$m\left( B\right) h=hm\left( B\right) $.
\par
Since this holds for every $h\in W$, this yields that $m\left( B\right) $ is
$W$-invariant, qed.}.
(iii) Any $s\in W$ and any $B\in\mathbb{C}W\ltimes\mathcal{D}\left(
\mathfrak{h}_{\operatorname*{reg}}\right) $ satisfy $m\left( B\right)
=m\left( Bs\right) $. (This is used, e. g., in the proof that $m\left(
D_{y}^{2}\right) =m\left( D_{y}\partial_{y}\right) $ in the proof of
Proposition 2.16.)
(iv) Any $A\in\mathbb{C}W\ltimes\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $ and any $W$-invariant $B\in\mathbb{C}%
W\ltimes\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $
satisfy $m\left( AB\right) =m\left( A\right) m\left( B\right) $.''
\item \textbf{Pages 7-10:} In much of Section 2, you work with a standing
assumption requiring that $\mathfrak{h}$ be an irreducible $W$-module. This
makes deducing Theorem 2.1 from Theorem 2.9 unnecessarily hard. There is a
very easy way to get rid of the standing assumption:
-- On page 7, replace ``Let us assume that $\mathfrak{h}$ is an irreducible
representation of $W$ (i. e. $W$ is an irreducible finite Coxeter group, and
$\mathfrak{h}$ is its reflection representation.) In this case, we can take
$P_{1}\left( \mathbf{p}\right) =\mathbf{p}^{2}$'' by ``Note that if
$\mathfrak{h}$ is an irreducible representation of $W$ (i. e. $W$ is an
irreducible finite Coxeter group, and $\mathfrak{h}$ is its reflection
representation), then we can take $P_{1}\left( \mathbf{p}\right)
=\mathbf{p}^{2}$. If $W=\mathfrak{S}_{n}$ and $\mathfrak{h}=\mathbb{C}^{n}$
(with the standard permutation representation of $\mathfrak{S}_{n}$), then we
can take $P_{2}\left( \mathbf{p}\right) =\mathbf{p}^{2}$''.
-- In Theorem 2.9, replace ``$L_{1}=H$'' by ``if $\ell\in\left\{
1,2,...,\dim\mathfrak{h}\right\} $ is such that $P_{\ell}\left(
\mathbf{p}\right) =\mathbf{p}^{2}$, then $L_{\ell}=H$''.
-- One line above Corollary 2.17, replace ``$P_{1}=\mathbf{p}^{2}$'' by
``$P_{1}$''.
-- In Corollary 2.17, replace ``$\overline{L}_{1}=\overline{H}$'' by ``if
$\ell\in\left\{ 1,2,...,\dim\mathfrak{h}\right\} $ is such that $P_{\ell
}\left( \mathbf{p}\right) =\mathbf{p}^{2}$, then $\overline{L}_{\ell
}=\overline{H}$''.
\item \textbf{Page 10, proof of Proposition 2.18:} The expression
``$\sum\limits_{i=1}^{r}\partial_{y_{i}}\left( \log\delta_{c}\right)
\partial_{y_{i}}$'' is ambiguous: Does $\partial_{y_{i}}\left( \log\delta
_{c}\right) $ mean the product $\partial_{y_{i}}\cdot\left( \log\delta
_{c}\right) $ in $\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}%
}\right) $ or the $y_{i}$-derivative of $\log\delta_{c}$? (I know it means
the latter.)
\item \textbf{Page 10, proof of Proposition 2.18:} As I don't like this proof
(it uses a strange function $\delta_{c}$, which is in general not algebraic
and doesn't have a very obvious interpretation as a power series), let me
reformulate it in a more algebraic way. First, I will show some lemmas:
\textbf{Lemma 2.18a.} Let $A$ and $D$ be $\mathbb{C}$-algebras. Let
$\mathcal{G}$ be a subset of $A$ which generates $A$ as a $\mathbb{C}%
$-algebra. Let $f:A\rightarrow D$ be a $\mathbb{C}$-linear map. Assume that%
\[
f\left( ab\right) =f\left( a\right) f\left( b\right)
\ \ \ \ \ \ \ \ \ \ \text{for every }a\in\mathcal{G}\text{ and }b\in A\text{.}%
\]
Also, assume that $f\left( 1\right) =1$. Then, $f$ is a $\mathbb{C}$-algebra homomorphism.
\textit{Proof of Lemma 2.18a.} Let $\mathcal{H}$ be the subset%
\[
\left\{ x\in A\ \mid\ f\left( xb\right) =f\left( x\right) f\left(
b\right) \text{ for every }b\in A\right\} .
\]
Every $a\in\mathcal{G}$ satisfies $a\in\mathcal{H}$ (because every
$a\in\mathcal{G}$ satisfies $f\left( ab\right) =f\left( a\right) f\left(
b\right) $ for every $b\in A$, and thus $a\in\left\{ x\in A\ \mid\ f\left(
xb\right) =f\left( x\right) f\left( b\right) \text{ for every }b\in
A\right\} =\mathcal{H}$). In other words, $\mathcal{G}\subseteq\mathcal{H}$.
Also, any $\lambda\in\mathbb{C}$, $\mu\in\mathbb{C}$, $a\in\mathcal{H}$ and
$a^{\prime}\in\mathcal{H}$ satisfy $\lambda a+\mu a^{\prime}\in\mathcal{H}%
$\ \ \ \ \footnote{\textit{Proof.} Let $\lambda\in\mathbb{C}$, $\mu
\in\mathbb{C}$, $a\in\mathcal{H}$ and $a^{\prime}\in\mathcal{H}$. Since
$a\in\mathcal{H}=\left\{ x\in A\ \mid\ f\left( xb\right) =f\left(
x\right) f\left( b\right) \text{ for every }b\in A\right\} $, we have
$f\left( ab\right) =f\left( a\right) f\left( b\right) $ for every $b\in
A$. Since $a^{\prime}\in\mathcal{H}=\left\{ x\in A\ \mid\ f\left( xb\right)
=f\left( x\right) f\left( b\right) \text{ for every }b\in A\right\} $, we
have $f\left( a^{\prime}b\right) =f\left( a^{\prime}\right) f\left(
b\right) $ for every $b\in A$. Now,%
\begin{align*}
f\left( \underbrace{\left( \lambda a+\mu a^{\prime}\right) b}_{=\lambda
ab+\mu a^{\prime}b}\right) & =f\left( \lambda ab+\mu a^{\prime}b\right)
=\lambda\underbrace{f\left( ab\right) }_{=f\left( a\right) f\left(
b\right) }+\mu\underbrace{f\left( a^{\prime}b\right) }_{=f\left(
a^{\prime}\right) f\left( b\right) }\ \ \ \ \ \ \ \ \ \ \left( \text{since
}f\text{ is }\mathbb{C}\text{-linear}\right) \\
& =\lambda f\left( a\right) f\left( b\right) +\mu f\left( a^{\prime
}\right) f\left( b\right) =\underbrace{\left( \lambda f\left( a\right)
+\mu f\left( a^{\prime}\right) \right) }_{\substack{=f\left( \lambda a+\mu
a^{\prime}\right) \\\text{(since }f\text{ is }\mathbb{C}\text{-linear)}%
}}f\left( b\right) =f\left( \lambda a+\mu a^{\prime}\right) f\left(
b\right)
\end{align*}
for every $b\in A$. In other words, $\lambda a+\mu a^{\prime}\in\left\{ x\in
A\ \mid\ f\left( xb\right) =f\left( x\right) f\left( b\right) \text{ for
every }b\in A\right\} =\mathcal{H}$, qed.}. Combined with the trivial fact
that $0\in\mathcal{H}$ (this quickly follows from $f\left( 0\right) =0$),
this yields that $\mathcal{H}$ is a $\mathbb{C}$-vector subspace of $A$.
Also, $1\in\mathcal{H}$ (since $f\left( \underbrace{1b}_{=b}\right)
=f\left( b\right) =\underbrace{1}_{=f\left( 1\right) }f\left( b\right)
=f\left( 1\right) f\left( b\right) $ for every $b\in A$, so that
$1\in\left\{ x\in A\ \mid\ f\left( xb\right) =f\left( x\right) f\left(
b\right) \text{ for every }b\in A\right\} =\mathcal{H}$). Besides, any
$a\in\mathcal{H}$ and $a^{\prime}\in\mathcal{H}$ satisfy $aa^{\prime}%
\in\mathcal{H}$\ \ \ \ \footnote{\textit{Proof.} Let $a\in\mathcal{H}$ and
$a^{\prime}\in\mathcal{H}$. Since $a\in\mathcal{H}=\left\{ x\in
A\ \mid\ f\left( xb\right) =f\left( x\right) f\left( b\right) \text{ for
every }b\in A\right\} $, we have%
\begin{equation}
f\left( ab\right) =f\left( a\right) f\left( b\right)
\ \ \ \ \ \ \ \ \ \ \text{for every }b\in A \label{p10.2.18a.0}%
\end{equation}
Since $a^{\prime}\in\mathcal{H}=\left\{ x\in
A\ \mid\ f\left( xb\right) =f\left( x\right) f\left( b\right) \text{ for
every }b\in A\right\} $, we have $f\left( a^{\prime}b\right) =f\left(
a^{\prime}\right) f\left( b\right) $ for every $b\in A$. Now, every $b\in
A$ satisfies%
\begin{align*}
f\left( aa^{\prime}b\right) & =f\left( a\right) \underbrace{f\left(
a^{\prime}b\right) }_{=f\left( a^{\prime}\right) f\left( b\right)
}\ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{p10.2.18a.0}), applied to
}a^{\prime}b\text{ instead of }b\right) \\
& =f\left( a\right) f\left( a^{\prime}\right) f\left( b\right)
\end{align*}
and%
\[
\underbrace{f\left( aa^{\prime}\right) }_{\substack{=f\left( a\right)
f\left( a^{\prime}\right) \\\text{(by (\ref{p10.2.18a.0}), applied to
}a^{\prime}\\\text{instead of }b\text{)}}}f\left( b\right) =f\left(
a\right) f\left( a^{\prime}\right) f\left( b\right) .
\]
Hence, $f\left( aa^{\prime}b\right) =f\left( a\right) f\left( a^{\prime
}\right) f\left( b\right) =f\left( aa^{\prime}\right) f\left( b\right)
$ for every $b\in A$. Hence, $aa^{\prime}\in\left\{ x\in A\ \mid\ f\left(
xb\right) =f\left( x\right) f\left( b\right) \text{ for every }b\in
A\right\} =\mathcal{H}$, qed.}. Combining this with the fact that
$1\in\mathcal{H}$ and that $\mathcal{H}$ is a $\mathbb{C}$-vector subspace of
$A$, we conclude that $\mathcal{H}$ is a $\mathbb{C}$-subalgebra of $A$.
Combined with $\mathcal{G}\subseteq\mathcal{H}$, this yields that
$\mathcal{H}$ is a $\mathbb{C}$-subalgebra of $A$ containing $\mathcal{G}$ as
a subset. But since every $\mathbb{C}$-subalgebra of $A$ containing
$\mathcal{G}$ as a subset must contain $A$ as a
subset\footnote{\textit{Proof.} We know that $\mathcal{G}$ generates $A$ as a
$\mathbb{C}$-algebra. In other words, $A$ is the smallest $\mathbb{C}%
$-subalgebra of $A$ containing $\mathcal{G}$ as a subset. Hence, every
$\mathbb{C}$-subalgebra of $A$ containing $\mathcal{G}$ as a subset must
contain $A$ as a subset, qed.}, this yields that $\mathcal{H}$ contains $A$ as
a subset. In other words, $A\subseteq\mathcal{H}$. Thus, every $a\in A$
satisfies $a\in A\subseteq\mathcal{H}=\left\{ x\in A\ \mid\ f\left(
xb\right) =f\left( x\right) f\left( b\right) \text{ for every }b\in
A\right\} $, so that $f\left( ab\right) =f\left( a\right) f\left(
b\right) $ for every $b\in A$.
We thus have proven that every $a\in A$ and $b\in A$ satisfy $f\left(
ab\right) =f\left( a\right) f\left( b\right) $. Combined with $f\left(
1\right) =1$ and with the $\mathbb{C}$-linearity of the map $f$, this yields
that $f$ is a $\mathbb{C}$-algebra homomorphism. Lemma 2.18a is proven.
\textbf{Lemma 2.18b.} Let $V$ be a finite-dimensional $\mathbb{C}$-vector
space, and let $U$ be a Zariski-dense open subset of $V$. Let $\mathbb{C}%
\left[ U\right] $ and $\mathbb{C}\left[ V\right] $ be the coordinate rings
of $U$ and $V$, respectively (so that $\mathbb{C}\left[ V\right] =S\left(
V^{\ast}\right) $, and $\mathbb{C}\left[ U\right] $ is a localization of
$\mathbb{C}\left[ V\right] $). Let $\tau:V\rightarrow\mathbb{C}\left[
U\right] $ be a $\mathbb{C}$-linear map. Assume that%
\begin{equation}
\left[ \partial_{a}+\tau\left( a\right) ,\partial_{b}+\tau\left( b\right)
\right] =0\ \ \ \ \ \ \ \ \ \ \text{for any }a\in V\text{ and }b\in V.
\label{p10.2.18b.1}%
\end{equation}
Then, there exists a unique $\mathbb{C}$-algebra homomorphism $\varsigma
:\mathcal{D}\left( U\right) \rightarrow\mathcal{D}\left( U\right) $ which
satisfies the following two conditions:
\textit{Condition 1:} We have $\varsigma\left( f\right) =f$ for every
$f\in\mathbb{C}\left[ U\right] $ (where $\mathbb{C}\left[ U\right] $ is
canonically embedded into $\mathcal{D}\left( U\right) $).
\textit{Condition 2:} We have $\varsigma\left( \partial_{a}\right)
=\partial_{a}+\tau\left( a\right) $ for every $a\in V$.
\textit{Proof of Lemma 2.18b.} Since the $\mathbb{C}$-algebra $\mathcal{D}%
\left( U\right) $ is generated by the elements of $\mathbb{C}\left[
U\right] $ and the elements $\partial_{a}$ for $a\in V$, it is clear that
there exists \textbf{at most one} $\mathbb{C}$-algebra homomorphism
$\varsigma:\mathcal{D}\left( U\right) \rightarrow\mathcal{D}\left(
U\right) $ satisfying Conditions 1 and 2. Hence, in order to prove that there
exists \textbf{exactly one} such homomorphism, we need only check that there
exists \textbf{at least one} such homomorphism. Let us do this now.
Let $\mathcal{D}_{\operatorname*{const}}\left( V\right) $ be the
$\mathbb{C}$-algebra of differential operators on $V$ with constant
coefficients. Recall that $\mathcal{D}\left( U\right) =\mathbb{C}\left[
U\right] \otimes\mathcal{D}_{\operatorname*{const}}\left( V\right) $ as a
vector space (where $\otimes$ means $\otimes_{\mathbb{C}}$). In particular,
for any $f\in\mathbb{C}\left[ U\right] $ and any $D\in\mathcal{D}%
_{\operatorname*{const}}\left( V\right) $, the operator $fD\in
\mathcal{D}\left( U\right) $ is the tensor product $f\otimes D\in
\mathbb{C}\left[ U\right] \otimes\mathcal{D}_{\operatorname*{const}}\left(
V\right) $.
Moreover, we can define a map $\partial:V\rightarrow\mathcal{D}%
_{\operatorname*{const}}\left( V\right) $ by%
\[
\partial\left( v\right) =\partial_{v}\ \ \ \ \ \ \ \ \ \ \text{for every
}v\in V.
\]
Then, $\partial$ is a $\mathbb{C}$-linear injection, and the image
$\partial\left( V\right) $ is the space of all degree-$1$ differential
operators on $V$ with constant coefficients. Denote by $\partial^{-1}%
:\partial\left( V\right) \rightarrow V$ the inverse of $\partial$ on
$\partial\left( V\right) $.
Let $D^{\prime}$ be the $\mathbb{C}$-subalgebra of $\mathcal{D}\left(
U\right) $ generated by $\left\{ \partial_{v}+\tau\left( v\right)
\ \mid\ v\in V\right\} $. Then, the algebra $D^{\prime}$ is commutative
(because (\ref{p10.2.18b.1}) shows that its generators commute). Define a
$\mathbb{C}$-linear map $\xi:V\rightarrow D^{\prime}$ by%
\[
\xi\left( v\right) =\partial_{v}+\tau\left( v\right)
\ \ \ \ \ \ \ \ \ \ \text{for every }v\in V.
\]
Then, $\xi\circ\partial^{-1}$ is a $\mathbb{C}$-linear map $\partial\left(
V\right) \rightarrow D^{\prime}$. By the universal property of the symmetric
algebra, the $\mathbb{C}$-linear map $\xi\circ\partial^{-1}:\partial\left(
V\right) \rightarrow D^{\prime}$ can be extended to a $\mathbb{C}$-algebra
homomorphism $\Xi:S\left( \partial\left( V\right) \right) \rightarrow
D^{\prime}$ such that%
\begin{equation}
\Xi\left( z\right) =\left( \xi\circ\partial^{-1}\right) \left( z\right)
\ \ \ \ \ \ \ \ \ \ \text{for every }z\in\partial\left( V\right)
\label{p10.2.18b.2}%
\end{equation}
(because $D^{\prime}$ is commutative). Consider this $\Xi$.
Since $\partial\left( V\right) $ is the space of all degree-$1$ differential
operators on $V$ with constant coefficients, we have $\mathcal{D}%
_{\operatorname*{const}}\left( V\right) \cong S\left( \partial\left(
V\right) \right) $. Hence, we can regard $\Xi:S\left( \partial\left(
V\right) \right) \rightarrow D^{\prime}$ as a $\mathbb{C}$-algebra
homomorphism $\mathcal{D}_{\operatorname*{const}}\left( V\right) \rightarrow
D^{\prime}$. Since $\Xi$ is a $\mathbb{C}$-algebra homomorphism, we have
$\Xi\left( 1\right) =1$.
Now, define a $\mathbb{C}$-linear map $\varsigma:\mathbb{C}\left[ U\right]
\otimes\mathcal{D}_{\operatorname*{const}}\left( V\right) \rightarrow
\mathcal{D}\left( U\right) $ by%
\[
\varsigma\left( f\otimes D\right) =f\Xi\left( D\right)
\ \ \ \ \ \ \ \ \ \ \text{for every }f\in\mathbb{C}\left[ U\right] \text{
and }D\in\mathcal{D}_{\operatorname*{const}}\left( V\right) .
\]
Since $\mathbb{C}\left[ U\right] \otimes\mathcal{D}_{\operatorname*{const}%
}\left( V\right) =\mathcal{D}\left( U\right) $, this map $\varsigma$ is a
$\mathbb{C}$-linear map $\mathcal{D}\left( U\right) \rightarrow
\mathcal{D}\left( U\right) $. We claim that $\varsigma$ is a $\mathbb{C}%
$-algebra homomorphism satisfying Conditions 1 and 2.
In fact, every $f\in\mathbb{C}\left[ U\right] $ satisfies%
\begin{align*}
\varsigma\left( \underbrace{f}_{=f\otimes1}\right) & =\varsigma\left(
f\otimes1\right) =f\underbrace{\Xi\left( 1\right) }_{=1}%
\ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\varsigma\right) \\
& =f.
\end{align*}
Thus, $\varsigma$ satisfies Condition 1. Applied to $f=1$, Condition 1 yields
$\varsigma\left( 1\right) =1$.
Every $a\in V$ satisfies%
\begin{align*}
\varsigma\left( \underbrace{\partial_{a}}_{=1\otimes\partial_{a}}\right) &
=\varsigma\left( 1\otimes\partial_{a}\right) =1\Xi\left( \partial_{a}\right)
\ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\varsigma\right) \\
& =\Xi\left( \partial_{a}\right) =\left( \xi\circ\partial^{-1}\right)
\left( \partial_{a}\right) \ \ \ \ \ \ \ \ \ \ \left( \text{by
(\ref{p10.2.18b.2}), applied to }z=\partial_{a}\right) \\
& =\xi\left( \underbrace{\partial^{-1}\left( \partial_{a}\right)
}_{\substack{=a\\\text{(since }\partial_{a}=\partial\left( a\right)
\text{)}}}\right) =\xi\left( a\right) =\partial_{a}+\tau\left( a\right)
\ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\xi\right) .
\end{align*}
Thus, $\varsigma$ satisfies Condition 2.
We now will prove that $\varsigma$ is a $\mathbb{C}$-algebra homomorphism. For
this, define a subset $\mathcal{G}$ of $\mathcal{D}\left( U\right) $ by
$\mathcal{G}=\mathbb{C}\left[ U\right] \cup\partial\left( V\right) $.
Then, $\mathcal{G}$ generates $\mathcal{D}\left( U\right) $ as a
$\mathbb{C}$-algebra. Hence, by Lemma 2.18a (applied to $A=\mathcal{D}\left(
U\right) $, $D=\mathcal{D}\left( U\right) $ and $f=\varsigma$), in order to
prove that $\varsigma$ is a $\mathbb{C}$-algebra homomorphism, it will be
enough to prove that%
\begin{equation}
\varsigma\left( ab\right) =\varsigma\left( a\right) \varsigma\left(
b\right) \ \ \ \ \ \ \ \ \ \ \text{for every }a\in\mathcal{G}\text{ and }%
b\in\mathcal{D}\left( U\right) \text{.} \label{p10.2.18b.3}%
\end{equation}
So let us prove this now:
\textit{Proof of (\ref{p10.2.18b.3}):} Let $a\in\mathcal{G}$ and
$b\in\mathcal{D}\left( U\right) $. Since the equality (\ref{p10.2.18b.3}) is
$\mathbb{C}$-linear in $b$, we can WLOG assume that $b$ has the form $gE$ for
some $g\in\mathbb{C}\left[ U\right] $ and $E\in\mathcal{D}_{\operatorname*{const}}\left( V\right)
$ (because every element of $\mathcal{D}\left( U\right) $ is a $\mathbb{C}%
$-linear combination of elements of this form). Assume this. Thus,
$b=gE=g\otimes E$. Hence,%
\begin{equation}
\varsigma\left( b\right) =\varsigma\left( g\otimes E\right) =g\Xi\left(
E\right) \ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }%
\varsigma\right) . \label{p10.2.18b.5}%
\end{equation}
Since $a\in\mathcal{G}=\mathbb{C}\left[ U\right] \cup\partial\left(
V\right) $, we have $a\in\mathbb{C}\left[ U\right] $ or $a\in
\partial\left( V\right) $. Thus, we must be in one of the following cases:
\textit{Case 1:} We have $a\in\mathbb{C}\left[ U\right] $.
\textit{Case 2:} We have $a\in\partial\left( V\right) $.
Let us consider Case 1 first. In this case, $a\in\mathbb{C}\left[ U\right]
$. Hence, $\varsigma\left( a\right) =a$ (by Condition 1, applied to $f=a$),
and%
\begin{align*}
\varsigma\left( a\underbrace{b}_{=gE}\right) & =\varsigma\left(
\underbrace{agE}_{=ag\otimes E}\right) =\varsigma\left( ag\otimes E\right)
=\underbrace{a}_{=\varsigma\left( a\right) }\underbrace{g\Xi\left(
E\right) }_{\substack{=\varsigma\left( b\right) \\\text{(by
(\ref{p10.2.18b.5}))}}}\ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of
}\varsigma\right) \\
& =\varsigma\left( a\right) \varsigma\left( b\right) .
\end{align*}
Hence, (\ref{p10.2.18b.3}) is proven in Case 1.
Let us now consider Case 2. In this case, $a\in\partial\left( V\right) $.
Thus, there exists some $v\in V$ such that $a=\partial_{v}$. Consider this
$v$. Let $\partial_{v}g$ denote the product of the elements $\partial_{v}$ and
$g$ in the $\mathbb{C}$-algebra $\mathcal{D}\left( U\right) $, whereas
$\partial_{v}\left( g\right) $ denotes the image of $g$ under the
differential operator $\partial_{v}$. Then,%
\[
\partial_{v}g=g\partial_{v}+\partial_{v}\left( g\right) ,
\]
so that%
\[
\underbrace{a}_{=\partial_{v}}\underbrace{b}_{=gE}=\underbrace{\partial_{v}%
g}_{=g\partial_{v}+\partial_{v}\left( g\right) }E=g\partial_{v}%
E+\partial_{v}\left( g\right) E=g\otimes\partial_{v}E+\partial_{v}\left(
g\right) \otimes E,
\]
and thus%
\begin{align*}
\varsigma\left( ab\right) & =\varsigma\left( g\otimes\partial
_{v}E+\partial_{v}\left( g\right) \otimes E\right) =\underbrace{\varsigma
\left( g\otimes\partial_{v}E\right) }_{\substack{=g\Xi\left( \partial
_{v}E\right) \\\text{(by the definition of }\varsigma\text{)}}%
}+\underbrace{\varsigma\left( \partial_{v}\left( g\right) \otimes E\right)
}_{\substack{=\partial_{v}\left( g\right) \Xi\left( E\right) \\\text{(by
the definition of }\varsigma\text{)}}}\\
& =g\underbrace{\Xi\left( \partial_{v}E\right) }_{\substack{=\Xi\left(
\partial_{v}\right) \Xi\left( E\right) \\\text{(since }\Xi\text{ is a
}\mathbb{C}\text{-algebra}\\\text{homomorphism)}}}+\partial_{v}\left(
g\right) \Xi\left( E\right) =g\underbrace{\Xi\left( \partial_{v}\right)
}_{\substack{=\left( \xi\circ\partial^{-1}\right) \left( \partial
_{v}\right) \\\text{(by (\ref{p10.2.18b.2}), applied}\\\text{to }%
z=\partial_{v}\text{)}}}\Xi\left( E\right) +\partial_{v}\left( g\right)
\Xi\left( E\right) \\
& =g\underbrace{\left( \xi\circ\partial^{-1}\right) \left( \partial
_{v}\right) }_{=\xi\left( \partial^{-1}\left( \partial_{v}\right) \right)
}\Xi\left( E\right) +\partial_{v}\left( g\right) \Xi\left( E\right)
=g\xi\left( \underbrace{\partial^{-1}\left( \partial_{v}\right)
}_{\substack{=v\\\text{(since }\partial_{v}=\partial\left( v\right)
\text{)}}}\right) \Xi\left( E\right) +\partial_{v}\left( g\right)
\Xi\left( E\right) \\
& =g\underbrace{\xi\left( v\right) }_{\substack{=\partial_{v}+\tau\left(
v\right) \\\text{(by the definition of }\xi\text{)}}}\Xi\left( E\right)
+\partial_{v}\left( g\right) \Xi\left( E\right) =g\left( \partial
_{v}+\tau\left( v\right) \right) \Xi\left( E\right) +\partial_{v}\left(
g\right) \Xi\left( E\right) \\
& =\left( \underbrace{g\left( \partial_{v}+\tau\left( v\right) \right)
}_{=g\partial_{v}+g\tau\left( v\right) }+\partial_{v}\left( g\right)
\right) \Xi\left( E\right) =\left( g\partial_{v}+g\tau\left( v\right)
+\partial_{v}\left( g\right) \right) \Xi\left( E\right) .
\end{align*}
On the other hand, Condition 2 (applied to $v$ instead of $a$) yields
$\varsigma\left( \partial_{v}\right) =\partial_{v}+\tau\left( v\right) $,
so that%
\begin{align*}
\varsigma\left( \underbrace{a}_{=\partial_{v}}\right) \varsigma\left(
\underbrace{b}_{=gE=g\otimes E}\right) & =\underbrace{\varsigma\left(
\partial_{v}\right) }_{=\partial_{v}+\tau\left( v\right) }%
\underbrace{\varsigma\left( g\otimes E\right) }_{\substack{=g\Xi\left(
E\right) \\\text{(by the definition of }\varsigma\text{)}}}\\
& =\left( \partial_{v}+\tau\left( v\right) \right) g\Xi\left( E\right)
=\underbrace{\partial_{v}g}_{=g\partial_{v}+\partial_{v}\left( g\right) }%
\Xi\left( E\right) +\underbrace{\tau\left( v\right) g}_{=g\tau\left(
v\right) }\Xi\left( E\right) \\
& =g\partial_{v}\Xi\left( E\right) +\partial_{v}\left( g\right)
\Xi\left( E\right) +g\tau\left( v\right) \Xi\left( E\right) \\
& =\left( g\partial_{v}+\partial_{v}\left( g\right) +g\tau\left(
v\right) \right) \Xi\left( E\right) =\left( g\partial_{v}+g\tau\left(
v\right) +\partial_{v}\left( g\right) \right) \Xi\left( E\right) \\
& =\varsigma\left( ab\right) .
\end{align*}
Hence, (\ref{p10.2.18b.3}) is proven in Case 2.
So we have proven (\ref{p10.2.18b.3}) in each of the Cases 1 and 2. Since
Cases 1 and 2 are the only possible cases, this yields that (\ref{p10.2.18b.3}%
) always holds.
Thus, Lemma 2.18a (applied to $A=\mathcal{D}\left( U\right) $,
$D=\mathcal{D}\left( U\right) $ and $f=\varsigma$) yields that $\varsigma$
is a $\mathbb{C}$-algebra homomorphism. Hence, $\varsigma$ is a $\mathbb{C}%
$-algebra homomorphism satisfying Conditions 1 and 2. We thus have verified
the existence of a $\mathbb{C}$-algebra homomorphism $\varsigma:\mathcal{D}%
\left( U\right) \rightarrow\mathcal{D}\left( U\right) $ satisfying
Conditions 1 and 2. This completes the proof of Lemma 2.18b.
\textbf{Corollary 2.18c.} Let $\mathfrak{h}$ be a $\mathbb{C}$-vector space.
Let $\mathcal{S}$ be a finite set. For every $s\in\mathcal{S}$, let $c_{s}$ be
an element of $\mathbb{C}$ and let $\alpha_{s}$ be an element of
$\mathfrak{h}^{\ast}$. Let $\mathfrak{h}_{\operatorname*{reg}}$ be a
Zariski-dense open subset of $\mathfrak{h}$ such that every $a\in
\mathfrak{h}_{\operatorname*{reg}}$ and every $s\in\mathcal{S}$ satisfy
$\alpha_{s}\left( a\right) \neq0$. Then, there exists a unique $\mathbb{C}%
$-algebra homomorphism $\varsigma:\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) \rightarrow\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $ which satisfies the following two conditions:
\textit{Condition 1:} We have $\varsigma\left( f\right) =f$ for every
$f\in\mathbb{C}\left[ \mathfrak{h}_{\operatorname*{reg}}\right] $ (where
$\mathbb{C}\left[ \mathfrak{h}_{\operatorname*{reg}}\right] $ is canonically
embedded into $\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $).
\textit{Condition 2:} We have $\varsigma\left( \partial_{a}\right)
=\partial_{a}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
a\right) }{\alpha_{s}}$ for every $a\in\mathfrak{h}$.
\textit{Proof of Corollary 2.18c.} Let $V=\mathfrak{h}$ and $U=\mathfrak{h}%
_{\operatorname*{reg}}$. Define a $\mathbb{C}$-linear map $\tau:\mathfrak{h}%
\rightarrow\mathbb{C}\left[ \mathfrak{h}_{\operatorname*{reg}}\right] $ by
\[
\tau\left( a\right) =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha
_{s}\left( a\right) }{\alpha_{s}}\ \ \ \ \ \ \ \ \ \ \text{for every }%
a\in\mathfrak{h}.
\]
Then, obviously, Conditions 1 and 2 of Corollary 2.18c are equivalent to
Conditions 1 and 2 of Lemma 2.18b, respectively.
Every $a\in\mathfrak{h}$ and $b\in\mathfrak{h}$ satisfy%
\begin{align*}
& \left[ \partial_{a}+\tau\left( a\right) ,\partial_{b}+\tau\left(
b\right) \right] \\
& =\left[ \partial_{a}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha
_{s}\left( a\right) }{\alpha_{s}},\partial_{b}+\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}\alpha_{s}\left( b\right) }{\alpha_{s}}\right] \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\tau\left( a\right)
=\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( a\right) }%
{\alpha_{s}}\text{ and }\tau\left( b\right) =\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}\alpha_{s}\left( b\right) }{\alpha_{s}}\right) \\
& =\underbrace{\left[ \partial_{a},\partial_{b}\right] }_{=0}+\left[
\partial_{a},\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
b\right) }{\alpha_{s}}\right] +\underbrace{\left[ \sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( a\right) }{\alpha_{s}},\partial
_{b}\right] }_{=-\left[ \partial_{b},\sum\limits_{s\in\mathcal{S}}%
\dfrac{c_{s}\alpha_{s}\left( a\right) }{\alpha_{s}}\right] }%
+\underbrace{\left[ \sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha
_{s}\left( a\right) }{\alpha_{s}},\sum\limits_{s\in\mathcal{S}}\dfrac
{c_{s}\alpha_{s}\left( b\right) }{\alpha_{s}}\right] }_{=0}\\
& =\underbrace{\left[ \partial_{a},\sum\limits_{s\in\mathcal{S}}\dfrac
{c_{s}\alpha_{s}\left( b\right) }{\alpha_{s}}\right] }_{=\partial
_{a}\left( \sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
b\right) }{\alpha_{s}}\right) =\sum\limits_{s\in\mathcal{S}}c_{s}\alpha
_{s}\left( b\right) \cdot\partial_{a}\left( \dfrac{1}{\alpha_{s}}\right)
}-\underbrace{\left[ \partial_{b},\sum\limits_{s\in\mathcal{S}}\dfrac
{c_{s}\alpha_{s}\left( a\right) }{\alpha_{s}}\right] }_{=\partial
_{b}\left( \sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
a\right) }{\alpha_{s}}\right) =\sum\limits_{s\in\mathcal{S}}c_{s}\alpha
_{s}\left( a\right) \cdot\partial_{b}\left( \dfrac{1}{\alpha_{s}}\right)
}\\
& =\sum\limits_{s\in\mathcal{S}}c_{s}\alpha_{s}\left( b\right)
\cdot\underbrace{\partial_{a}\left( \dfrac{1}{\alpha_{s}}\right)
}_{\substack{=-\dfrac{\partial_{a}\left( \alpha_{s}\right) }{\alpha_{s}^{2}%
}=-\dfrac{\alpha_{s}\left( a\right) }{\alpha_{s}^{2}}\\\text{(since
}\partial_{a}\left( \alpha_{s}\right) =\alpha_{s}\left( a\right) \text{
(because }\alpha_{s}\\\text{is linear))}}}-\sum\limits_{s\in\mathcal{S}}%
c_{s}\alpha_{s}\left( a\right) \cdot\underbrace{\partial_{b}\left(
\dfrac{1}{\alpha_{s}}\right) }_{\substack{=-\dfrac{\partial_{b}\left(
\alpha_{s}\right) }{\alpha_{s}^{2}}=-\dfrac{\alpha_{s}\left( b\right)
}{\alpha_{s}^{2}}\\\text{(since }\partial_{b}\left( \alpha_{s}\right)
=\alpha_{s}\left( b\right) \text{ (because }\alpha_{s}\\\text{is linear))}%
}}\\
& =\sum\limits_{s\in\mathcal{S}}\underbrace{c_{s}\alpha_{s}\left( b\right)
\cdot\left( -\dfrac{\alpha_{s}\left( a\right) }{\alpha_{s}^{2}}\right)
}_{=\dfrac{-c_{s}\alpha_{s}\left( a\right) \alpha_{s}\left( b\right)
}{\alpha_{s}^{2}}}-\sum\limits_{s\in\mathcal{S}}\underbrace{c_{s}\alpha
_{s}\left( a\right) \cdot\left( -\dfrac{\alpha_{s}\left( b\right)
}{\alpha_{s}^{2}}\right) }_{=\dfrac{-c_{s}\alpha_{s}\left( a\right)
\alpha_{s}\left( b\right) }{\alpha_{s}^{2}}}\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( a\right)
\alpha_{s}\left( b\right) }{\alpha_{s}^{2}}-\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}\alpha_{s}\left( a\right) \alpha_{s}\left( b\right) }%
{\alpha_{s}^{2}}=0.
\end{align*}
Hence, Lemma 2.18b yields that there exists a unique $\mathbb{C}$-algebra
homomorphism $\varsigma:\mathcal{D}\left( U\right) \rightarrow
\mathcal{D}\left( U\right) $ which satisfies the Conditions 1 and 2 of Lemma
2.18b. In other words, there exists a unique $\mathbb{C}$-algebra homomorphism
$\varsigma:\mathcal{D}\left( U\right) \rightarrow\mathcal{D}\left(
U\right) $ which satisfies the Conditions 1 and 2 of Corollary 2.18c (because
we know that Conditions 1 and 2 of Corollary 2.18c are equivalent to
Conditions 1 and 2 of Lemma 2.18b, respectively). Corollary 2.18c is thus proven.
\textbf{Definition.} Let $\mathfrak{h}$ be a $\mathbb{C}$-vector space with a
nondegenerate bilinear inner product $\left( \cdot,\cdot\right) $. Let
$W\subseteq\operatorname*{O}\left( \mathfrak{h}\right) $ be a real
reflection group, and $\mathcal{S}\subseteq W$ the set of reflections. Let
$c:\mathcal{S}\rightarrow\mathbb{C}$ be a function invariant under conjugation
(by elements of $W$). For every $s\in\mathcal{S}$, we will write $c_{s}$ for
$c\left( s\right) $. For every $s\in\mathcal{S}$, let $\alpha_{s}%
\in\mathfrak{h}^{\ast}$ be the unique (up to scaling by an element of
$\mathbb{C}^{\times}$) nonzero eigenvector of $s$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$, and let $\alpha_{s}^{\vee}\in\mathfrak{h}$ be
the unique (up to scaling by an element of $\mathbb{C}^{\times}$) nonzero
eigenvector of $s$ (acting on $\mathfrak{h}$) with eigenvalue $-1$. Define $H$
as in Definition 2.8, and define $\overline{H}$ as in Proposition 2.16. Let
$\mathfrak{h}_{\operatorname*{reg}}$ be the subset $\left\{ x\in
\mathfrak{h}\ \mid\ W_{x}=\left\{ \operatorname*{id}\right\} \right\} $ of
$\mathfrak{h}$. According to Corollary 2.18c, there exists a unique
$\mathbb{C}$-algebra homomorphism $\varsigma:\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) \rightarrow\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $ which satisfies the Conditions 1 and 2 of
Corollary 2.18c\footnote{In fact, every $a\in\mathfrak{h}_{\operatorname*{reg}%
}$ and every $s\in\mathcal{S}$ satisfy $\alpha_{s}\left( a\right) \neq0$
(because otherwise, $a$ would be fixed under $s$, contradicting $W_{a}%
=\left\{ \operatorname*{id}\right\} $).}. This homomorphism $\varsigma$ will
be denoted by $\varsigma_{c}$. Due to Condition 1, it satisfies%
\begin{equation}
\varsigma_{c}\left( f\right) =f\ \ \ \ \ \ \ \ \ \ \text{for every }%
f\in\mathbb{C}\left[ \mathfrak{h}_{\operatorname*{reg}}\right]
\label{p10.2.18c.1}%
\end{equation}
(where $\mathbb{C}\left[ \mathfrak{h}_{\operatorname*{reg}}\right] $ is
canonically embedded into $\mathcal{D}\left( \mathfrak{h}%
_{\operatorname*{reg}}\right) $). Due to Condition 2, it satisfies%
\begin{equation}
\varsigma_{c}\left( \partial_{a}\right) =\partial_{a}+\sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( a\right) }{\alpha_{s}%
}\ \ \ \ \ \ \ \ \ \ \text{for every }a\in\mathfrak{h}. \label{p10.2.18c.2}%
\end{equation}
\textbf{Remark.} In terms of your Proposition 2.18, this homomorphism
$\varsigma_{c}$ is the conjugation by $\delta_{c}$ (that is, it is given by
$D\mapsto\delta_{c}^{-1}\circ D\circ\delta_{c}$). However, our definition of
$\varsigma_{c}$ was purely algebraic, while your $\delta_{c}$ is a
transcendental function (in general).
Now, our elementary version of Proposition 2.18 rewrites as follows:
\textbf{Proposition 2.18d.} We have $\varsigma_{c}\left( \overline{H}\right)
=H$.
Before we prove this, another lemma:
\textbf{Lemma 2.18e.} Let $\mathfrak{h}$ be a $\mathbb{C}$-vector space with a
nondegenerate bilinear inner product $\left( \cdot,\cdot\right) $. Let
$W\subseteq\operatorname*{O}\left( \mathfrak{h}\right) $ be a real
reflection group, and $\mathcal{S}\subseteq W$ the set of reflections. Let
$c:\mathcal{S}\rightarrow\mathbb{C}$ be a function invariant under conjugation
(by elements of $W$). For every $s\in\mathcal{S}$, we will write $c_{s}$ for
$c\left( s\right) $. For every $s\in\mathcal{S}$, let $\alpha_{s}%
\in\mathfrak{h}^{\ast}$ be the unique (up to scaling by an element of
$\mathbb{C}^{\times}$) nonzero eigenvector of $s$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$. Let $\mathfrak{h}_{\operatorname*{reg}}$ be
the subset $\left\{ x\in\mathfrak{h}\ \mid\ W_{x}=\left\{ \operatorname*{id}%
\right\} \right\} $ of $\mathfrak{h}$. Then:
\textbf{(a)} Every $t\in\mathcal{S}$ satisfies%
\[
t\left( \prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) =-\prod
\limits_{s\in\mathcal{S}}\alpha_{s}%
\]
(where $t\left( \prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) $ denotes
the action of $t\in W$ on $\prod\limits_{s\in\mathcal{S}}\alpha_{s}\in
S\left( \mathfrak{h}^{\ast}\right) $).
\textbf{(b)} We have%
\[
\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha
_{u}}=0.
\]
\textbf{(c)} We have%
\[
\sum\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}\left(
\alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}=\sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha
_{s}^{2}}.
\]
\textit{Proof of Lemma 2.18e.} Let us notice that
\begin{equation}
\text{if two }t\in\mathcal{S}\text{ and }s\in\mathcal{S}\text{ satisfy
}\operatorname*{Ker}\left( \alpha_{t}\right) \subseteq\operatorname*{Ker}%
\left( \alpha_{s}\right) \text{, then }t=s \label{p10.2.18e.1}%
\end{equation}
\footnote{\textit{Proof of (\ref{p10.2.18e.1}):} Let $t\in\mathcal{S}$ and
$s\in\mathcal{S}$ satisfy $\operatorname*{Ker}\left( \alpha_{t}\right)
\subseteq\operatorname*{Ker}\left( \alpha_{s}\right) $. Then,
$\operatorname*{Ker}\left( \alpha_{t}\right) =\operatorname*{Ker}\left(
\alpha_{s}\right) $ (since $\operatorname*{Ker}\left( \alpha_{t}\right) $
and $\operatorname*{Ker}\left( \alpha_{s}\right) $ are hyperplanes in
$\mathfrak{h}$, and thus have the same dimension).
\par
But $s$ is the reflection in the hyperplane $\operatorname*{Ker}\left(
\alpha_{s}\right) $ (because $s$ is a reflection, and $\alpha_{s}%
\in\mathfrak{h}^{\ast}$ is the unique (up to scaling by an element of
$\mathbb{C}^{\times}$) nonzero eigenvector of $s$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$). Similarly, $t$ is the reflection in the
hyperplane $\operatorname*{Ker}\left( \alpha_{t}\right) $. Thus,%
\[
s=\left( \text{the reflection in the hyperplane }%
\underbrace{\operatorname*{Ker}\left( \alpha_{s}\right) }%
_{=\operatorname*{Ker}\left( \alpha_{t}\right) }\right) =\left( \text{the
reflection in the hyperplane }\operatorname*{Ker}\left( \alpha_{t}\right)
\right) =t,
\]
qed.}. As a consequence, the polynomials $\alpha_{s}\in\mathbb{C}\left[
\mathfrak{h}\right] $ for $s\in\mathcal{S}$ are pairwise
coprime.\footnote{\textit{Proof.} Assume the contrary. Then, there exist two
distinct elements $t\in\mathcal{S}$ and $s\in\mathcal{S}$ such that the
polynomials $\alpha_{s}$ and $\alpha_{t}$ have a nontrivial common divisor.
Consider these $t$ and $s$. The polynomials $\alpha_{s}$ and $\alpha_{t}$ have
a nontrivial common divisor, but are both linear. Therefore, $\alpha_{s}$ and
$\alpha_{t}$ must be proportional to each other, i.e., there exists a
$\lambda\in\mathbb{C}^{\times}$ such that $\alpha_{s}=\lambda\alpha_{t}$.
Therefore, $\operatorname*{Ker}\left( \alpha_{t}\right) =\operatorname*{Ker}%
\left( \alpha_{s}\right) $, so that $t=s$ (by (\ref{p10.2.18e.1})),
contradicting the assumption that $t$ and $s$ be distinct. This contradiction
proves that our assumption was wrong, qed.}
Also, for every $t\in\mathcal{S}$ and every $s\in\mathcal{S}$, we have
$t\alpha_{s}\in\mathbb{C}^{\times}\alpha_{tst^{-1}}$%
\ \ \ \ \footnote{\textit{Proof.} Let $t\in\mathcal{S}$ and $s\in\mathcal{S}$.
Then, $\alpha_{s}$ is a nonzero eigenvector of $s$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$. Thus, $s\alpha_{s}=-1\alpha_{s}=-\alpha_{s}$,
so that $\left( tst^{-1}\right) \left( t\alpha_{s}\right)
=t\underbrace{s\alpha_{s}}_{=-\alpha_{s}}=-t\alpha_{s}$. In other words,
$t\alpha_{s}$ is an eigenvector of $tst^{-1}$ (acting on $\mathfrak{h}^{\ast}%
$) with eigenvalue $-1$. Also, $t\alpha_{s}\neq0$ (since $\alpha_{s}\neq0$).
Hence, $t\alpha_{s}$ is a nonzero eigenvector of $tst^{-1}$ (acting on
$\mathfrak{h}^{\ast}$) with eigenvalue $-1$. Thus, $t\alpha_{s}\in
\mathbb{C}^{\times}\alpha_{tst^{-1}}$ (because $\alpha_{tst^{-1}}$ is the
unique (up to scaling by an element of $\mathbb{C}^{\times}$) nonzero
eigenvector of $tst^{-1}$ (acting on $\mathfrak{h}^{\ast}$) with eigenvalue
$-1$), qed.}. In other words, for every $t\in\mathcal{S}$ and every
$s\in\mathcal{S}$, there exists a $\mu_{t,s}\in\mathbb{C}^{\times}$ such that%
\begin{equation}
t\alpha_{s}=\mu_{t,s}\alpha_{tst^{-1}}. \label{p10.2.18e.mu}%
\end{equation}
Consider these $\mu_{t,s}$.
\textbf{(a)} Let $t\in\mathcal{S}$. Then,%
\begin{align*}
t\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha
_{s}\right) & =\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\underbrace{\left( t\alpha_{s}\right) }_{=\mu_{t,s}\alpha_{tst^{-1}}}%
=\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}%
\underbrace{\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }%
\alpha_{tst^{-1}}}_{\substack{=\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}\\\text{(because the map}\\\mathcal{S}\diagdown\left\{
t\right\} \rightarrow\mathcal{S}\diagdown\left\{ t\right\} ,\ s\mapsto
tst^{-1}\\\text{is a bijection)}}}\\
& =\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}%
\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}.
\end{align*}
Hence,%
\begin{align*}
t^{2}\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }%
\alpha_{s}\right) & =t\cdot\underbrace{t\left( \prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\right) }_{=\prod
\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}\prod
\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}}\\
& =\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}%
\cdot\underbrace{t\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}\right)
}_{=\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}%
\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}}=\left(
\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}\right)
^{2}\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}.
\end{align*}
Compared with $t^{2}\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}\right) =\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}$ (because $t$ is a reflection and thus satisfies
$t^{2}=\operatorname*{id}$), this yields $\left( \prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}\right) ^{2}\prod
\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}%
=\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}$. Since
$\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}$ is
nonzero, this yields $\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\mu_{t,s}\right) ^{2}=1$. Hence, $\prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}=1$ or $\prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}=-1$.
Let us first assume that $\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\mu_{t,s}=-1$. In this case,%
\begin{equation}
t\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha
_{s}\right) =\underbrace{\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\mu_{t,s}}_{=-1}\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}=-\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}. \label{p10.2.18e.2}%
\end{equation}
Now, $\operatorname*{Ker}\left( \alpha_{t}\right) \not \subseteq
\bigcup\limits_{s\in\mathcal{S};\ s\neq t}\operatorname*{Ker}\left(
\alpha_{s}\right) $\ \ \ \ \footnote{\textit{Proof.} Assume the contrary.
Then, $\operatorname*{Ker}\left( \alpha_{t}\right) \subseteq\bigcup
\limits_{s\in\mathcal{S};\ s\neq t}\operatorname*{Ker}\left( \alpha
_{s}\right) $. Since $\operatorname*{Ker}\left( \alpha_{t}\right) $ and
$\operatorname*{Ker}\left( \alpha_{s}\right) $ are vector subspaces of
$\mathfrak{h}$, this yields that there exists some $s\in\mathcal{S}$ such that
$s\neq t$ and $\operatorname*{Ker}\left( \alpha_{t}\right) \subseteq
\operatorname*{Ker}\left( \alpha_{s}\right) $ (because there is a well-known
linear-algebraic fact that if a vector subspace $U$ of a finite-dimensional
$\mathbb{C}$-vector space $V$ is a subset of the union $\bigcup\limits_{i\in
I}W_{i}$ of finitely many subspaces $W_{i}$ of $V$, then there exists some
$i\in I$ such that $U\subseteq W_{i}$). Consider this $s$. Then,
$\operatorname*{Ker}\left( \alpha_{t}\right) \subseteq\operatorname*{Ker}%
\left( \alpha_{s}\right) $, so that $t=s$ (by (\ref{p10.2.18e.1})),
contradicting $s\neq t$. This contradiction shows that our assumption was
wrong, qed.}. Hence, there exists a $p\in\operatorname*{Ker}\left( \alpha
_{t}\right) $ such that $p\notin\bigcup\limits_{s\in\mathcal{S};\ s\neq
t}\operatorname*{Ker}\left( \alpha_{s}\right) $. Pick such a $p$.
Now, $t$ is the reflection in the hyperplane $\operatorname*{Ker}\left(
\alpha_{t}\right) $ (because $t$ is a reflection, and $\alpha_{t}%
\in\mathfrak{h}^{\ast}$ is the unique (up to scaling by an element of
$\mathbb{C}^{\times}$) nonzero eigenvector of $t$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$). Thus, $\operatorname*{Ker}\left( \alpha
_{t}\right) =\left\{ \text{set of fixed points of }t\text{ in }%
\mathfrak{h}\right\} $.
Since $p\in\operatorname*{Ker}\left( \alpha_{t}\right) =\left\{ \text{set
of fixed points of }t\text{ in }\mathfrak{h}\right\} $, the point $p$ is
fixed under $t$, so that $tp=p$ and thus $t^{-1}p=p$. Thus,%
\[
\left( t\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\alpha_{s}\right) \right) \left( p\right) =\left( \prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\right)
\underbrace{\left( t^{-1}p\right) }_{=p}=\left( \prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\right) \left( p\right)
.
\]
Compared to%
\[
\underbrace{\left( t\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\alpha_{s}\right) \right) }_{\substack{=-\prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\\\text{(by
(\ref{p10.2.18e.2}))}}}\left( p\right) =-\left( \prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\right) \left( p\right)
,
\]
this yields $\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\alpha_{s}\right) \left( p\right) =-\left( \prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\right) \left( p\right)
$. Thus, $\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\alpha_{s}\right) \left( p\right) =0$. In other words, $\prod
\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}\left(
p\right) =0$. Hence, there exists some $s\in\mathcal{S}\diagdown\left\{
t\right\} $ such that $p\in\operatorname*{Ker}\left( \alpha_{s}\right) $.
In other words, $p\in\bigcup\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\operatorname*{Ker}\left( \alpha_{s}\right) =\bigcup
\limits_{s\in\mathcal{S};\ s\neq t}\operatorname*{Ker}\left( \alpha
_{s}\right) $, contradicting $p\notin\bigcup\limits_{s\in\mathcal{S};\ s\neq
t}\operatorname*{Ker}\left( \alpha_{s}\right) $.
This contradiction shows that our assumption (the assumption that
$\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}=-1$) was
wrong. So we don't have $\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\mu_{t,s}=-1$. Since we know that we have $\prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}=1$ or $\prod\limits_{s\in
\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}=-1$, this yields that
$\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\mu_{t,s}=1$.
But we know that $\alpha_{t}$ is an eigenvector of $t$ (acting on
$\mathfrak{h}^{\ast}$) with eigenvalue $-1$. Thus, $t\alpha_{t}=-1\alpha
_{t}=-\alpha_{t}$.
Now, $\prod\limits_{s\in\mathcal{S}}\alpha_{s}=\alpha_{t}\cdot\prod
\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\} }\alpha_{s}$, so that%
\begin{align*}
t\left( \prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) & =t\left(
\alpha_{t}\cdot\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\alpha_{s}\right) =\underbrace{t\alpha_{t}}_{=-\alpha_{t}}\cdot
\underbrace{t\left( \prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\alpha_{s}\right) }_{=\prod\limits_{s\in\mathcal{S}\diagdown\left\{
t\right\} }\mu_{t,s}\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\alpha_{s}}\\
& =-\underbrace{\prod\limits_{s\in\mathcal{S}\diagdown\left\{ t\right\}
}\mu_{t,s}}_{=1}\cdot\underbrace{\alpha_{t}\prod\limits_{s\in\mathcal{S}%
\diagdown\left\{ t\right\} }\alpha_{s}}_{=\prod\limits_{s\in\mathcal{S}%
}\alpha_{s}}=-\prod\limits_{s\in\mathcal{S}}\alpha_{s}.
\end{align*}
This proves Lemma 2.18e \textbf{(a)}.
\textbf{(b)} Let $P$ be the function%
\[
\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha
_{u}}\in\mathbb{C}\left[ \mathfrak{h}_{\operatorname*{reg}}\right] .
\]
Then, every $t\in\mathcal{S}$ satisfies%
\begin{align*}
tP & =t\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq
u}}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}%
\alpha_{u}}=\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq
u}}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\left(
t\alpha_{s}\right) \left( t\alpha_{u}\right) }=\sum\limits_{\substack{s\in
\mathcal{S};\ u\in\mathcal{S};\\s\neq u}}\dfrac{c_{s}c_{u}\left( t\alpha
_{s},t\alpha_{u}\right) }{\left( t\alpha_{s}\right) \left( t\alpha
_{u}\right) }\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }t\in\mathcal{S}\subseteq
W\subseteq\operatorname*{O}\left( \mathfrak{h}\right) \text{ and thus
}\left( \alpha_{s},\alpha_{u}\right) =\left( t\alpha_{s},t\alpha
_{u}\right) \right) \\
& =\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}\dfrac{c_{s}c_{u}\left( \mu_{t,s}\alpha_{tst^{-1}},\mu_{t,u}\alpha
_{tut^{-1}}\right) }{\mu_{t,s}\alpha_{tst^{-1}}\cdot\mu_{t,u}\alpha
_{tut^{-1}}}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since (\ref{p10.2.18e.mu}) yields
}t\alpha_{s}=\mu_{t,s}\alpha_{tst^{-1}}\text{ and }t\alpha_{u}=\mu_{t,u}%
\alpha_{tut^{-1}}\right) \\
& =\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}\dfrac{c_{s}c_{u}\mu_{t,s}\mu_{t,u}\left( \alpha_{tst^{-1}},\alpha
_{tut^{-1}}\right) }{\mu_{t,s}\alpha_{tst^{-1}}\cdot\mu_{t,u}\alpha
_{tut^{-1}}}=\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq
u}}\dfrac{c_{s}c_{u}\left( \alpha_{tst^{-1}},\alpha_{tut^{-1}}\right)
}{\alpha_{tst^{-1}}\alpha_{tut^{-1}}}\\
& =\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}\dfrac{c_{tst^{-1}}c_{tut^{-1}}\left( \alpha_{tst^{-1}},\alpha_{tut^{-1}%
}\right) }{\alpha_{tst^{-1}}\alpha_{tut^{-1}}}\ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{since the function }c\text{ is invariant under}\\
\text{conjugation, and thus }c_{s}=c_{tst^{-1}}\text{ and }c_{u}=c_{tut^{-1}}%
\end{array}
\right) \\
& =\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha
_{u}}\\
& \ \ \ \ \ \ \ \ \ \ \left(
\begin{array}
[c]{c}%
\text{here, we substituted }\left( s,u\right) \text{ for }\left(
tst^{-1},tut^{-1}\right) \text{ in the sum, because the map}\\
\left\{ \left( s,u\right) \in\mathcal{S}\times\mathcal{S}\ \mid\ s\neq
u\right\} \rightarrow\left\{ \left( s,u\right) \in\mathcal{S}%
\times\mathcal{S}\ \mid\ s\neq u\right\} ,\ \left( s,u\right)
\mapsto\left( tst^{-1},tut^{-1}\right) \\
\text{is a bijection}%
\end{array}
\right) \\
& =P.
\end{align*}
Moreover, since $P=\sum\limits_{\substack{s\in\mathcal{S};\ u\in
\mathcal{S};\\s\neq u}}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right)
}{\alpha_{s}\alpha_{u}}$ and $\prod\limits_{s\in\mathcal{S}}\alpha_{s}%
=\prod\limits_{q\in\mathcal{S}}\alpha_{q}$, we have%
\begin{align*}
P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s} & =\sum
\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}}\dfrac
{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}%
\cdot\prod\limits_{q\in\mathcal{S}}\alpha_{q}=\sum\limits_{\substack{s\in
\mathcal{S};\ u\in\mathcal{S};\\s\neq u}}c_{s}c_{u}\left( \alpha_{s}%
,\alpha_{u}\right) \cdot\underbrace{\dfrac{\prod\limits_{q\in\mathcal{S}%
}\alpha_{q}}{\alpha_{s}\alpha_{u}}}_{=\prod\limits_{q\in\mathcal{S}%
\diagdown\left\{ s,u\right\} }\alpha_{q}}\\
& =\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}%
}c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) \cdot\prod\limits_{q\in
\mathcal{S}\diagdown\left\{ s,u\right\} }\alpha_{q}.
\end{align*}
This yields immediately that $P\cdot\prod\limits_{s\in\mathcal{S}}\alpha
_{s}\in\mathbb{C}\left[ \mathfrak{h}\right] $ and $\deg\left( P\cdot
\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) \leq\left\vert \mathcal{S}%
\right\vert -2$. Also, every $t\in\mathcal{S}$ satisfies%
\begin{equation}
t\left( P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right)
=\underbrace{tP}_{=P}\cdot\underbrace{t\left( \prod\limits_{s\in\mathcal{S}%
}\alpha_{s}\right) }_{\substack{=-\prod\limits_{s\in\mathcal{S}}\alpha
_{s}\\\text{(by Lemma 2.18e \textbf{(a)})}}}=-P\cdot\prod\limits_{s\in
\mathcal{S}}\alpha_{s}. \label{p10.2.18e.6}%
\end{equation}
Thus, for every $t\in\mathcal{S}$, we have $\alpha_{t}\mid P\cdot
\prod\limits_{s\in\mathcal{S}}\alpha_{s}$ in $\mathbb{C}\left[ \mathfrak{h}%
\right] $\ \ \ \ \footnote{\textit{Proof.} Let $t\in\mathcal{S}$. We know
that $t$ is the reflection in the hyperplane $\operatorname*{Ker}\left(
\alpha_{t}\right) $ (because $t$ is a reflection, and $\alpha_{t}%
\in\mathfrak{h}^{\ast}$ is the unique (up to scaling by an element of
$\mathbb{C}^{\times}$) nonzero eigenvector of $t$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$). Thus, $\operatorname*{Ker}\left( \alpha
_{t}\right) =\left\{ \text{set of fixed points of }t\text{ in }%
\mathfrak{h}\right\} $.
\par
Now, let $x\in\operatorname*{Ker}\left( \alpha_{t}\right) $. Then, $tx=x$
(because $x\in\operatorname*{Ker}\left( \alpha_{t}\right) =\left\{
\text{set of fixed points of }t\text{ in }\mathfrak{h}\right\} $) and thus
$t^{-1}x=x$, so that%
\[
\left( t\left( P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right)
\right) \left( x\right) =\left( P\cdot\prod\limits_{s\in\mathcal{S}}%
\alpha_{s}\right) \left( \underbrace{t^{-1}x}_{=x}\right) =\left(
P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) \left( x\right) .
\]
Compared to%
\[
\underbrace{\left( t\left( P\cdot\prod\limits_{s\in\mathcal{S}}\alpha
_{s}\right) \right) }_{\substack{=-P\cdot\prod\limits_{s\in\mathcal{S}%
}\alpha_{s}\\\text{(by (\ref{p10.2.18e.6}))}}}\left( x\right) =-\left(
P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) \left( x\right) ,
\]
this yields $\left( P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right)
\left( x\right) =-\left( P\cdot\prod\limits_{s\in\mathcal{S}}\alpha
_{s}\right) \left( x\right) $, so that $\left( P\cdot\prod\limits_{s\in
\mathcal{S}}\alpha_{s}\right) \left( x\right) =0$.
\par
Now forget that we fixed $x$. We thus have proven that every $x\in
\operatorname*{Ker}\left( \alpha_{t}\right) $ satisfies $\left( P\cdot
\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) \left( x\right) =0$. In
other words, the polynomial $P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}$
vanishes on the kernel of the linear function $\alpha_{t}$. Thus, $\alpha
_{t}\mid P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}$ in $\mathbb{C}\left[
\mathfrak{h}\right] $ (because a polynomial which vanishes on the kernel of a
linear function must be divisible by that function), qed.}. Since the
polynomials $\alpha_{s}\in\mathbb{C}\left[ \mathfrak{h}\right] $ for
$s\in\mathcal{S}$ are pairwise coprime, this yields that $\prod\limits_{t\in
\mathcal{S}}\alpha_{t}\mid P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}$ in
$\mathbb{C}\left[ \mathfrak{h}\right] $ (because $\mathbb{C}\left[
\mathfrak{h}\right] $ is a unique factorization domain). Since $\deg\left(
P\cdot\prod\limits_{s\in\mathcal{S}}\alpha_{s}\right) \leq\left\vert
\mathcal{S}\right\vert -2<\left\vert \mathcal{S}\right\vert =\deg\left(
\prod\limits_{t\in\mathcal{S}}\alpha_{t}\right) $, this leads to $P\cdot
\prod\limits_{s\in\mathcal{S}}\alpha_{s}=0$ (because if a polynomial is
divisible by a polynomial of greater degree, then the former polynomial must
be $0$). Hence, $P=0$ (since $\mathbb{C}\left[ \mathfrak{h}%
_{\operatorname*{reg}}\right] $ is an integral domain, and $\prod
\limits_{s\in\mathcal{S}}\alpha_{s}\neq0$). Since $P=\sum
\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}}\dfrac
{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}$,
this rewrites as $\sum\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S}%
;\\s\neq u}}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }%
{\alpha_{s}\alpha_{u}}=0$. Lemma 2.18e \textbf{(b)} is proven.
\textbf{(c)} We have%
\begin{align*}
\sum\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}\left(
\alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}} & =\underbrace{\sum
\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s\neq u}}\dfrac
{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}%
}_{\substack{=0\\\text{(by Lemma 2.18e \textbf{(b)})}}}+\underbrace{\sum
\limits_{\substack{s\in\mathcal{S};\ u\in\mathcal{S};\\s=u}}\dfrac{c_{s}%
c_{u}\left( \alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}}%
_{=\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}\alpha_{s}}}\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}c_{s}\left( \alpha_{s}%
,\alpha_{s}\right) }{\alpha_{s}\alpha_{s}}=\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}},
\end{align*}
and thus Lemma 2.18e \textbf{(c)} is proven.
\textit{Proof of Proposition 2.18d.} Let $\left\{ y_{1},y_{2},...,y_{r}%
\right\} $ be an orthonormal basis of $\mathfrak{h}$. Then, by the definition
of the Laplace operator, $\Delta_{\mathfrak{h}}=\sum\limits_{i=1}^{r}%
\partial_{y_{i}}^{2}$.
For every $s\in\mathcal{S}$, we have%
\begin{equation}
\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) y_{i}=\dfrac{1}{2}\left(
\alpha_{s},\alpha_{s}\right) \alpha_{s}^{\vee}. \label{p10.2.18d.1}%
\end{equation}
\footnote{\textit{Proof of (\ref{p10.2.18d.1}):} Let $s\in\mathcal{S}$. Then,
the bilinear form $\left( \cdot,\cdot\right) $ is $W$-invariant (because
$W\subseteq\operatorname*{O}\left( \mathfrak{h}\right) $).
\par
Since the bilinear form $\left( \cdot,\cdot\right) $ is nondegenerate, it
induces an isomorphism $J:\mathfrak{h}^{\ast}\rightarrow\mathfrak{h}$. This
isomorphism $J$ is $W$-linear (since $\left( \cdot,\cdot\right) $ is
$W$-invariant). Also, it satisfies%
\[
J\left( \varphi\right) =\sum\limits_{i=1}^{r}\varphi\left( y_{i}\right)
y_{i}\ \ \ \ \ \ \ \ \ \ \text{for every }\varphi\in\mathfrak{h}^{\ast}%
\]
(since $\left\{ y_{1},y_{2},...,y_{r}\right\} $ is an orthonormal basis of
$\mathfrak{h}$). Applied to $\varphi=\alpha_{s}$, this yields%
\[
J\left( \alpha_{s}\right) =\sum\limits_{i=1}^{r}\alpha_{s}\left(
y_{i}\right) y_{i}.
\]
But since $\alpha_{s}$ is an eigenvector of $s$ (acting on $\mathfrak{h}%
^{\ast}$) with eigenvalue $-1$, we have $s\alpha_{s}=-1\alpha_{s}=-\alpha_{s}%
$. Thus, $J\left( s\alpha_{s}\right) =J\left( -\alpha_{s}\right)
=-J\left( \alpha_{s}\right) $. Compared with $J\left( s\alpha_{s}\right)
=sJ\left( \alpha_{s}\right) $ (since $J$ is $W$-linear), this yields
$sJ\left( \alpha_{s}\right) =-J\left( \alpha_{s}\right) =-1J\left(
\alpha_{s}\right) $. In other words, $J\left( \alpha_{s}\right) $ is an
eigenvector of $s$ (acting on $\mathfrak{h}$) with eigenvalue $-1$. This
yields that $J\left( \alpha_{s}\right) \in\mathbb{C}\alpha_{s}^{\vee}$
(because $\alpha_{s}^{\vee}\in\mathfrak{h}$ is the unique (up to scaling by an
element of $\mathbb{C}^{\times}$) nonzero eigenvector of $s$ (acting on
$\mathfrak{h}$) with eigenvalue $-1$). In other words, there exists a
$\lambda\in\mathbb{C}$ such that $J\left( \alpha_{s}\right) =\lambda
\alpha_{s}^{\vee}$. We now will prove that $\lambda=\dfrac{1}{2}\left(
\alpha_{s},\alpha_{s}\right) $.
\par
In fact, from $J\left( \alpha_{s}\right) =\sum\limits_{i=1}^{r}\alpha
_{s}\left( y_{i}\right) y_{i}$, we deduce that%
\[
\left\langle \alpha_{s},J\left( \alpha_{s}\right) \right\rangle
=\left\langle \alpha_{s},\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right)
y_{i}\right\rangle =\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right)
\underbrace{\left\langle \alpha_{s},y_{i}\right\rangle }_{=\alpha_{s}\left(
y_{i}\right) }=\sum\limits_{i=1}^{r}\left( \alpha_{s}\left( y_{i}\right)
\right) ^{2}=\left( \alpha_{s},\alpha_{s}\right)
\]
(since $\left\{ y_{1},y_{2},...,y_{r}\right\} $ is an orthonormal basis of
$\mathfrak{h}$). Compared with%
\[
\left\langle \alpha_{s},\underbrace{J\left( \alpha_{s}\right) }%
_{=\lambda\alpha_{s}^{\vee}}\right\rangle =\lambda\underbrace{\left\langle
\alpha_{s},\alpha_{s}^{\vee}\right\rangle }_{=2}=2\lambda,
\]
this yields $2\lambda=\left( \alpha_{s},\alpha_{s}\right) $, so that
$\lambda=\dfrac{1}{2}\left( \alpha_{s},\alpha_{s}\right) $. Now,%
\[
\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) y_{i}=J\left( \alpha
_{s}\right) =\underbrace{\lambda}_{=\dfrac{1}{2}\left( \alpha_{s},\alpha
_{s}\right) }\alpha_{s}^{\vee}=\dfrac{1}{2}\left( \alpha_{s},\alpha
_{s}\right) \alpha_{s}^{\vee}.
\]
This proves (\ref{p10.2.18d.1}).} Thus, for every $s\in\mathcal{S}$, we have%
\begin{align}
\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \partial_{y_{i}} &
=\partial_{\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) y_{i}%
}=\partial_{\dfrac{1}{2}\left( \alpha_{s},\alpha_{s}\right) \alpha_{s}%
^{\vee}}\ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{p10.2.18d.1})}\right)
\nonumber\\
& =\dfrac{1}{2}\left( \alpha_{s},\alpha_{s}\right) \partial_{\alpha
_{s}^{\vee}}. \label{p10.2.18d.2}%
\end{align}
Also, for every $s\in\mathcal{S}$ and $g\in\mathfrak{h}^{\ast}$, we have%
\begin{equation}
\left( \alpha_{s},\alpha_{s}\right) g\left( \alpha_{s}^{\vee}\right)
=2\left( \alpha_{s},g\right) . \label{p10.2.18d.3}%
\end{equation}
\footnote{\textit{Proof.} Let $s\in\mathcal{S}$ and $g\in\mathfrak{h}^{\ast}$.
Then, (\ref{p10.2.18d.1}) yields $\dfrac{1}{2}\left( \alpha_{s},\alpha
_{s}\right) \alpha_{s}^{\vee}=\sum\limits_{i=1}^{r}\alpha_{s}\left(
y_{i}\right) y_{i}$, so that $\left( \alpha_{s},\alpha_{s}\right)
\alpha_{s}^{\vee}=2\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) y_{i}%
$, and thus%
\[
g\left( \left( \alpha_{s},\alpha_{s}\right) \alpha_{s}^{\vee}\right)
=g\left( 2\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) y_{i}\right)
=2\underbrace{\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) g\left(
y_{i}\right) }_{\substack{=\left( \alpha_{s},g\right) \\\text{(since
}\left\{ y_{1},y_{2},...,y_{r}\right\} \text{ is an}\\\text{orthonormal
basis of }\mathfrak{h}\text{)}}}=2\left( \alpha_{s},g\right) .
\]
Since $g\left( \left( \alpha_{s},\alpha_{s}\right) \alpha_{s}^{\vee
}\right) =\left( \alpha_{s},\alpha_{s}\right) g\left( \alpha_{s}^{\vee
}\right) $, this rewrites as $\left( \alpha_{s},\alpha_{s}\right) g\left(
\alpha_{s}^{\vee}\right) =2\left( \alpha_{s},g\right) $. This proves
(\ref{p10.2.18d.3}).}
On the other hand, from $\Delta_{\mathfrak{h}}=\sum\limits_{i=1}^{r}%
\partial_{y_{i}}^{2}$, we obtain%
\begin{align*}
\varsigma_{c}\left( \Delta_{\mathfrak{h}}\right) & =\varsigma_{c}\left(
\sum\limits_{i=1}^{r}\partial_{y_{i}}^{2}\right) =\sum\limits_{i=1}%
^{r}\left( \underbrace{\varsigma_{c}\left( \partial_{y_{i}}\right)
}_{\substack{=\partial_{y_{i}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}%
\alpha_{s}\left( y_{i}\right) }{\alpha_{s}}\\\text{(by (\ref{p10.2.18c.2}),
applied to }a=y_{i}\text{)}}}\right) ^{2}\\
& \ \ \ \ \ \ \ \ \ \ \left( \text{since }\varsigma_{c}\text{ is a
}\mathbb{C}\text{-algebra homomorphism}\right) \\
& =\sum\limits_{i=1}^{r}\underbrace{\left( \partial_{y_{i}}+\sum
\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right) }%
{\alpha_{s}}\right) ^{2}}_{=\partial_{y_{i}}^{2}+\partial_{y_{i}}%
\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right)
}{\alpha_{s}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
y_{i}\right) }{\alpha_{s}}\partial_{y_{i}}+\left( \sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right) }{\alpha_{s}}\right)
^{2}}\\
& =\sum\limits_{i=1}^{r}\left( \partial_{y_{i}}^{2}+\partial_{y_{i}}%
\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right)
}{\alpha_{s}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
y_{i}\right) }{\alpha_{s}}\partial_{y_{i}}+\left( \sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right) }{\alpha_{s}}\right)
^{2}\right) \\
& =\underbrace{\sum\limits_{i=1}^{r}\partial_{y_{i}}^{2}}_{=\Delta
_{\mathfrak{h}}}+\underbrace{\sum\limits_{i=1}^{r}\partial_{y_{i}}%
\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right)
}{\alpha_{s}}}_{=\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}^{r}%
\alpha_{s}\left( y_{i}\right) \partial_{y_{i}}\dfrac{1}{\alpha_{s}}}\\
& \ \ \ \ \ \ \ \ \ \ +\underbrace{\sum\limits_{i=1}^{r}\sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right) }{\alpha_{s}}%
\partial_{y_{i}}}_{=\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}%
^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}\partial_{y_{i}}%
}+\sum\limits_{i=1}^{r}\underbrace{\left( \sum\limits_{s\in\mathcal{S}}%
\dfrac{c_{s}\alpha_{s}\left( y_{i}\right) }{\alpha_{s}}\right) ^{2}}%
_{=\sum\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}\alpha
_{s}\left( y_{i}\right) }{\alpha_{s}}\cdot\dfrac{c_{u}\alpha_{u}\left(
y_{i}\right) }{\alpha_{u}}}\\
& =\Delta_{\mathfrak{h}}+\sum\limits_{s\in\mathcal{S}}c_{s}\sum
\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \underbrace{\partial_{y_{i}%
}\dfrac{1}{\alpha_{s}}}_{=\dfrac{1}{\alpha_{s}}\partial_{y_{i}}+\partial
_{y_{i}}\left( \dfrac{1}{\alpha_{s}}\right) }\\
& \ \ \ \ \ \ \ \ \ \ +\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}%
^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}\partial_{y_{i}%
}+\underbrace{\sum\limits_{i=1}^{r}\sum\limits_{s\in\mathcal{S};\ u\in
\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left( y_{i}\right) }{\alpha_{s}}%
\cdot\dfrac{c_{u}\alpha_{u}\left( y_{i}\right) }{\alpha_{u}}}_{=\sum
\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}}{\alpha_{s}%
\alpha_{u}}\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \alpha
_{u}\left( y_{i}\right) }%
\end{align*}%
\begin{align*}
& =\Delta_{\mathfrak{h}}+\underbrace{\sum\limits_{s\in\mathcal{S}}c_{s}%
\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \left( \dfrac{1}%
{\alpha_{s}}\partial_{y_{i}}+\partial_{y_{i}}\left( \dfrac{1}{\alpha_{s}%
}\right) \right) }_{=\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}%
^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}\partial_{y_{i}}%
+\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}^{r}\alpha_{s}\left(
y_{i}\right) \partial_{y_{i}}\left( \dfrac{1}{\alpha_{s}}\right) }\\
& \ \ \ \ \ \ \ \ \ \ +\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}%
^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}\partial_{y_{i}}%
+\sum\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}}{\alpha
_{s}\alpha_{u}}\underbrace{\sum\limits_{i=1}^{r}\alpha_{s}\left(
y_{i}\right) \alpha_{u}\left( y_{i}\right) }_{\substack{=\left( \alpha
_{s},\alpha_{u}\right) \\\text{(since }\left\{ y_{1},y_{2},...,y_{r}%
\right\} \text{ is an orthonormal}\\\text{basis of }\mathfrak{h}\text{) }}}\\
& =\Delta_{\mathfrak{h}}+\sum\limits_{s\in\mathcal{S}}c_{s}\sum
\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}%
\partial_{y_{i}}+\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}^{r}%
\alpha_{s}\left( y_{i}\right) \partial_{y_{i}}\left( \dfrac{1}{\alpha_{s}%
}\right) \\
& \ \ \ \ \ \ \ \ \ \ +\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}%
^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}\partial_{y_{i}}%
+\sum\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}}{\alpha
_{s}\alpha_{u}}\left( \alpha_{s},\alpha_{u}\right)
\end{align*}%
\begin{align*}
& =\Delta_{\mathfrak{h}}+2\sum\limits_{s\in\mathcal{S}}c_{s}\underbrace{\sum
\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \dfrac{1}{\alpha_{s}}%
\partial_{y_{i}}}_{=\dfrac{1}{\alpha_{s}}\sum\limits_{i=1}^{r}\alpha
_{s}\left( y_{i}\right) \partial_{y_{i}}}+\sum\limits_{s\in\mathcal{S}}%
c_{s}\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right) \underbrace{\partial
_{y_{i}}\left( \dfrac{1}{\alpha_{s}}\right) }_{\substack{=-\dfrac
{\partial_{y_{i}}\left( \alpha_{s}\right) }{\alpha_{s}^{2}}=-\dfrac
{\alpha_{s}\left( y_{i}\right) }{\alpha_{s}^{2}}\\\text{(since }%
\partial_{y_{i}}\left( \alpha_{s}\right) =\alpha_{s}\left( y_{i}\right)
\text{ (because }\alpha_{s}\\\text{is linear))}}}\\
& \ \ \ \ \ \ \ \ \ \ +\underbrace{\sum\limits_{s\in\mathcal{S}%
;\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}}{\alpha_{s}\alpha_{u}}\left( \alpha
_{s},\alpha_{u}\right) }_{\substack{=\sum\limits_{s\in\mathcal{S}%
;\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}\left( \alpha_{s},\alpha_{u}\right)
}{\alpha_{s}\alpha_{u}}=\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}^{2}\left(
\alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}\\\text{(by Lemma 2.18e
\textbf{(c)})}}}\\
& =\Delta_{\mathfrak{h}}+2\sum\limits_{s\in\mathcal{S}}c_{s}\dfrac{1}%
{\alpha_{s}}\underbrace{\sum\limits_{i=1}^{r}\alpha_{s}\left( y_{i}\right)
\partial_{y_{i}}}_{\substack{=\dfrac{1}{2}\left( \alpha_{s},\alpha
_{s}\right) \partial_{\alpha_{s}^{\vee}}\\\text{(by (\ref{p10.2.18d.2}))}%
}}+\underbrace{\sum\limits_{s\in\mathcal{S}}c_{s}\sum\limits_{i=1}^{r}%
\alpha_{s}\left( y_{i}\right) \left( -\dfrac{\alpha_{s}\left(
y_{i}\right) }{\alpha_{s}^{2}}\right) }_{=-\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}}{\alpha_{s}^{2}}\sum\limits_{i=1}^{r}\left( \alpha_{s}\left(
y_{i}\right) \right) ^{2}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}%
^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}%
\end{align*}%
\begin{align}
& =\Delta_{\mathfrak{h}}+\underbrace{2\sum\limits_{s\in\mathcal{S}}%
c_{s}\dfrac{1}{\alpha_{s}}\cdot\dfrac{1}{2}\left( \alpha_{s},\alpha
_{s}\right) \partial_{\alpha_{s}^{\vee}}}_{=\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}}%
\partial_{\alpha_{s}^{\vee}}}-\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}%
}{\alpha_{s}^{2}}\underbrace{\sum\limits_{i=1}^{r}\left( \alpha_{s}\left(
y_{i}\right) \right) ^{2}}_{\substack{=\left( \alpha_{s},\alpha_{s}\right)
\\\text{(since }\left\{ y_{1},y_{2},...,y_{r}\right\} \text{ is an}\\\text{
orthonormal basis of }\mathfrak{h}\text{) }}}+\sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}%
}\nonumber\\
& =\Delta_{\mathfrak{h}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left(
\alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}%
-\sum\limits_{s\in\mathcal{S}}\underbrace{\dfrac{c_{s}}{\alpha_{s}^{2}}\left(
\alpha_{s},\alpha_{s}\right) }_{=\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}^{2}}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}%
^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}\nonumber\\
& =\Delta_{\mathfrak{h}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left(
\alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}%
-\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}^{2}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}%
^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}.
\label{p10.2.18d.6}%
\end{align}
On the other hand, every $t\in\mathcal{S}$ satisfies%
\begin{align}
\varsigma_{c}\left( \partial_{\alpha_{t}^{\vee}}\right) & =\partial
_{\alpha_{t}^{\vee}}+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\alpha_{s}\left(
\alpha_{t}^{\vee}\right) }{\alpha_{s}}\ \ \ \ \ \ \ \ \ \ \left( \text{by
(\ref{p10.2.18c.2}), applied to }a=\alpha_{t}^{\vee}\right) \nonumber\\
& =\partial_{\alpha_{t}^{\vee}}+\sum\limits_{u\in\mathcal{S}}\dfrac{c_{u}%
\alpha_{u}\left( \alpha_{t}^{\vee}\right) }{\alpha_{u}}%
\ \ \ \ \ \ \ \ \ \ \left( \text{here, we renamed }s\text{ as }u\text{ in the
sum}\right) . \label{p10.2.18d.7}%
\end{align}
Now,%
\begin{align}
& \varsigma_{c}\left( \sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left(
\alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}\right)
\nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\underbrace{\varsigma_{c}\left( \partial
_{\alpha_{s}^{\vee}}\right) }_{\substack{=\partial_{\alpha_{s}^{\vee}}%
+\sum\limits_{u\in\mathcal{S}}\dfrac{c_{u}\alpha_{u}\left( \alpha_{s}^{\vee
}\right) }{\alpha_{u}}\\\text{(by (\ref{p10.2.18d.7}), applied to
}t=s\text{)}}}=\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha
_{s},\alpha_{s}\right) }{\alpha_{s}}\left( \partial_{\alpha_{s}^{\vee}}%
+\sum\limits_{u\in\mathcal{S}}\dfrac{c_{u}\alpha_{u}\left( \alpha_{s}^{\vee
}\right) }{\alpha_{u}}\right) \nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}+\underbrace{\sum
\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha_{s}\right)
}{\alpha_{s}}\sum\limits_{u\in\mathcal{S}}\dfrac{c_{u}\alpha_{u}\left(
\alpha_{s}^{\vee}\right) }{\alpha_{u}}}_{=\sum\limits_{s\in\mathcal{S}%
;\ u\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha_{s}\right) }%
{\alpha_{s}}\cdot\dfrac{c_{u}\alpha_{u}\left( \alpha_{s}^{\vee}\right)
}{\alpha_{u}}}\nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}+\sum\limits_{s\in
\mathcal{S};\ u\in\mathcal{S}}\underbrace{\dfrac{c_{s}\left( \alpha
_{s},\alpha_{s}\right) }{\alpha_{s}}\cdot\dfrac{c_{u}\alpha_{u}\left(
\alpha_{s}^{\vee}\right) }{\alpha_{u}}}_{=\dfrac{c_{s}c_{u}}{\alpha_{s}%
\alpha_{u}}\left( \alpha_{s},\alpha_{s}\right) \alpha_{u}\left( \alpha
_{s}^{\vee}\right) }\nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}+\sum\limits_{s\in
\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}}{\alpha_{s}\alpha_{u}%
}\underbrace{\left( \alpha_{s},\alpha_{s}\right) \alpha_{u}\left(
\alpha_{s}^{\vee}\right) }_{\substack{=2\left( \alpha_{s},\alpha_{u}\right)
\\\text{(by (\ref{p10.2.18d.3}), applied to }g=\alpha_{u}\text{)}}}\nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}+2\sum\limits_{s\in
\mathcal{S};\ u\in\mathcal{S}}\underbrace{\dfrac{c_{s}c_{u}}{\alpha_{s}%
\alpha_{u}}\left( \alpha_{s},\alpha_{u}\right) }_{=\dfrac{c_{s}c_{u}\left(
\alpha_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}}\nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}+2\underbrace{\sum
\limits_{s\in\mathcal{S};\ u\in\mathcal{S}}\dfrac{c_{s}c_{u}\left( \alpha
_{s},\alpha_{u}\right) }{\alpha_{s}\alpha_{u}}}_{\substack{=\sum
\limits_{s\in\mathcal{S}}\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right)
}{\alpha_{s}^{2}}\\\text{(by Lemma 2.18e \textbf{(c)})}}}\nonumber\\
& =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}+2\sum\limits_{s\in
\mathcal{S}}\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha
_{s}^{2}}. \label{p10.2.18d.8}%
\end{align}
Now, $\overline{H}=\Delta_{\mathfrak{h}}-\sum\limits_{s\in\mathcal{S}}%
\dfrac{c_{s}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial
_{\alpha_{s}^{\vee}}$, so that
\begin{align*}
\varsigma_{c}\left( \overline{H}\right) & =\varsigma_{c}\left(
\Delta_{\mathfrak{h}}-\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left(
\alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}%
}\right) =\varsigma_{c}\left( \Delta_{\mathfrak{h}}\right) -\varsigma
_{c}\left( \sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha_{s}%
,\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}^{\vee}}\right) \\
& =\left( \Delta_{\mathfrak{h}}+\sum\limits_{s\in\mathcal{S}}\dfrac
{c_{s}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha
_{s}^{\vee}}-\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( \alpha
_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}+\sum\limits_{s\in\mathcal{S}}%
\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}\right)
\\
& \ \ \ \ \ \ \ \ \ \ -\left( \sum\limits_{s\in\mathcal{S}}\dfrac
{c_{s}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}}\partial_{\alpha_{s}%
^{\vee}}+2\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}^{2}\left( \alpha
_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}\right) \\
& \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{p10.2.18d.6}) and
(\ref{p10.2.18d.8})}\right) \\
& =\Delta_{\mathfrak{h}}-\underbrace{\left( \sum\limits_{s\in\mathcal{S}%
}\dfrac{c_{s}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}%
+\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha
_{s}\right) }{\alpha_{s}^{2}}\right) }_{=\sum\limits_{s\in\mathcal{S}%
}\left( \dfrac{c_{s}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}%
}+\dfrac{c_{s}^{2}\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}%
}\right) =\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left( c_{s}+1\right)
\left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}}\\
& =\Delta_{\mathfrak{h}}-\sum\limits_{s\in\mathcal{S}}\dfrac{c_{s}\left(
c_{s}+1\right) \left( \alpha_{s},\alpha_{s}\right) }{\alpha_{s}^{2}}=H.
\end{align*}
This proves Proposition 2.18d.
\textit{Proof of Theorem 2.9.} The $\mathbb{C}$-algebra homomorphism
$\varsigma_{c}:\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right)
\rightarrow\mathcal{D}\left( \mathfrak{h}_{\operatorname*{reg}}\right) $
preserves the degree of homogeneous differential operators and their symbols
and commutes with the action of $W$ by conjugation (these facts all are easy
to prove), and maps $\overline{H}$ to $H$ (by Proposition 2.18d). Hence,
applying $\varsigma_{c}$ to Corollary 2.17, we obtain Theorem 2.9 (at least,
if we add to Corollary 2.17 the claim that the $\overline{L}_{j}$ are
$W$-invariant, as I suggested above), with the $L_{j}$ being given by
$L_{j}=\varsigma_{c}\left( \overline{L}_{j}\right) $.
Theorem 2.9 is thus proven, and with it Theorem 2.1.
\item \textbf{Page 10, Remark 2.20:} It would be better to replace ``$L_{i}$''
by ``$\overline{L}_{i}$'' here.
\item[...] [To be continued?]
\item \textbf{Page 12, Example 2.25:} In this example, you regard
$\mathfrak{h}$ as being embedded into $\mathbb{C}^{n}$ as the subspace
consisting of the vectors whose coordinates sum to zero. (This is the same
embedding as in Example 2.5.) The $p_{i}$ are the same as in Example 2.5. The
$x_{i}$ (for each $i\in\left\{ 1,2,\ldots,n\right\} $) is the linear map
sending each element of $\mathfrak{h}$ to its $i$-th coordinate. This all is
worth pointing out explicitly, since it is far from obvious.
\end{itemize}
\begin{noncompile}
\section*{Section 3}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 4}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 5}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 6}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 7}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 8}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 9}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\section*{Section 10}
\begin{itemize}
\item \textbf{.}
\end{itemize}
\end{noncompile}
\end{document}