\documentclass[numbers=enddot,12pt,final,onecolumn,notitlepage]{scrartcl}%
\usepackage{amsfonts}
\usepackage[headsepline,footsepline,manualmark]{scrlayer-scrpage}
\usepackage[all,cmtip]{xy}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{framed}
\usepackage{comment}
\usepackage{color}
\usepackage{hyperref}
\usepackage{ifthen}
\usepackage[sc]{mathpazo}
\usepackage[T1]{fontenc}
\usepackage{needspace}
\usepackage{tabls}
\usepackage{tikz}
\usepackage{enumitem}
\usepackage{graphicx}
\usepackage{cancel}%
\setcounter{MaxMatrixCols}{30}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Thursday, April 09, 2026 22:19:45}
%TCIDATA{<META NAME="GraphicsSave" CONTENT="32">}
%TCIDATA{<META NAME="SaveForMode" CONTENT="1">}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
\usetikzlibrary{shapes.geometric, arrows.meta, fit, positioning, decorations.pathreplacing}
\newcounter{exer}
\newcounter{rmk}
\theoremstyle{definition}
\newtheorem{theo}{Theorem}
\newenvironment{theorem}[1][]
{\begin{theo}[#1]\begin{leftbar}}
{\end{leftbar}\end{theo}}
\newtheorem{lem}[theo]{Lemma}
\newenvironment{lemma}[1][]
{\begin{lem}[#1]\begin{leftbar}}
{\end{leftbar}\end{lem}}
\newtheorem{prop}[theo]{Proposition}
\newenvironment{proposition}[1][]
{\begin{prop}[#1]\begin{leftbar}}
{\end{leftbar}\end{prop}}
\newtheorem{defi}[theo]{Definition}
\newenvironment{definition}[1][]
{\begin{defi}[#1]\begin{leftbar}}
{\end{leftbar}\end{defi}}
\newtheorem{remk}[rmk]{Remark}
\newenvironment{remark}[1][]
{\begin{remk}[#1]\begin{leftbar}}
{\end{leftbar}\end{remk}}
\newtheorem{coro}[theo]{Corollary}
\newenvironment{corollary}[1][]
{\begin{coro}[#1]\begin{leftbar}}
{\end{leftbar}\end{coro}}
\newtheorem{conv}[theo]{Convention}
\newenvironment{condition}[1][]
{\begin{conv}[#1]\begin{leftbar}}
{\end{leftbar}\end{conv}}
\newtheorem{quest}[theo]{Question}
\newenvironment{algorithm}[1][]
{\begin{quest}[#1]\begin{leftbar}}
{\end{leftbar}\end{quest}}
\newtheorem{warn}[theo]{Warning}
\newenvironment{conclusion}[1][]
{\begin{warn}[#1]\begin{leftbar}}
{\end{leftbar}\end{warn}}
\newtheorem{conj}[theo]{Conjecture}
\newenvironment{conjecture}[1][]
{\begin{conj}[#1]\begin{leftbar}}
{\end{leftbar}\end{conj}}
\newtheorem{exam}[theo]{Example}
\newenvironment{example}[1][]
{\begin{exam}[#1]\begin{leftbar}}
{\end{leftbar}\end{exam}}
\newtheorem{exmp}[exer]{Exercise}
\newenvironment{exercise}[1][]
{\begin{exmp}[#1]\begin{leftbar}}
{\end{leftbar}\end{exmp}}
\newenvironment{statement}{\begin{quote}}{\end{quote}}
\iffalse
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\fi
\let\sumnonlimits\sum
\let\prodnonlimits\prod
\let\cupnonlimits\bigcup
\let\capnonlimits\bigcap
\renewcommand{\sum}{\sumnonlimits\limits}
\renewcommand{\prod}{\prodnonlimits\limits}
\renewcommand{\bigcup}{\cupnonlimits\limits}
\renewcommand{\bigcap}{\capnonlimits\limits}
\setlength\tablinesep{3pt}
\setlength\arraylinesep{3pt}
\setlength\extrarulesep{3pt}
\voffset=0cm
\hoffset=-0.7cm
\setlength\textheight{22.5cm}
\setlength\textwidth{15.5cm}
\newenvironment{verlong}{}{}
\newenvironment{vershort}{}{}
\newenvironment{noncompile}{}{}
\excludecomment{verlong}
\includecomment{vershort}
\excludecomment{noncompile}
\newcommand*\circled[1]{\tikz[baseline=(char.base)]{
            \node[shape=circle,draw,inner sep=2pt] (char) {#1};}}
\newcommand{\id}{\operatorname{id}}
\newcommand{\rev}{\operatorname{rev}}
\newcommand{\conncomp}{\operatorname{conncomp}}
\newcommand{\conn}{\operatorname{conn}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\CC}{\mathbb{C}}
\newcommand{\powset}[2][]{\ifthenelse{\equal{#2}{}}{\mathcal{P}\left(#1\right)}{\mathcal{P}_{#1}\left(#2\right)}}
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\tup}[1]{\left( #1 \right)}
\newcommand{\ive}[1]{\left[ #1 \right]}
\newcommand{\verts}[1]{\operatorname{V}\left( #1 \right)}
\newcommand{\edges}[1]{\operatorname{E}\left( #1 \right)}
\newcommand{\arcs}[1]{\operatorname{A}\left( #1 \right)}
\newcommand{\underbrack}[2]{\underbrace{#1}_{\substack{#2}}}
\newcommand{\are}{\ar@{-}}
\newcommand{\arebi}[1][]{\ar@{<-}@/_/[#1] \ar@/^/[#1]}
\newcommand{\iverson}[1]{\left[#1\right]}
\newcommand{\sur}{\operatorname{sur}}
\ihead{Math 4990 Fall 2017 (Darij Grinberg): handwritten notes 2017-11-07}
\ohead{page \thepage}
\cfoot{}
\begin{document}

\begin{center}
\textbf{Math 4990 Fall 2017 (Darij Grinberg):}

\textbf{handwritten notes from the lecture of 7 November 2017}

(digitized using Claude in 2026)

\textbf{Note:} This is \textbf{not} a polished text!
\end{center}

\subsection*{4.2. The Concept of a FPS (continued)}

Fix a commutative ring $K$. (For example, $K=\mathbb{Z}$ or $\mathbb{Q}$ or
$\mathbb{R}$ or $\mathbb{C}$.)

\begin{definition}
A \emph{formal power series} (short: \emph{FPS}) (in $1$ indeterminate $x$
over $K$) is a sequence $(a_{0},a_{1},a_{2},\ldots)\in K^{\infty}$ of elements
of $K$.
\end{definition}

This might answer the question \textquotedblleft what is an
FPS\textquotedblright, but does not explain what we can do with FPSs. For
example, why can we write $(a_{0},a_{1},a_{2},\ldots)$ as $a_{0}+a_{1}%
x+a_{2}x^{2}+\cdots$\thinspace? What is $x$?

\begin{definition}
\label{def.fps.ops}\ \ 

\begin{enumerate}
\item[\textbf{(a)}] The \emph{sum} of two FPS $(a_{0},a_{1},\ldots)$ and
$(b_{0},b_{1},\ldots)$ is the FPS%
\[
(a_{0},a_{1},\ldots)+(b_{0},b_{1},\ldots)=(a_{0}+b_{0},\,a_{1}+b_{1}%
,\,\ldots).
\]


The \emph{difference} of two FPS $(a_{0},a_{1},\ldots)$ and $(b_{0}%
,b_{1},\ldots)$ is the FPS%
\[
(a_{0},a_{1},\ldots)-(b_{0},b_{1},\ldots)=(a_{0}-b_{0},\,a_{1}-b_{1}%
,\,\ldots).
\]


\item[\textbf{(b)}] If $\lambda\in K$ and if $(a_{0},a_{1},\ldots)$ is a FPS,
then the FPS $\lambda(a_{0},a_{1},\ldots)$ is defined as $(\lambda
a_{0},\,\lambda a_{1},\,\ldots)$.

\item[\textbf{(c)}] The \emph{product} of two FPS $(a_{0},a_{1},\ldots)$ and
$(b_{0},b_{1},\ldots)$ is the FPS $(c_{0},c_{1},\ldots)$, where%
\begin{align}
c_{n}  &  =\sum_{i=0}^{n}a_{i}\,b_{n-i}=\sum_{\substack{i,j\in\mathbb{N}%
;\\i+j=n}}a_{i}b_{j}\\
&  =a_{0}b_{n}+a_{1}b_{n-1}+\cdots+a_{n}b_{0}\,.
\end{align}


\item[\textbf{(d)}] For each $a\in K$, define the FPS $\underline{a}%
=(a,0,0,0,\ldots)$. We'll soon just call it $a$, but for now let's use
$\underline{a}$.

\item[\textbf{(e)}] The set of all FPS is called $K[\![x]\!]$. (These are
double square brackets.)
\end{enumerate}
\end{definition}

We often use symbols with arrows above them (such as $\overrightarrow{a}$ or
$\overrightarrow{x}$) to denote FPS, since they are vectors (although of
infinite size).

\setcounter{theo}{0}

\begin{theorem}
\label{thm.fps.ring}The set $K[\![x]\!]$ (with the sum and product just
defined) is a commutative ring, with subring $K$ (if we regard each $a\in K$
as the FPS $\underline{a}$). Specifically, this means:

\begin{enumerate}
\item[\textbf{(a)}] Addition in $K[\![x]\!]$ is commutative and associative:
\begin{align*}
\overrightarrow{a}+\overrightarrow{b}  &  =\overrightarrow{b}%
+\overrightarrow{a};\\
\overrightarrow{a}+(\overrightarrow{b}+\overrightarrow{c})  &
=(\overrightarrow{a}+\overrightarrow{b})+\overrightarrow{c}.
\end{align*}


\item[\textbf{(b)}] Each FPS $\overrightarrow{a}$ satisfies $\underline{0}%
+\overrightarrow{a}=\overrightarrow{a}+\underline{0}=\overrightarrow{a}$\ .

\item[\textbf{(c)}] Multiplication in $K[\![x]\!]$ is commutative and
associative:
\begin{align*}
\overrightarrow{a}\,\overrightarrow{b}  &  =\overrightarrow{b}%
\,\overrightarrow{a};\\
\overrightarrow{a}\ (\overrightarrow{b}\,\overrightarrow{c})  &
=(\overrightarrow{a}\,\overrightarrow{b})\ \overrightarrow{c}.
\end{align*}


\item[\textbf{(d)}] Each FPS $\overrightarrow{a}$ satisfies $\underline{1}%
\,\overrightarrow{a}=\overrightarrow{a}\,\underline{1}=\overrightarrow{a}$.

\item[\textbf{(e)}] Each FPS $\overrightarrow{a}$ satisfies $\underline{0}%
\,\overrightarrow{a}=\overrightarrow{a}\,\underline{0}=\underline{0}$.

\item[\textbf{(f)}] Distributivity holds:
\begin{align*}
\overrightarrow{a}\,\left(  \overrightarrow{b}+\overrightarrow{c}\right)   &
=\overrightarrow{a}\,\overrightarrow{b}+\overrightarrow{a}\,\overrightarrow{c}%
;\\
(\overrightarrow{a}+\overrightarrow{b})\,\overrightarrow{c}  &
=\overrightarrow{a}\,\overrightarrow{c}+\overrightarrow{b}\,\overrightarrow{c}%
.
\end{align*}


\item[\textbf{(g)}] For all $\overrightarrow{a},\overrightarrow{b}%
,\overrightarrow{c}\in K[\![x]\!]$, we have $\overrightarrow{a}%
+\overrightarrow{b}=\overrightarrow{c}$ if and only if $\overrightarrow{a}%
=\overrightarrow{c}-\overrightarrow{b}$.

\item[\textbf{(h)}] For all $a,b\in K$, we have $\underline{a}+\underline{b}%
=\underline{a+b}$ and $\underline{a}\cdot\underline{b}=\underline{ab}\,$.
\end{enumerate}

Furthermore, $K[\![x]\!]$ is a $K$-module (this word means the same as
\textquotedblleft$K$-vector space\textquotedblright, except that $K$ doesn't
have to be a field). That is, the following hold for all $\overrightarrow{a}%
,\overrightarrow{b}\in K[\![x]\!]$ and all $\lambda,\mu\in K$:

\begin{enumerate}
\item[\textbf{(i)}] $\qquad\lambda(\overrightarrow{a}+\overrightarrow{b}%
)=\lambda\overrightarrow{a}+\lambda\overrightarrow{b}$,

\item[\textbf{(j)}] $\qquad(\lambda+\mu)\overrightarrow{a}=\lambda
\overrightarrow{a}+\mu\overrightarrow{a}$,

\item[\textbf{(k)}] $\qquad(\lambda\mu)\overrightarrow{a}=\lambda
(\mu\overrightarrow{a})$,

\item[\textbf{(l)}] $\qquad1\overrightarrow{a}=\overrightarrow{a}$ and
$0\overrightarrow{a}=\underline{0}\,$.
\end{enumerate}

Finally:

\begin{enumerate}
\item[\textbf{(m)}] For all $\lambda\in K$ and $\overrightarrow{a}\in
K[\![x]\!]$, we have $\lambda\overrightarrow{a}=\underline{\lambda}%
\cdot\overrightarrow{a}$.
\end{enumerate}
\end{theorem}

The purpose of Theorem~\ref{thm.fps.ring} is to justify computing with FPS as
with numbers, at least as far as the operations $+$, $-$ and $\cdot$ are concerned.

Knowing that $K[\![x]\!]$ is a commutative ring implies the following:

\begin{itemize}
\item Subtraction of FPS undoes addition.

\item Sums and products need no parentheses and don't depend on the order: For
example, $\left(  (\overrightarrow{a}\,\overrightarrow{b})\,\overrightarrow{c}%
\right)  \,\overrightarrow{d}=\overrightarrow{a}\ \left(  (\overrightarrow{b}%
\ \overrightarrow{c})\,\overrightarrow{d}\right)  =\overrightarrow{a}\,\left(
\overrightarrow{b}\,(\overrightarrow{c}\,\overrightarrow{d})\right)  $, so we
can denote all of these by $\overrightarrow{a}\,\overrightarrow{b}%
\,\overrightarrow{c}\,\overrightarrow{d}$. Furthermore, $\overrightarrow{a}%
\,\overrightarrow{b}\,\overrightarrow{c}\,\overrightarrow{d}%
=\overrightarrow{b}\,\overrightarrow{d}\,\overrightarrow{c}%
\,\overrightarrow{a}=\overrightarrow{c}\,\overrightarrow{a}%
\,\overrightarrow{b}\,\overrightarrow{d}$.

\item Hence, finite sums and products work as usual: $\sum_{i=1}%
^{k}\overrightarrow{a_{i}}$, $\sum_{i\in I}\overrightarrow{a_{i}}$,
$\prod_{i=1}^{k}\overrightarrow{a_{i}}$, $\prod_{i\in I}\overrightarrow{a_{i}%
}$. These can be computed in any order.

\item In particular, powers exist: $\overrightarrow{a}^{n}%
=\underbrace{\overrightarrow{a}\cdot\overrightarrow{a}\cdots\overrightarrow{a}%
}_{n\text{ times}}$ for all $n\in\mathbb{N}$ and $\overrightarrow{a}\in
K[\![x]\!]$.

This includes $\overrightarrow{a}^{0}=\underline{1}$.

\item Standard rules for exponents hold: $\overrightarrow{a}^{n+m}%
=\overrightarrow{a}^{n}\,\overrightarrow{a}^{m}$ and $(\overrightarrow{a}%
\,\overrightarrow{b})^{n}=\overrightarrow{a}^{n}\,\overrightarrow{b}^{n}$, etc.

\item The binomial formula holds: $(\overrightarrow{a}+\overrightarrow{b}%
)^{n}=\sum_{k=0}^{n}\dbinom{n}{k}\overrightarrow{a}^{k}\,\overrightarrow{b}%
^{n-k}$.

\item All other kinds of formulas hold, e.g.\ Vandermonde convolution:
$\dbinom{\overrightarrow{a}+\overrightarrow{b}}{n}=\sum\limits_{k}%
\dbinom{\overrightarrow{a}}{k}\dbinom{\overrightarrow{b}}{n-k}$ if
$K=\mathbb{Q}$, $\mathbb{R}$ or $\mathbb{C}$.

(Here, $\dbinom{x}{n}=\dfrac{x(x-1)\cdots(x-n+1)}{n!}$ for any $x$, be it a
number or a FPS.)
\end{itemize}

\begin{definition}
For any $n\in\mathbb{N}$ and $\overrightarrow{a}=(a_{0},a_{1},\ldots)\in
K[\![x]\!]$, we set $[x^{n}]\overrightarrow{a}=a_{n}$.

This is called the \emph{coefficient of $x^{n}$ in $\overrightarrow{a}$}.
\end{definition}

Thus, the definition of sum and product rewrites as follows:
\begin{equation}
\lbrack x^{n}](\overrightarrow{a}+\overrightarrow{b})=[x^{n}%
]\overrightarrow{a}+[x^{n}]\overrightarrow{b}\label{eq.4990-2017nov7.3a}%
\end{equation}
and%
\begin{align}
\lbrack x^{n}](\overrightarrow{a}\,\overrightarrow{b}) &  =[x^{0}%
]\overrightarrow{a}\cdot\lbrack x^{n}]\overrightarrow{b}+[x^{1}%
]\overrightarrow{a}\cdot\lbrack x^{n-1}]\overrightarrow{b}+\cdots
+[x^{n}]\overrightarrow{a}\cdot\lbrack x^{0}]\overrightarrow{b}\nonumber\\
&  =\sum_{i=0}^{n}[x^{i}]\overrightarrow{a}\cdot\lbrack x^{n-i}%
]\overrightarrow{b}\label{eq.4990-2017nov7.3c}\\
&  =\sum_{j=0}^{n}[x^{n-j}]\overrightarrow{a}\cdot\lbrack x^{j}%
]\overrightarrow{b}\,.\label{eq.4990-2017nov7.3d}%
\end{align}


\begin{proof}
[Proof of Theorem~\ref{thm.fps.ring}.]Most parts are straightforward. Let me
just show the proof of associativity of multiplication (part of \textbf{(c)}),
which is the least trivial of them all.

\textbf{(c)} Associativity: Fix $n\in\mathbb{N}$. We shall show that
$[x^{n}]((\overrightarrow{a}\,\overrightarrow{b})\,\overrightarrow{c}%
)=[x^{n}](\overrightarrow{a}\,(\overrightarrow{b}\,\overrightarrow{c}))$.

Indeed,%
\begin{align*}
\lbrack x^{n}]((\overrightarrow{a}\,\overrightarrow{b})\,\overrightarrow{c})
&  =\sum_{j=0}^{n}\underbrace{[x^{n-j}](\overrightarrow{a}\,\overrightarrow{b}%
)}_{\substack{=\sum_{i=0}^{n-j}\left[  x^{i}\right]  \overrightarrow{a}%
\cdot\left[  x^{n-j-i}\right]  \overrightarrow{b}\\\text{(by
(\ref{eq.4990-2017nov7.3c}))}}}\cdot\,[x^{j}]\overrightarrow{c}%
\ \ \ \ \ \ \ \ \ \ \left(  \text{by (\ref{eq.4990-2017nov7.3d})}\right)  \\
&  =\sum_{j=0}^{n}\ \ \sum_{i=0}^{n-j}\left[  x^{i}\right]  \overrightarrow{a}%
\cdot\left[  x^{n-j-i}\right]  \overrightarrow{b}\cdot\lbrack x^{j}%
]\overrightarrow{c}\,,
\end{align*}
whereas%
\begin{align*}
\lbrack x^{n}](\overrightarrow{a}\,(\overrightarrow{b}\,\overrightarrow{c}))
&  =\sum_{i=0}^{n}[x^{i}]\overrightarrow{a}\cdot\underbrace{[x^{n-i}%
](\overrightarrow{b}\,\overrightarrow{c})}_{\substack{=\sum_{j=0}^{n-i}\left[
x^{n-i-j}\right]  \overrightarrow{b}\cdot\left[  x^{j}\right]
\overrightarrow{c}\\\text{(by (\ref{eq.4990-2017nov7.3d}))}}%
}\ \ \ \ \ \ \ \ \ \ \left(  \text{by (\ref{eq.4990-2017nov7.3c})}\right)  \\
&  =\sum_{i=0}^{n}\ \ \sum_{j=0}^{n-i}[x^{i}]\overrightarrow{a}\cdot\left[
x^{n-i-j}\right]  \overrightarrow{b}\cdot\left[  x^{j}\right]
\overrightarrow{c}\,.
\end{align*}


The RHSes of these two equalities are equal, since $\displaystyle\sum
_{j=0}^{n}\ \ \sum_{i=0}^{n-j}=\sum_{\substack{i,j\in\mathbb{N};\\i+j\leq
n}}=\sum_{i=0}^{n}\ \ \sum_{j=0}^{n-i}$ and $n-j-i=n-i-j$. Thus,
$[x^{n}]((\overrightarrow{a}\,\overrightarrow{b})\,\overrightarrow{c}%
)=[x^{n}](\overrightarrow{a}\,(\overrightarrow{b}\,\overrightarrow{c}))$.

So we have proved this for all $n\in\mathbb{N}$. Thus we conclude that
$(\overrightarrow{a}\,\overrightarrow{b})\,\overrightarrow{c}%
=\overrightarrow{a}\,(\overrightarrow{b}\,\overrightarrow{c})$, since a FPS is
just the sequence of its coefficients. This proves associativity of
multiplication in $K[\![x]\!]$.
\end{proof}

\bigskip Sometimes, infinite sums also make sense in $K[\![x]\!]$.

\begin{statement}
\textbf{Example:} The following infinite sum of FPS can be evaluated
coefficientwise:%
\begin{align*}
&  \ \ \ \ (1,1,1,1,1,1,\ldots)\\
&  +(0,1,1,1,1,1,\ldots)\\
&  +(0,0,1,1,1,1,\ldots)\\
&  +(0,0,0,1,1,1,\ldots)\\
&  +(0,0,0,0,1,1,\ldots)\\
&  +\cdots\\
&  =(1,2,3,4,5,6,7,8,\ldots)\,.
\end{align*}

\end{statement}

\begin{definition}
\label{def.fps.summable}A (possibly infinite) family $(\overrightarrow{a_{i}%
})_{i\in I}$ of FPSs is \emph{summable} if
\begin{equation}
\text{for each }n\in\mathbb{N}\text{, only finitely many }i\in I\text{ satisfy
}[x^{n}]\overrightarrow{a_{i}}\neq0.\label{eq.def.fps.summable.5}%
\end{equation}


In this case, the sum $\sum_{i\in I}\overrightarrow{a_{i}}$ is defined as the
FPS with%
\[
\lbrack x^{n}]\left(  \sum_{i\in I}\overrightarrow{a_{i}}\right)
=\underbrace{\sum_{i\in I}[x^{n}]\overrightarrow{a_{i}}}_{\substack{\text{a
sum with only finitely many}\\\text{nonzero addends, hence}%
\\\text{well-defined in }K}}\ \ \ \ \ \ \ \ \ \ \text{for all }n\in\mathbb{N}.
\]

\end{definition}

\begin{remark}
The condition (\ref{eq.def.fps.summable.5}) is \textbf{not} equivalent to
\textquotedblleft for each $n\in\mathbb{N}$, infinitely many $i\in I$ satisfy
$[x^{n}]\overrightarrow{a_{i}}=0$\textquotedblright.
\end{remark}

\setcounter{theo}{1}

\begin{proposition}
\label{prop.summable}Sums of summable families satisfy the usual rules for
summation (as long as all families involved are summable) with the exception
of \textquotedblleft interchange of summation signs\textquotedblright.
\end{proposition}

\begin{statement}
\textbf{Example.} Let me show where this exception arises. Actually, it
already arises when taking infinite sums of integers (not FPSs). An infinite
sum of integers is well-defined whenever only finitely many addends of the sum
are nonzero. For example,
\begin{align*}
\sum_{i=1}^{\infty}\left\lfloor 5/2^{i}\right\rfloor  & =\left\lfloor
5/2^{1}\right\rfloor +\left\lfloor 5/2^{2}\right\rfloor +\left\lfloor
5/2^{3}\right\rfloor +\left\lfloor 5/2^{4}\right\rfloor +\cdots\\
& =2+1+\underbrace{0+0+\cdots}_{\text{all these addends are }0}=2+1=3
\end{align*}
is a well-defined infinite sum of integers.

Now I claim that summation signs cannot always be interchanged for infinite
sums even if all the sums involved are well-defined. For an example, set%
\[
a_{i,j}=[i=j]-[i+1=j]\ \ \ \ \ \ \ \ \ \ \text{for all }i,j\in\{1,2,3,\ldots
\}.
\]


Here is what these integers $a_{i,j}$ look like:%
\[%
\begin{array}
[c]{c|cccccc}
& j=1 & j=2 & j=3 & j=4 & j=5 & \cdots\\\hline
i=1 & 1 & -1 &  &  &  & \\
i=2 &  & 1 & -1 &  &  & \\
i=3 &  &  & 1 & -1 &  & \\
i=4 &  &  &  & 1 & -1 & \\
\vdots &  &  &  &  & \ddots & \ddots
\end{array}
\]
(where all the invisible entries are $0$'s). Then%
\begin{align*}
\sum_{i=1}^{\infty}\ \ \underbrace{\sum_{j=1}^{\infty}a_{i,j}}_{=0}  &
=\sum_{i=1}^{\infty}0=0\,,\qquad\text{but}\\
\sum_{j=1}^{\infty}\ \ \underbrace{\sum_{i=1}^{\infty}a_{i,j}}%
_{\substack{=\left[  j=1\right]  \\\text{(an Iverson bracket)}}}  &
=\sum_{j=1}^{\infty}[j=1]=1\,.
\end{align*}

\end{statement}

But we can fix this exception if we require the \textbf{whole} family
$(a_{i,j})_{(i,j)\in I\times J}$ to be summable. That is, the following holds:

\begin{statement}
\textquotedblleft\emph{Discrete Fubini's theorem}\textquotedblright: If
$(\overrightarrow{a_{i,j}})_{(i,j)\in I\times J}$ is a summable family of FPS,
then%
\[
\sum_{i\in I}\ \ \sum_{j\in J}\overrightarrow{a_{i,j}}=\sum_{\left(
i,j\right)  \in I\times J}\overrightarrow{a_{i,j}}=\sum_{j\in J}\ \ \sum_{i\in
I}\overrightarrow{a_{i,j}}\,.
\]

\end{statement}

We can now answer the question \textquotedblleft what is $x$%
?\textquotedblright:

\begin{definition}
We let $x$ denote the FPS $(0,1,0,0,0,\ldots)$.
\end{definition}

\setcounter{theo}{2}

\begin{proposition}
\label{prop.xk}For all $k\in\mathbb{N}$, we have $x^{k}%
=(\underbrace{0,0,\ldots,0}_{k\text{ zeroes}},1,0,0,0,\ldots)$.
\end{proposition}

\begin{proof}
Induct over $k$, by observing that if $\overrightarrow{a}=(a_{0},a_{1}%
,a_{2},\ldots)$, then $x\,\overrightarrow{a}=(0,a_{0},a_{1},a_{2},\ldots)$.
\end{proof}

\begin{corollary}
\label{cor.fps.expansion}Any FPS $(a_{0},a_{1},\ldots)\in K[\![x]\!]$
satisfies%
\[
(a_{0},a_{1},\ldots)=a_{0}+a_{1}x+a_{2}x^{2}+\cdots=\sum_{n\in\mathbb{N}}%
a_{n}x^{n}\,.
\]
In particular, the RHS here is well-defined, i.e., the family $(a_{n}%
x^{n})_{n\in\mathbb{N}}$ is summable.
\end{corollary}

\begin{proof}
By Proposition \ref{prop.xk}, we have%
\begin{align*}
a_{0}+a_{1}x+a_{2}x^{2}+\cdots &  =\ \ \ \ \ \ (a_{0},0,0,0,\ldots)\\
&  \qquad+(0,a_{1},0,0,\ldots)\\
&  \qquad+(0,0,a_{2},0,\ldots)\\
&  \qquad+(0,0,0,a_{3},\ldots)\\
&  \qquad+\cdots\\
&  =\ \ \ \ \ \ (a_{0},a_{1},a_{2},a_{3},\ldots)\,.
\end{align*}

\end{proof}

No analysis was used to make these power series well-defined!

Now, we have learned:

\begin{itemize}
\item what FPS are,

\item how to do basic algebra with them ($+,\cdot,-$),

\item what $x$ is,

\item why we can compare coefficients (i.e., why $\sum_{n\in\mathbb{N}}%
a_{n}x^{n}=\sum_{n\in\mathbb{N}}b_{n}x^{n}$ implies $a_{k}=b_{k}$ $\forall\,k$).
\end{itemize}

In particular, Example 3 (in \S 4.1) is now justified.

We don't yet know:

\begin{itemize}
\item what we can substitute into a FPS,

\item when and why we can do fancier algebra (like solving quadratic
equations, or dividing FPS).
\end{itemize}

So, the remaining examples from \S 4.1 are still hanging loose. Let's get to
work justifying them.

\begin{definition}
Let $\overrightarrow{a}\in K[\![x]\!]$. A \emph{multiplicative inverse} of
$\overrightarrow{a}$ means a FPS $\overrightarrow{b}\in K[\![x]\!]$ such that
$\overrightarrow{a}\,\overrightarrow{b}=\overrightarrow{b}\,\overrightarrow{a}%
=\underline{1}$.
\end{definition}

\setcounter{theo}{4}

\begin{theorem}
\label{thm.fps.inv.unique}Let $\overrightarrow{a}\in K[\![x]\!]$. Then, there
is \textbf{at most one} multiplicative inverse of $\overrightarrow{a}$.
\end{theorem}

\begin{proof}
Let $\overrightarrow{b}$ and $\overrightarrow{c}$ be two multiplicative
inverses of $\overrightarrow{a}$. Then,%
\[
\overrightarrow{a}\,\overrightarrow{b}=\overrightarrow{b}\,\overrightarrow{a}%
=\underline{1}\quad\text{and}\quad\overrightarrow{a}\,\overrightarrow{c}%
=\overrightarrow{c}\,\overrightarrow{a}=\underline{1}\,.
\]
Then, $\overrightarrow{b}\,(\overrightarrow{a}\,\overrightarrow{c}%
)=\overrightarrow{b}\cdot\underline{1}=\overrightarrow{b}$ and
$(\overrightarrow{b}\,\overrightarrow{a})\,\overrightarrow{c}=\underline{1}%
\cdot\overrightarrow{c}=\overrightarrow{c}$. But by associativity, the LHSs
are equal, so we conclude that the RHSs are equal as well. That is,
$\overrightarrow{b}=\overrightarrow{c}$.
\end{proof}

\begin{definition}
The multiplicative inverse of $\overrightarrow{a}$ (if it exists) is called
$\overrightarrow{a}^{-1}$ or $1/\overrightarrow{a}$.
\end{definition}

We shall now stop writing arrows over symbols for FPS. That is, we will call
FPSs simply $a$ rather than $\overrightarrow{a}$.

\setcounter{theo}{5}

\begin{theorem}
\label{thm.fps.inv.exist}Let $a\in K[\![x]\!]$. Then $a$ has a multiplicative
inverse (in $K[\![x]\!]$) if and only if $[x^{0}]a$ has a multiplicative
inverse in $K$.
\end{theorem}

\begin{remark}
What numbers have multiplicative inverses in $K$?

\begin{itemize}
\item If $K=\mathbb{Z}$, then $1$ and $-1$ only.

\item If $K=\mathbb{Q}$, then all nonzero numbers.

\item If $K=\mathbb{R}$, then all nonzero numbers.

\item If $K=\mathbb{C}$, then all nonzero numbers. (In fact, any nonzero
$a+bi\in\mathbb{C}$ has inverse $\dfrac{1}{a+bi}=\dfrac{a-bi}{a^{2}+b^{2}}$.)
\end{itemize}

Thus, $\mathbb{Q}$, $\mathbb{R}$ and $\mathbb{C}$ are \emph{fields}.
\end{remark}

\begin{proof}
[Proof of Theorem~\ref{thm.fps.inv.exist}]$\Longrightarrow$: This is the easy
part. Assume that $a$ has a multiplicative inverse. We must show that
$[x^{0}]a$ has a multiplicative inverse in $K$.

Write the FPS $a$ as $(a_{0},a_{1},a_{2},\ldots)$. We've assumed that there
exists some FPS $b=(b_{0},b_{1},b_{2},\ldots)$ with $ab=\underline{1}$. So
\begin{align*}
(1,0,0,\ldots)  & =\underline{1}=ab=\left(  a_{0},a_{1},a_{2},\ldots\right)
\left(  b_{0},b_{1},b_{2},\ldots\right)  \\
& =(a_{0}b_{0},\ \ \ \text{whatever}).
\end{align*}
So $1=a_{0}b_{0}$ (by comparing the $0$-th entries of these sequences). So
$a_{0}$ has a multiplicative inverse in $K$ (namely $b_{0}$). Since
$a_{0}=\left[  x^{0}\right]  a$, this is exactly what we needed to show. \medskip

$\Longleftarrow$: Write the FPS $a$ as $(a_{0},a_{1},a_{2},\ldots)$. Assume
that $\left[  x^{0}\right]  a$ (that is, $a_{0}$) has a multiplicative inverse
in $K$.

We're looking for a multiplicative inverse of $a$. That is, we're looking for
a FPS $b=(b_{0},b_{1},b_{2},\ldots)$ with $ab=\underline{1}$. So we want
\begin{align*}
(1,0,0,0,\ldots)  & =\underline{1}=ab=(a_{0},a_{1},a_{2},\ldots)(b_{0}%
,b_{1},b_{2},\ldots)\\
& =(a_{0}b_{0},\;a_{0}b_{1}+a_{1}b_{0},\;a_{0}b_{2}+a_{1}b_{1}+a_{2}%
b_{0},\;\ldots).
\end{align*}


So we want%
\begin{align*}
1 &  =a_{0}\fbox{$b_0$}\,,\\
0 &  =a_{0}\fbox{$b_1$}+a_{1}b_{0}\,,\\
0 &  =a_{0}\fbox{$b_2$}+a_{1}b_{1}+a_{2}b_{0}\,,\\
0 &  =a_{0}\fbox{$b_3$}+a_{1}b_{2}+a_{2}b_{1}+a_{3}b_{0}\,,\\
&  \;\;\vdots
\end{align*}
Solve this system by elimination: get $b_{0}$ from the first equation, then
$b_{1}$ from the next, and so on. This is possible, since $a_{0}=[x^{0}]a$ has
a multiplicative inverse, so it can be divided by. Thus, $b$ can be found, and
the proof is complete.
\end{proof}


\end{document}