\documentclass{beamer}%
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{array}
\usepackage{setspace}
\usepackage{graphicx}
\usepackage{etex}
\usepackage{amsthm}
\usepackage{color}
\usepackage{wasysym}
\usepackage[all]{xy}
\usepackage{textpos}
\usepackage{url}
\usepackage{color}
\usepackage{epsfig,amsfonts,bbm,mathrsfs}
\usepackage{verbatim}
\usepackage{amsfonts}
\usepackage{wrapfig}
\usepackage{tikz}
\usepackage{hyperref}%
\setcounter{MaxMatrixCols}{30}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.50.0.2960}
%TCIDATA{LastRevised=Friday, November 05, 2021 04:07:22}
%TCIDATA{<META NAME="GraphicsSave" CONTENT="32">}
%TCIDATA{<META NAME="SaveForMode" CONTENT="1">}
%TCIDATA{BibliographyScheme=Manual}
%BeginMSIPreambleData
\providecommand{\U}[1]{\protect\rule{.1in}{.1in}}
%EndMSIPreambleData
\definecolor{grau}{rgb}{.5 , .5 , .5}
\definecolor{dunkelgrau}{rgb}{.35 , .35 , .35}
\definecolor{schwarz}{rgb}{0 , 0 , 0}
\definecolor{violet}{RGB}{143,0,255}
\definecolor{forestgreen}{RGB}{34, 100, 34}
\newcommand{\red}{\color{red}}
\newcommand{\grey}{\color{grau}}
\newcommand{\green}{\color{forestgreen}}
\newcommand{\violet}{\color{violet}}
\newcommand{\blue}{\color{blue}}
\newcommand{\bIf}{\textbf{If} }
\newcommand{\bif}{\textbf{if} }
\newcommand{\bthen}{\textbf{then} }
\newcommand{\ZZ}{{\mathbb Z}}
\newcommand{\NN}{{\mathbb N}}
\newcommand{\QQ}{{\mathbb Q}}
\newcommand{\RR}{{\mathbb R}}
\newcommand{\CC}{{\mathbb C}}
\newcommand\arxiv[1]{\href{https://arxiv.org/abs/#1}{\texttt{arXiv:#1}}}
\newcommand{\OO}{\operatorname {O}}
\newcommand{\id}{\operatorname {id}}
\newcommand{\Sym}{\operatorname {Sym}}
\newcommand{\Nm}{\operatorname {N}}
\newcommand{\GL}{\operatorname {GL}}
\newcommand{\SL}{\operatorname {SL}}
\newcommand{\Or}{\operatorname {O}}
\newcommand{\im}{\operatorname {Im}}
\newcommand{\Iso}{\operatorname {Iso}}
\newcommand{\zero}{\mathbf{0}}
\newcommand{\ord}{\operatorname*{ord}}
\newcommand{\bbK}{{\mathbb{K}}}
\newcommand{\whP}{{\widehat{P}}}
\newcommand{\Trop}{\operatorname*{Trop}}
\newcommand{\TropZ}{{\operatorname*{Trop}\mathbb{Z}}}
\newcommand{\rato}{\dashrightarrow}
\newcommand{\lcm}{\operatorname*{lcm}}
\newcommand{\tlab}{\operatorname*{tlab}}
\newcommand{\are}{\ar@{-}}
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\tup}[1]{\left( #1 \right)}
\newcommand{\ive}[1]{\left[ #1 \right]}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\lf}[2]{#1^{\underline{#2}}}
\newcommand{\upslack}{\mathchoice{\rotatebox[origin=c]{180}{$\displaystyle A$}}{\rotatebox[origin=c]{180}{$\textstyle A$}}{\rotatebox[origin=c]{180}{$\scriptstyle A$}}{\rotatebox[origin=c]{180}{$\scriptscriptstyle A$}}}
\newcommand{\downslack}{A}
\newcommand{\bfupslack}{\mathchoice{\rotatebox[origin=c]{180}{$\displaystyle \mathbf{A}$}}{\rotatebox[origin=c]{180}{$\textstyle \mathbf{A}$}}{\rotatebox[origin=c]{180}{$\scriptstyle \mathbf{A}$}}{\rotatebox[origin=c]{180}{$\scriptscriptstyle \mathbf{A}$}}}
\newcommand{\bfdownslack}{\mathbf{A}}
\newcommand{\bfU}{\mathbf{U}}
\newcommand{\underbrack}[2]{\underbrace{#1}_{\substack{#2}}}
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}}
\usetheme{Frankfurt}
\usefonttheme[onlylarge]{structurebold}
\setbeamerfont*{frametitle}{size=\normalsize,series=\bfseries}
\setbeamertemplate{navigation symbols}{}
\setbeamertemplate{footline}[frame number]
\setbeamertemplate{itemize/enumerate body begin}{}
\setbeamertemplate{itemize/enumerate subbody begin}{\normalsize}
\beamersetuncovermixins{\opaqueness<1>{0}}{\opaqueness<2->{15}}
\newcommand{\STRUT}{\vrule width 0pt depth 8pt height 0pt}
\newcommand{\ASTRUT}{\vrule width 0pt depth 0pt height 11pt}
\theoremstyle{plain}
\newtheorem{conj}[theorem]{Conjecture}
\setbeamertemplate{headline}{}
\begin{document}

\author{Darij Grinberg (Drexel University)\\\textit{joint work with Tom Roby (UConn)}}
\title{Noncommutative birational rowmotion on a rectangle  \\[1pc]
{\normalsize A case study in noncommutative dynamics}
}
\date{\vspace{-1pc}17 August 2023,
IPAC Seminar
}

\frame{\titlepage\textbf{slides:} {\color{red}
\url{http://www.cip.ifi.lmu.de/~grinberg/algebra/ipac2023b.pdf}} \medskip\\
\textbf{paper:} {\color{red}
\url{https://arxiv.org/abs/2208.11156}} \medskip\\
\textbf{FPSAC abstract:} {\color{red}
\url{https://www.cip.ifi.lmu.de/~grinberg/algebra/fps2023.pdf}}}

\begin{frame}
\frametitle{\ \ \ \ Introduction: Posets}

\begin{itemize}
\item A \textbf{poset} (= partially ordered set) is a set $P$ with a
reflexive, transitive and antisymmetric relation.

\item We use the symbols $<$, $\leq$, $>$ and $\geq$ accordingly.

\item We draw posets as Hasse diagrams:
\[%
\begin{array}
[c]{l|r}%
\xymatrixrowsep{1.5pc}
\xymatrix{ & \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\ \left(2,1\right) \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] \\ & \left(1,1\right) & }
\phantom{xx} & \phantom{xx} \xymatrixrowsep{1.5pc}
\xymatrix{ \delta \ar@{-}[rd] & & \\ & \gamma \ar@{-}[ld] \ar@{-}[rd] & \\ \alpha & & \beta }
\end{array}
\]


\item We only care about finite posets here.

\item We say that $u \in P$ \textbf{is covered by} $v \in P$ (written $u
\lessdot v$) if we have $u < v$ and there is no $w \in P$ satisfying $u < w <
v$.

\item We say that $u\in P$ \textbf{covers} $v\in P$ (written $u\gtrdot v$) if
we have $u>v$ and there is no $w\in P$ satisfying $u>w>v$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ More poset basics: $\widehat{P}$}

\begin{itemize}
\item Let $P$ be a finite poset. We define $\widehat{P}$ to be the poset
obtained by adjoining two new elements $0$ and $1$ to $P$ and forcing

\begin{itemize}
\item $0$ to be less than every other element, and

\item $1$ to be greater than every other element.
\end{itemize}
\end{itemize}

\textbf{Example:}
\[
\xymatrixrowsep{1.2pc} \xymatrixcolsep{1.2pc} \xymatrix{
& & & & & & {\red 1} \ar@{-}[d] & & \\
& \delta \ar@{-}[rd] & & & & & \delta \ar@{-}[rd] & & \\
P= & & \gamma \ar@{-}[ld] \ar@{-}[rd] & & {\color{red}\Longrightarrow} & \widehat{P} = & & \gamma \ar@{-}[ld] \ar@{-}[rd] & \\
& \alpha & & \beta & & & \alpha \ar@{-}[rd] & & \beta \ar@{-}[ld] \\
& & & & & & & {\red 0}
}
\]

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ More poset basics: linear extensions}

\begin{itemize}
\item A \textbf{linear extension} of $P$ means a list $\left(  v_{1}%
,v_{2},\ldots,v_{n}\right)  $ of all elements of $P$ (each only once) such
that $i<j$ whenever $v_{i}<v_{j}$.

\item For instance,%
\[
\xymatrix{ \delta \ar@{-}[rd] & & \\ & \gamma \ar@{-}[ld] \ar@{-}[rd] & \\ \alpha & & \beta }
\]
has two linear extensions $\left(  \alpha,\beta,\gamma,\delta\right)  $ and
$\left(  \beta,\alpha,\gamma,\delta\right)  $.

\item Every finite poset has at least one linear extension.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ More poset basics: order ideals}

\begin{itemize}

\item An \textbf{order ideal} of a poset $P$ is a subset $S$ of $P$ such that if $v \in S$ and $w \leq v$, then $w \in S$.

\item Examples (the elements of the order ideal are marked in red):
\[
\begin{array}{l|r}
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\red{\left(2,1\right)} \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\phantom{xx}
&
\phantom{xx}
\xymatrixrowsep{1.5pc}
\xymatrix{
\delta \ar@{-}[rd] & & \\
& \red{\gamma} \ar@{-}[ld] \ar@{-}[rd] & \\
\red{\alpha} & & \red{\beta}
}
\end{array}
\]
\hrulefill
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& 3 \ar@{-}[ld] \ar@{-}[rd] & \red{5} \ar@{-}[rd] & 6 \ar@{-}[d] & \red{7} \ar@{-}[ld] \\
1 & & \red{2} & \red{4} & &
}
\]

\item We let $J(P)$ denote the set of all order ideals of $P$.

\end{itemize}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion}

\begin{itemize}

\item \textbf{Classical rowmotion} is the rowmotion studied by Striker/Williams ({\red \arxiv{1108.1172}}). It has appeared many times before, under different guises:
\begin{itemize}
\item Brouwer/Schrijver (1974) (as a permutation of the antichains),
\item Fon-der-Flaass (1993) (as a permutation of the antichains),
\item Cameron/Fon-der-Flaass (1995) (as a permutation of the monotone Boolean functions),
\item Panyushev (2008), Armstrong/Stump/Thomas (2011) (as a permutation of the antichains or ``nonnesting partitions'', with relations to Lie theory).
\end{itemize}

\end{itemize}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion: the standard definition}

\begin{itemize}

\item Let $P$ be a finite poset.
\textbf{Classical rowmotion} is the map $\mathbf{r} : J(P) \to J(P)$ which sends
\only<1>{{\red{every order ideal $S$}}}%
\only<2-4>{{every order ideal $S$}}
to a new order ideal $\mathbf{r}\left(S\right)$ defined as follows:
\begin{itemize}
\item
\only<2>{{\red {\textbf{Invert colors} (i.e., take the complement $P \setminus S$).}}}
\only<1,3-4>{\textbf{Invert colors} (i.e., take the complement $P \setminus S$).}
\item
\only<3>{{\red {\textbf{Boil down to generators} (i.e., take the set $M$ of minimal elements of this complement).}}}
\only<1,2,4>{\textbf{Boil down to generators} (i.e., take the set $M$ of minimal elements of this complement).}
\item
\only<4>{{\red {\textbf{Complete downwards} (i.e., take the set $J$ of all $w\in P$ such that there exists an $m\in M$ such that $w\leq m$).}}}
\only<1-3>{\textbf{Complete downwards} (i.e., take the set $J$ of all $w\in P$ such that there exists an $m\in M$ such that $w\leq m$).}
\end{itemize}
Then, $\mathbf{r}\left(S\right) = J$.
\end{itemize}

{\bf Example:}

{\only<1>{ Let $S$ be the following order ideal ($\newmoon$ = inside order ideal):
\[
\xymatrixcolsep{1.5pc}
\xymatrix{
& \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & \\
\newmoon \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \\
& \newmoon & & \newmoon &
}
\] }}

{\only<2>{ Mark the elements of the complement {\blue blue}.
\[
\xymatrixcolsep{1.5pc}
\xymatrix{
& \blue \newmoon \ar@{-}[ld] \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \ar@{-}[rd] & \\
\fullmoon \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \\
& \fullmoon & & \fullmoon &
}
\] }}

{\only<3>{ Leave only the minimal elements:
\[
\xymatrixcolsep{1.5pc}
\xymatrix{
& \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & \\
\fullmoon \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \\
& \fullmoon & & \fullmoon &
}
\] }}

{\only<4>{ $\mathbf r(S)$ is the order ideal generated by $M$ (``everything below $M$''):
\[
\xymatrixcolsep{1.5pc}
\xymatrix{
& \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & \\
\fullmoon \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \ar@{-}[rd] & & \blue \newmoon \ar@{-}[ld] \\
& \blue \newmoon & & \blue \newmoon &
}
\] }}

\pause \pause \pause

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion: properties}

Classical rowmotion is a permutation of $J(P)$, hence has finite order. This order can be fairly large.

\pause
However, \textbf{for some types of $P$}, the order can be explicitly computed or bounded from above.

See Striker/Williams ({\red \arxiv{1108.1172}}) for the first generation of results.

\begin{itemize}

\item If $P$ is a $p \times q$-rectangle:
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & & \left(1,3\right) \ar@{-}[ld]\\
\left(2,1\right) \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] & \\
& \left(1,1\right) & &
}
\]
(shown here for $p=2$ and $q=3$), then $\ord\left(\mathbf{r}\right) = p+q$.

\end{itemize}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion: properties}

{\bf Example:}

\begin{overprint}
\onslide<1>
Let $S$ be the order ideal of the $2\times 3$-rectangle given by:
\onslide<2>
$\mathbf{r}(S)$ is
\onslide<3>
$\mathbf{r}^2(S)$ is
\onslide<4>
$\mathbf{r}^3(S)$ is
\onslide<5>
$\mathbf{r}^4(S)$ is
\onslide<6>
$\mathbf{r}^5(S)$ is
\end{overprint}

\begin{overprint}
\onslide<1>
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & & \left(1,3\right) \ar@{-}[ld]\\
{\red \left(2,1\right)} \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] & \\
& {\red \left(1,1\right)} & &
}
\]
\onslide<2>
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & & \left(1,3\right) \ar@{-}[ld]\\
\left(2,1\right) \ar@{-}[rd] & & {\red \left(1,2\right)} \ar@{-}[ld] & \\
& {\red \left(1,1\right)} & &
}
\]
\onslide<3>
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & & {\red \left(1,3\right)} \ar@{-}[ld]\\
{\red \left(2,1\right)} \ar@{-}[rd] & & {\red \left(1,2\right)} \ar@{-}[ld] & \\
& {\red \left(1,1\right)} & &
}
\]
\onslide<4>
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& {\red \left(2,2\right)} \ar@{-}[rd] \ar@{-}[ld] & & \left(1,3\right) \ar@{-}[ld]\\
{\red \left(2,1\right)} \ar@{-}[rd] & & {\red \left(1,2\right)} \ar@{-}[ld] & \\
& {\red \left(1,1\right)} & &
}
\]
\onslide<5>
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & & {\red \left(1,3\right)} \ar@{-}[ld]\\
\left(2,1\right) \ar@{-}[rd] & & {\red \left(1,2\right)} \ar@{-}[ld] & \\
& {\red \left(1,1\right)} & &
}
\]
\onslide<6>
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \left(2,3\right) \ar@{-}[rd] \ar@{-}[ld] & \\
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & & \left(1,3\right) \ar@{-}[ld]\\
{\red \left(2,1\right)} \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] & \\
& {\red \left(1,1\right)} & &
}
\]
\end{overprint}

\vspace{0.2cm}

\begin{overprint}
\onslide<6>
which is precisely the $S$ we started with.
\end{overprint}

\vspace{0.2cm}

\begin{overprint}
\onslide<6>
$\ord(\mathbf r) = p+q = 2+3 = 5$.
\end{overprint}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion: properties}

Further posets for which classical rowmotion has small order:

\begin{itemize}

\item If $P$ is a $\Delta$-shaped triangle with sidelength $p-1$:
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \\
& \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & \\
\fullmoon & & \fullmoon & & \fullmoon
}
\]
(shown here for $p=4$), then $\ord\left(\mathbf{r}\right) = 2p$ (if $p > 2$).

\item In this case, $\mathbf{r}^p$ is ``reflection in the $y$-axis'' (i.e., the central vertical axis).
\pause

\item More general examples come from finite Weyl groups
(Armstrong/Stump/Thomas, {\red \arxiv{1101.1277}})
and from minuscule weights of classical groups
(Rush/Shi, {\red \arxiv{1108.5245}};
Okada, {\red \arxiv{2004.05364}}).

\end{itemize}

\end{frame}


\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion: the toggling definition}

There is an alternative definition of classical rowmotion, which splits
it into many little steps.

\begin{itemize}

\item If $P$ is a poset and $v \in P$, then the \textbf{$v$-toggle}
is the map $\mathbf{t}_v : J(P) \to J(P)$ which takes every order
ideal $S$ to:
\begin{itemize}
\item $S \cup \left\{v\right\}$, if $v$ is not in $S$ but all elements of
$P$ covered by $v$ are in $S$ already;
\item $S \setminus \left\{v\right\}$, if $v$ is in $S$ but none of the
elements of $P$ covering $v$ is in $S$;
\item $S$ otherwise.
\end{itemize}

\item Simpler way to state this: $\mathbf{t}_v\left(S\right)$ is:
\begin{itemize}
\item $S \bigtriangleup \left\{v\right\}$ (symmetric difference)
if this is an order ideal;
\item $S$ otherwise.
\end{itemize}
(``Try to add or remove $v$ from $S$; if this breaks the
order ideal axiom, leave $S$ fixed.'')

\end{itemize}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Classical rowmotion: the toggling definition}

\begin{itemize}

\item Let $\left(v_1,v_2,\ldots,v_n\right)$ be a \textbf{linear extension} of $P$; this means a list of all elements of $P$ (each only once) such that $i < j$ whenever $v_i < v_j$.

\item Cameron and Fon-der-Flaass showed that
\[
\mathbf r = \mathbf t_{v_1} \circ \mathbf t_{v_2} \circ \cdots \circ \mathbf t_{v_n}.
\]

\end{itemize}

\textbf{Example:}

\only<1>{Start with this order ideal $S$: \phantom{$t_{(2,2)}$}
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\red{\left(2,1\right)} \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\]
}

\only<2>{First apply $\mathbf t_{(2,2)}$, which changes nothing:
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\red{\left(2,1\right)} \ar@{-}[rd] & & \left(1,2\right) \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\]
}

\only<3>{Then apply $\mathbf t_{(1,2)}$, which adds $(1,2)$ to the order ideal:
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\red{\left(2,1\right)} \ar@{-}[rd] & & \red{\left(1,2\right)} \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\]
}

\only<4>{Then apply $\mathbf t_{(2,1)}$, which removes $(2,1)$ from the order ideal:
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\left(2,1\right) \ar@{-}[rd] & & \red{\left(1,2\right)} \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\]
}

\only<5>{Finally apply $\mathbf t_{(1,1)}$, which changes nothing:
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\left(2,1\right) \ar@{-}[rd] & & \red{\left(1,2\right)} \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\]
}

\only<6>{So this is $\mathbf r(S)$: \phantom{$t_{(2,2)}$}
\[
\xymatrixrowsep{1.5pc}
\xymatrix{
& \left(2,2\right) \ar@{-}[rd] \ar@{-}[ld] & \\
\left(2,1\right) \ar@{-}[rd] & & \red{\left(1,2\right)} \ar@{-}[ld] \\
& \red{\left(1,1\right)} &
}
\]
}

\pause \pause \pause \pause \pause 

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Goals of the talk}

\begin{itemize}

\item define \textbf{noncommutative birational rowmotion}: a generalization of classical rowmotion on several levels, due to David Einstein, James Propp, Tom Roby and myself, based on ideas of Anatol Kirillov and Arkady Berenstein.

\item extend the ``order $p+q$'' theorem for rectangles to this generalization.

\item ask some questions.

\end{itemize}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Noncommutative birational rowmotion: definition}

\begin{itemize}
\item Let $\mathbb{K}$ be a ring (not necessarily commutative).

\item A \textbf{$\mathbb{K}$-labelling of} $P$ will mean a function
$\widehat{P}\rightarrow\mathbb{K}$.

\item The values of such a function will be called the \textbf{labels} of the labelling.

\item We will represent labellings by drawing the labels on the vertices of
the Hasse diagram of $\widehat{P}$.
\end{itemize}

\textbf{Example:} This is a $\mathbb{Q}$-labelling of the $2\times
2$-rectangle:
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& 14 \ar@{-}[d] & \\
& 10 \ar@{-}[rd] \ar@{-}[ld] & \\
-2 \ar@{-}[rd] & & 7 \ar@{-}[ld] \\
& 1/3 \ar@{-}[d] & \\
& 12 &
}
\]

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: definition}

\begin{itemize}
\item For any $v\in P$, define the \textbf{birational $v$-toggle} as the
partial map $T_{v}:{\mathbb{K}}^{{\widehat{P}}}\dashrightarrow{\mathbb{K}%
}^{{\widehat{P}}}$ defined by
\[
\left(  T_{v}f\right)  \left(  w\right)  =\left\{
\begin{array}
[c]{cc}%
f\left(  w\right)  ,\ \ \ \  & \text{if }w\neq v;\\
\left(  \sum\limits_{\substack{u\in\widehat{P};\\u\lessdot v}}f\left(
u\right)  \right)  \cdot\overline{f\left(  v\right)  }\cdot\overline
{\sum\limits_{\substack{u\in\widehat{P};\\u\gtrdot v}}\overline{f\left(
u\right)  }},\ \ \ \ \  & \text{if }w=v
\end{array}
\right.
\]
for all $w\in{\widehat{P}}$.

Here (and in the following), $\overline{m}$ means $m^{-1}$ whenever
$m\in\mathbb{K}$. \pause


\item This is a \textbf{partial} map. If any of the inverses does not exist in
$\mathbb{K}$, then $T_{v}f$ is undefined! \pause


\item Notice that this is a \textbf{local change} to the label at $v$; all
other labels stay the same. \pause


\item If $\mathbb{K}$ is commutative, then $T_{v}^{2}=\operatorname{id}$ (on
the range of $T_{v}$).
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: definition}

\begin{itemize}
\item We define \textbf{(noncommutative)} \textbf{birational rowmotion} as the
partial map
\[
R:=T_{v_{1}}\circ T_{v_{2}}\circ\cdots\circ T_{v_{n}}:{\mathbb{K}%
}^{{\widehat{P}}}\dashrightarrow{\mathbb{K}}^{{\widehat{P}}},
\]
where $\left(  v_{1},v_{2},\ldots,v_{n}\right)  $ is a linear extension of $P$.

\item This is indeed independent of the linear extension, because: \pause


\begin{itemize}
\item $T_{v}$ and $T_{w}$ commute whenever $v$ and $w$ are incomparable (or
just don't cover each other);

\item we can get from any linear extension to any other by switching
incomparable adjacent elements.
\end{itemize}
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: example}

\textbf{Example:}

Let us ``rowmote'' a (generic) ${\mathbb{K}}$-labelling of the $2\times
2$-rectangle:
\[%
\begin{array}
[c]{c|c}%
\text{poset\ \ \ } & \text{\ \ labelling}\\\hline
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & 1 \ar@{-}[d] & \\ & (2,2) \ar@{-}[rd] \ar@{-}[ld] & \\ (2,1) \ar@{-}[rd] & & (1,2) \ar@{-}[ld] \\ & (1,1) \ar@{-}[d] & \\ & 0 & }
\phantom{yyy} & \phantom{yyy}
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & z \ar@{-}[rd] \ar@{-}[ld] & \\ x \ar@{-}[rd] & & y \ar@{-}[ld] \\ & w \ar@{-}[d] & \\ & a & }
\end{array}
\]
\pause We have $R = T_{(1,1)} \circ T_{(1,2)} \circ T_{(2,1)} \circ T_{(2,2)}$
(using the linear extension $((1,1),(1,2),(2,1),(2,2))$).

That is, toggle in the order ``top, left, right, bottom''.
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: example}

\textbf{Example:}

Let us ``rowmote'' a (generic) ${\mathbb{K}}$-labelling of the $2\times
2$-rectangle:
\[%
\begin{array}
[c]{c|c}%
\text{original labelling }f & \only<1>{\text{labelling }T_{(2,2)} f}
\only<2>{\text{labelling }T_{(2,1)}T_{(2,2)} f} \only<3>{\text{labelling
}T_{(1,2)}T_{(2,1)}T_{(2,2)} f} \only<4-5>{\text{labelling }T_{(1,1)}%
T_{(1,2)}T_{(2,1)}T_{(2,2)} f = Rf}\\\hline
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & z \ar@{-}[rd] \ar@{-}[ld] & \\ x \ar@{-}[rd] & & y \ar@{-}[ld] \\ & w \ar@{-}[d] & \\ & a & }
\phantom{yyy} & \phantom{yyy} \only<1>{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & \red{(x+y)\overline{z}b} \ar@{-}[rd] \ar@{-}[ld] & \\ x \ar@{-}[rd] & & y \ar@{-}[ld] \\ & w \ar@{-}[d] & \\ & a & }
} \only<2>{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & (x+y)\overline{z}b \ar@{-}[rd] \ar@{-}[ld] & \\ \red{w\overline{x}(x+y)\overline{z}b} \ar@{-}[rd] & & y \ar@{-}[ld] \\ & w \ar@{-}[d] & \\ & a & }
} \only<3>{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & (x+y)\overline{z}b \ar@{-}[rd] \ar@{-}[ld] & \\ w\overline{x}(x+y)\overline{z}b \ar@{-}[rd] & & \red{w\overline{y}(x+y)\overline{z}b} \ar@{-}[ld] \\ & w \ar@{-}[d] & \\ & a & }
} \only<4>{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & (x+y)\overline{z}b \ar@{-}[rd] \ar@{-}[ld] & \\ w\overline{x}(x+y)\overline{z}b \ar@{-}[rd] & & w\overline{y}(x+y)\overline{z}b \ar@{-}[ld] \\ & \vphantom{\sum\limits^f} \hbox to 3em{\hss$\displaystyle\red{a \overline{w} \cdot \overline{\overline{w \overline{x} (x+y) \overline{z} b} + \overline{w \overline{y} (x+y) \overline{z} b}}}$\hss} \ar@{-}[d] & \\ & a & }
} \only<5>{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{ & b \ar@{-}[d] & \\ & (x+y)\overline{z}b \ar@{-}[rd] \ar@{-}[ld] & \\ w\overline{x}(x+y)\overline{z}b \ar@{-}[rd] & & w\overline{y}(x+y)\overline{z}b \ar@{-}[ld] \\ & \red{a\overline{z}b} \ar@{-}[d] & \\ & a & }
}%
\end{array}
\]
\only<1>{We are using $R = T_{(1,1)} \circ T_{(1,2)} \circ T_{(2,1)}
\circ\color{red}{T_{(2,2)}}$.} \only<2>{We are using $R = T_{(1,1)} \circ
T_{(1,2)} \circ{\color{red}{T_{(2,1)}}} \circ T_{(2,2)}$.} \only<3>{We are
using $R = T_{(1,1)} \circ{\color{red}{T_{(1,2)}}} \circ T_{(2,1)} \circ
T_{(2,2)}$.} \only<4>{We are using $R = {\color{red}{T_{(1,1)}}} \circ
T_{(1,2)} \circ T_{(2,1)} \circ T_{(2,2)}$.} \only<5>{We have used $R =
{T_{(1,1)}} \circ T_{(1,2)} \circ T_{(2,1)} \circ T_{(2,2)}$ and simplified
the result.} \pause \pause \pause \pause

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ {\color{yellow} Birational rowmotion: motivation}}

\begin{itemize}
\item Why is this called birational rowmotion?

\item Indeed, it generalizes classical rowmotion of order ideals:

\begin{itemize}
\item Let ${\operatorname*{Trop}\mathbb{Z}}$ be the \textbf{tropical semiring}
over ${\mathbb{Z}}$. This is the set ${\mathbb{Z}} \cup\left\{  -\infty
\right\}  $ with ``addition'' $\left(  a,b\right)  \mapsto\max\left\{
a,b\right\}  $ and ``multiplication'' $\left(  a,b\right)  \mapsto a+b$. This
is a semifield. \pause


\item To every order ideal $S\in J(P)$, assign a ${\operatorname*{Trop}%
\mathbb{Z}}$-labelling $\operatorname*{tlab}S$ defined by
\[
\left(  \operatorname*{tlab}S\right)  \left(  v\right)  =\left\{
\begin{array}
[c]{cc}%
1, & \text{if }v\notin S\cup\left\{  0\right\}  ;\\
0, & \text{if }v\in S\cup\left\{  0\right\}  .
\end{array}
\right.
\]
This map $\operatorname*{tlab}:J\left(  P\right)  \rightarrow\left(
\operatorname*{Trop}\mathbb{Z}\right)  ^{\widehat{P}}$ is injective. \pause


\item Let $\mathbf{t}_{v}$ be the order ideal $v$-toggle, and let $\mathbf{r}$
be order ideal rowmotion. Then:%
\[
T_{v}\circ\operatorname*{tlab}=\operatorname*{tlab}\circ\,\mathbf{t}%
_{v},\ \ \ \ \ \ \ \ \ \ R\circ\operatorname*{tlab}=\operatorname*{tlab}%
\circ\,\mathbf{r}.\text{ }\pause
\]


\item Don't like semifields? Use $\mathbb{Q}$ and take the \textquotedblleft
tropical limit\textquotedblright.
\end{itemize}
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: some orders}

\begin{itemize}
\item If $\mathbb{K}$ is commutative, then birational rowmotion $R$ has nice
orders for nice posets (mostly
{\color{red}\href{https://arxiv.org/abs/1402.6178}{Grinberg/Roby 2014}}):

\begin{itemize}
\item If $P$ is a rectangle $\left[  p\right]  \times\left[  q\right]  $, then
$R^{p+q}=\operatorname*{id}$. \pause


\item If $P$ is a \textquotedblleft right half\textquotedblright%
\ $\vartriangleright$ of the square $\left[  p\right]  \times\left[  p\right]
$, then $R^{2p}=\operatorname*{id}$. \pause


\item If $P$ is a \textquotedblleft top half\textquotedblright\ $\Delta$ or
\textquotedblleft bottom half\textquotedblright\ $\nabla$ of the square
$\left[  p\right]  \times\left[  p\right]  $, then $R^{2p}=\operatorname*{id}%
$, and moreover $R^{p}$ is reflection across the vertical axis. \pause


\item More generally, if $P$ is the minuscule poset associated to a minuscule
weight $\lambda$ of a finite-dimensional simple Lie algebra $\mathfrak{g}$,
then $R^{h}=\operatorname*{id}$, where $h$ is the Coxeter number of
$\mathfrak{g}$. ({\color{red}\href{https://doi.org/10.37236/9557}{Soichi
Okada, doi:10.37236/9557}}.) \pause


\item If $P$ is an \textquotedblleft$n$-graded forest\textquotedblright\ (a
forest with all leaves having rank $n$), then $R^{\ell}=\operatorname*{id}$
for $\ell=\operatorname{lcm}\left(  1,2,\ldots,n+1\right)  $.
\end{itemize}
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: some chaos}

\begin{itemize}
\item In general, even if $\mathbb{K}$ is commutative,
$R$ can have infinite order -- e.g., for the following two
posets:
\[
\begin{array}
[c]{l|r}%
\xymatrixrowsep{1.5pc}\xymatrix{ \fullmoon \ar@{-}[d] \ar@{-}[dr] & \fullmoon \ar@{-}[d] \ar@{-}[dl] & \fullmoon \ar@{-}[dl] \\ \fullmoon & \fullmoon & }\phantom{xx} &
\phantom{xx}\xymatrixrowsep{1.5pc}\xymatrixcolsep{0.2pc}\xymatrix{ & \fullmoon \ar@{-}[dl] \ar@{-}[dr] & & \fullmoon \ar@{-}[dl] \ar@{-}[dr] & & \fullmoon \ar@{-}[dl] \ar@{-}[dr] & \\ \fullmoon & & \fullmoon & & \fullmoon & & \fullmoon }
\end{array}
\]
\pause


\item Things get even more complicated when $\mathbb{K}$ is noncommutative\dots
\pause


\item Take this poset:%
\[
\xymatrixrowsep{1.5pc}\xymatrix{ \fullmoon \ar@{-}[dr] & \fullmoon \ar@{-}[d] & \fullmoon \ar@{-}[dl] \\ & \fullmoon }\phantom{xx}
\]
This satisfies $R^{6}=\operatorname*{id}$ if $\mathbb{K}$ is commutative, but
nothing like that in general. \pause


\item However, not all is lost!
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: the rectangle case}

\begin{itemize}
\item Let $p$ and $q$ be two positive integers. Let $\mathbb{K}$ be a ring.
Let $P$ be the $p\times q$-rectangle poset: i.e.,%
\[
P:=\left[  p\right]  \times\left[  q\right]  ,\ \ \ \ \ \ \ \ \ \ \text{where
}\left[  m\right]  :=\left\{  1,2,\ldots,m\right\}  .
\]


(The order on $P$ is entrywise.)

\only<1>{ \textbf{Example:} For $p=3$ and $q=4$, this is%
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}%
%TCIMACRO{\TeXButton{3x4-rect}{\scalebox{0.9}{
%\xymatrix{
%& & & & {\left(3,4\right)} \ar@{-}[rd] \ar@{-}[ld] & & \\
%& & & {\left(3,3\right)} \ar@{-}[rd] \ar@{-}[ld] & & {\left(2,4\right)}
%\ar@{-}[rd] \ar@{-}[ld] & \\
%& & \left(3,2\right) \ar@{-}[rd] \ar@{-}[ld] & & {\left(2,3\right)} \ar@
%{-}[rd] \ar@{-}[ld] & & {\left(1,4\right)} \ar@{-}[ld] \\
%& \left(3,1\right) \ar@{-}[rd] & & {\left(2,2\right)} \ar@{-}[rd] \ar@
%{-}[ld] & & {\left(1,3\right)} \ar@{-}[ld] & \\
%& & \left(2,1\right) \ar@{-}[rd] & & {\left(1,2\right)} \ar@{-}[ld] & & \\
%& & & {\left(1,1\right)} & & &
%}
%}}}%
%BeginExpansion
\scalebox{0.9}{
\xymatrix{
& & & & {\left(3,4\right)} \ar@{-}[rd] \ar@{-}[ld] & & \\
& & & {\left(3,3\right)} \ar@{-}[rd] \ar@{-}[ld] & & {\left(2,4\right)}
\ar@{-}[rd] \ar@{-}[ld] & \\
& & \left(3,2\right) \ar@{-}[rd] \ar@{-}[ld] & & {\left(2,3\right)} \ar@
{-}[rd] \ar@{-}[ld] & & {\left(1,4\right)} \ar@{-}[ld] \\
& \left(3,1\right) \ar@{-}[rd] & & {\left(2,2\right)} \ar@{-}[rd] \ar@
{-}[ld] & & {\left(1,3\right)} \ar@{-}[ld] & \\
& & \left(2,1\right) \ar@{-}[rd] & & {\left(1,2\right)} \ar@{-}[ld] & & \\
& & & {\left(1,1\right)} & & &
}
}%
%EndExpansion
.
\]
} \pause


\item Let $f\in\mathbb{K}^{\widehat{P}}$ be a $\mathbb{K}$-labelling. Let
$a=f\left(  0\right)  $ and $b=f\left(  1\right)  $.
\only<2>{
\vspace{15pc}
}
\pause

\end{itemize}

\begin{block}{\textbf{Periodicity theorem (* 2015, $\dagger$ 2022 G \& Roby):}}
If $a$ and $b$ are invertible and
$R^{p+q}f$ is well-defined, then\vspace{-0.7pc}
\[
\left(  R^{p+q}f\right)  \left(  x\right)  =a\overline{b}\cdot f\left(
x\right)  \cdot\overline{a}b\ \ \ \ \ \ \ \ \ \ \text{for each }%
x\in\widehat{P}.
\]
\end{block}

\only<3>{Note that $a\overline{b}\cdot f\left(
x\right)  \cdot\overline{a}b$ is \textbf{not} generally
conjugate  to $f\left(x\right)$.
\vspace{9pc}
}
\pause


\begin{block}{\textbf{Reciprocity theorem (* 2015, $\dagger$ 2022 G \& Roby):}}
Let $\ell\in\mathbb{N}$.
Let $\left(  i,j\right)  \in P$.
If $R^{\ell}f$
is well-defined and $\ell\geq i+j-1$, then\vspace{-0.7pc}
\begin{align*}
\left(  R^{\ell}f\right)  \left(  i,j\right)   &  =a\cdot\overline{\left(
R^{\ell-i-j+1}f\right)  \underbrace{\left(  p+1-i,q+1-j\right)  }%
_{=\text{antipode of }\left(  i,j\right)  \text{ in }P}}\cdot b .
\end{align*}
\end{block}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: the rectangle case, example}

\begin{itemize}
\item \textbf{Example:} Iteratively apply $R$ to a labelling of the $2\times2$-rectangle.
\end{itemize}
\only<1>{
\qquad Here is $R^0 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& z \ar@{-}[rd] \ar@{-}[ld] & \\
x \ar@{-}[rd] & & y \ar@{-}[ld] \\
& w \ar@{-}[d] & \\
& a &
}
\]
}
\only<2>{
\qquad Here is $R^1 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& (x+y)\overline{z}b \ar@{-}[rd] \ar@{-}[ld] & \\
w\overline{x}(x+y)\overline{z}b \ar@{-}[rd] & & w\overline{y}(x+y)\overline
{z}b \ar@{-}[ld] \\
& a\overline{z}b \ar@{-}[d] & \\
& a &
}
\]
}
\only<3>{
\qquad Here is $R^2 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& w\left(\overline{x}+\overline{y}\right)b \ar@{-}[rd] \ar@{-}[ld] & \\
a\cdot\overline{x+y}\cdot x\left(\overline{x}+\overline{y}\right)b \ar@
{-}[rd] & & a\cdot\overline{x+y}\cdot y\left(\overline{x}+\overline{y}%
\right)b \ar@{-}[ld] \\
& a\overline{b}z\cdot\overline{x+y}\cdot b \ar@{-}[d] & \\
& a &
}
\]
}
\only<4>{
\qquad Here is $R^3 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& a\overline{w}b \ar@{-}[rd] \ar@{-}[ld] & \\
... \ar@{-}[rd] & & a \overline{b}z\cdot\overline{x+y}\cdot\overline
{\overline{x}+\overline{y}}\cdot\overline{y}\cdot\left(x+y\right)\overline
{w}b \ar@{-}[ld] \\
& a\overline{b}\cdot\overline{\overline{x}+\overline{y}}\cdot\overline
{w}b \ar@{-}[d] & \\
& a &
}
\]
}
\only<5>{
\qquad Here is $R^4 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& a\overline{b}z\overline{a}b \ar@{-}[rd] \ar@{-}[ld] & \\
... \ar@{-}[rd] & & a\overline{b}\cdot\overline{\overline{x}+\overline{y}%
}\cdot\overline{x+y}\cdot y\left(\overline{x}+\overline{y}\right
)\left(x+y\right)\overline{a}b \ar@{-}[ld] \\
& a\overline{b}w\overline{a}b \ar@{-}[d] & \\
& a &
}
\]
}
\only<6>{
\qquad Here is $R^4 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& a\overline{b}z\overline{a}b \ar@{-}[rd] \ar@{-}[ld] & \\
a\overline{b}x\overline{a}b \ar@{-}[rd] & & a\overline{b}y\overline{a}%
b \ar@{-}[ld] \\
& a\overline{b}w\overline{a}b \ar@{-}[d] & \\
& a &
}
\]
\qquad (after nontrivial simplifications).
}
\only<7>{
\qquad Here is $R^4 f$ :
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& b \ar@{-}[d] & \\
& a\overline{b}z\overline{a}b \ar@{-}[rd] \ar@{-}[ld] & \\
a\overline{b}x\overline{a}b \ar@{-}[rd] & & a\overline{b}y\overline{a}%
b \ar@{-}[ld] \\
& a\overline{b}w\overline{a}b \ar@{-}[d] & \\
& a &
}
\]
}
%EndExpansion
\pause\pause\pause\pause\pause\pause


\qquad This confirms the periodicity theorem for $p = q = 2$.

\begin{itemize}
\item Note that this is similar to Kontsevich's periodicity conjecture, proved
by Iyudu/Shkarin
({\color{red}\arxiv{1305.1965}}).
\end{itemize}

\vspace{9pc}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Birational rowmotion: the rectangle case, example}

\begin{itemize}

\only<1>{
\item
Here are $R^0 f, R^1 f, \ldots, R^3 f$ for a generic $f \in \mathbb{K}^{\widehat{[2]\times [2]}}$ again, this time fully simplified and with the $f(0) = a$ and $f(1) = b$ labels removed:
\[
\hspace{-3.4pc}
\scalebox{0.8}{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0pc}\xymatrix{
& & z \are[dl] \are[dr] & & & & & & & \tup{x+y} \overline{z} b \are[dl] \are[dr]
\\
R^0 f = & x \are[dr] & & y \are[dl] &  ; & & &  R^1 f = & w \overline{x} \tup{x+y} \overline{z} b \are[dr] & & w \overline{y} \tup{x+y} \overline{z} b \are[dl]
\\
& & w & & & & & & & a \overline{z} b
\\
\\
& & w \tup{\overline{x} + \overline{y}} b \are[dl] \are[dr] & & & & & & & a \overline{w} b \are[dl] \are[dr]
\\
R^2 f = & a \overline{y} b \are[dr] & & a \overline{x} b \are[dl] &  ; & & &  R^3 f = & a \overline{b} z \overline{x+y} y \overline{w} b \are[dr] & & a \overline{b} z \overline{x+y} x \overline{w} b \are[dl]
\\
& & a \overline{b} z \overline{x+y} b & & & & & & & a \overline{b} \cdot \overline{\overline{x} + \overline{y}} \cdot \overline{w} b
}
}
\]
}

\only<2>{
\item
Here are $R^0 f, R^1 f, \ldots, R^3 f$ for a generic $f \in \mathbb{K}^{\widehat{[2]\times [2]}}$ again, this time fully simplified and with the $f(0) = a$ and $f(1) = b$ labels removed:
\[
\hspace{-3.4pc}
\scalebox{0.8}{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0pc}\xymatrix{
& & {\blue z} \are[dl] \are[dr] & & & & & & & \tup{x+y} \overline{z} b \are[dl] \are[dr]
\\
R^0 f = & {\green x} \are[dr] & & {\red y} \are[dl] &  ; & & &  R^1 f = & w \overline{x} \tup{x+y} \overline{z} b \are[dr] & & w \overline{y} \tup{x+y} \overline{z} b \are[dl]
\\
& & {\violet w} & & & & & & & {\blue a \overline{z} b}
\\
\\
& & w \tup{\overline{x} + \overline{y}} b \are[dl] \are[dr] & & & & & & & {\violet a \overline{w} b} \are[dl] \are[dr]
\\
R^2 f = & {\red a \overline{y} b} \are[dr] & & {\green a \overline{x} b} \are[dl] &  ; & & &  R^3 f = & a \overline{b} z \overline{x+y} y \overline{w} b \are[dr] & & a \overline{b} z \overline{x+y} x \overline{w} b \are[dl]
\\
& & a \overline{b} z \overline{x+y} b & & & & & & & a \overline{b} \cdot \overline{\overline{x} + \overline{y}} \cdot \overline{w} b
}
}
\]
Equally colored labels are related by reciprocity. Can you spot some more?
}

\only<3>{
\item
Here are $R^0 f, R^1 f, \ldots, R^3 f$ for a generic $f \in \mathbb{K}^{\widehat{[2]\times [2]}}$ again, this time fully simplified and with the $f(0) = a$ and $f(1) = b$ labels removed:
\[
\hspace{-3.4pc}
\scalebox{0.8}{
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0pc}\xymatrix{
& & z \are[dl] \are[dr] & & & & & & & {\blue \tup{x+y} \overline{z} b} \are[dl] \are[dr]
\\
R^0 f = & x \are[dr] & & y \are[dl] &  ; & & &  R^1 f = & w \overline{x} \tup{x+y} \overline{z} b \are[dr] & & {\red w \overline{y} \tup{x+y} \overline{z} b} \are[dl]
\\
& & w & & & & & & & a \overline{z} b
\\
\\
& & w \tup{\overline{x} + \overline{y}} b \are[dl] \are[dr] & & & & & & & a \overline{w} b \are[dl] \are[dr]
\\
R^2 f = & a \overline{y} b \are[dr] & & a \overline{x} b \are[dl] &  ; & & &  R^3 f = & {\red a \overline{b} z \overline{x+y} y \overline{w} b} \are[dr] & & a \overline{b} z \overline{x+y} x \overline{w} b \are[dl]
\\
& & {\blue a \overline{b} z \overline{x+y} b} & & & & & & & a \overline{b} \cdot \overline{\overline{x} + \overline{y}} \cdot \overline{w} b
}
}
\]
Here are some more instances of reciprocity. (There are more.)
}

\end{itemize}

\vspace{9pc}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ {\color{yellow} The commutative case}}

\begin{itemize}
\item In 2014, we proved both theorems for commutative $\mathbb{K}$. \pause


\item \textit{Proof outline {\small (inspired by A. Y. Volkov,
{\color{red}\href{https://arxiv.org/abs/hep-th/0606094}{\textit{arXiv:hep-th/0606094}%
}})}:}

\begin{itemize}
\item WLOG assume $\mathbb{K}$ is a field (because our claims boil down to
polynomial identities). \pause


\item Show that \textquotedblleft almost all\textquotedblright\ labellings of
$P$ are in the image of a certain map $\operatorname*{Grasp}\nolimits_{0}$
from the matrix space $\mathbb{K}^{p\times\left(  p+q\right)  }$ to
$\mathbb{K}^{\widehat{P}}$. \pause


\only<4>{ Explicitly, if $A\in\mathbb{K}^{p\times\left(  p+q\right)  }$ is any
matrix, then $\left(  \operatorname*{Grasp}\nolimits_{0}A\right)  \left(
0\right)  =\left(  \operatorname*{Grasp}\nolimits_{0}A\right)  \left(
1\right)  =1$ and
\[
\left(  \operatorname*{Grasp}\nolimits_{0}A\right)  \left(  i,j\right)
=\dfrac{\det\left(  A\left[  1:i\mid i+j-1:p+j\right]  \right)  }{\det\left(
A\left[  0:i\mid i+j:p+j\right]  \right)  }%
\]
for all $\left(  i,j\right)  \in P$, where the $A\left[  a:b\mid c:d\right]
$s are certain submatrices of $A$. } \only<4>{(Note that this map
$\operatorname*{Grasp}\nolimits_{0}$ actually factors through the
Grassmannian.)} \pause


\item Construct a commutative diagram%
\[%
%TCIMACRO{\TeXButton{commutative square}{\xymatrix@C=4pc{
%\mathbb{K}^{p\times\left(p+q\right)} \ar@{.>}[r]^-{\operatorname{Grasp}_0}
%\ar[d]_-{\rho}& \mathbb{K}^{\widehat{P}} \ar@{.>}[d]^-{R} \\
%\mathbb{K}^{p\times\left(p+q\right)} \ar@{.>}[r]^-{\operatorname{Grasp}_0}
%&  \mathbb{K}^{\widehat{P}}
%}}}%
%BeginExpansion
\xymatrix@C=4pc{
\mathbb{K}^{p\times\left(p+q\right)} \ar@{.>}[r]^-{\operatorname{Grasp}_0}
\ar[d]_-{\rho}& \mathbb{K}^{\widehat{P}} \ar@{.>}[d]^-{R} \\
\mathbb{K}^{p\times\left(p+q\right)} \ar@{.>}[r]^-{\operatorname{Grasp}_0}
&  \mathbb{K}^{\widehat{P}}
}%
%EndExpansion
\ \ ,
\]
where $\rho$ is (more or less) rotating the matrix horizontally (last column
to front). \pause


\item Conclude that $R^{p+q}=\operatorname*{id}$ because $\rho^{p+q}%
=\operatorname*{id}$. \pause


\item Reciprocity also easy using $\operatorname{Grasp}_{0}$.
\end{itemize}
\end{itemize}

\vspace{9pc}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ {\color{yellow} First attempts at general proof}}

\begin{itemize}
\item This looks easy; the devil is in the details (particularly the
\textquotedblleft almost all\textquotedblright\ part: not just Zariski density
but also some rescaling required). \pause


\item Can this be generalized to arbitrary $\mathbb{K}$ ? \pause


\item \textbf{In some sense, yes:} Replace determinants by quasideterminants
(Gelfand/Retakh,
{\color{red}\href{https://arxiv.org/abs/q-alg/9705026v1}{arXiv:q-alg/9705026}%
}; see also
{\color{red}\href{https://arxiv.org/abs/math/0208146}{arXiv:math/0208146}}).
\pause


Specifically, redefine $\operatorname*{Grasp}\nolimits_{0}$ by
\[
\left(  \operatorname*{Grasp}\nolimits_{0}A\right)  \left(  i,j\right)
=\left(  -1\right)  ^{i}q_{0,i+j-1}^{\left\{  1:i\mid i+j:p+j\right\}
}\left(  A\right)  .
\]
The \textquotedblleft algebra\textquotedblright\ works!\pause


\item Unfortunately, the technical parts no longer work:

\begin{itemize}
\item What does \textquotedblleft almost all\textquotedblright\ mean for
noncommutative $\mathbb{K}$ ? \pause


\item Can we WLOG assume that $\mathbb{K}$ is a skew field? \pause


No: e.g., the identity $x\overline{yx}y=1$ holds in all skew fields but not in
all rings. \pause

\end{itemize}

\item We now believe this approach is a dead end.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ {\color{yellow} Enter Musiker}}

\begin{itemize}
\item New proofs of periodicity and reciprocity in the commutative-$\mathbb{K}%
$ case were found by Gregg Musiker and Tom Roby in
{\color{red}\href{https://arxiv.org/abs/1801.03877}{arXiv:1801.03877}}.

They proceed by giving an explicit formula for $\left(  R^{k}f\right)  \left(
i,j\right)  $. For instance, $\left(  R^{3}f\right)  \left(  3,2\right)  $
\begin{align*}
&  =\frac{1}{{\footnotesize A}_{02}{\footnotesize +A}_{11}{\footnotesize +A}%
_{20}}(A_{01}A_{02}A_{11}A_{12}+A_{01}A_{02}A_{12}A_{20}+A_{01}A_{02}%
A_{20}A_{21}\\
&  \ \ \ \ \ \ \ \ \ \ +A_{02}A_{10}A_{12}A_{20}+A_{02}A_{10}A_{20}%
A_{21}+A_{10}A_{11}A_{20}A_{21}),
\end{align*}
where%
\[
A_{ij}:=\left(  f\left(  i,j+1\right)  +f\left(  i+1,j\right)  \right)
\diagup f\left(  i+1,j+1\right)  .\text{ \pause}%
\]


\item General formula for $\left(  R^{k}f\right)  \left(  i,j\right)  $
involves sums over NILPs (non-intersecting lattice path families) in numerator
and denominator, as well as index shifting and a case split (\textquotedblleft
small\textquotedblright\ $k$ and \textquotedblleft large\textquotedblright%
\ $k$ behave differently). \pause


\item Lattice paths can be generalized to noncommutative $\mathbb{K}$, but
NILPs? Unclear in what order to multiply different paths.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ What now?}

\begin{itemize}
\item We are back at square one: no known theory available. \pause


\item Let's play around with the setting. \\
Step 1: Introduce notations...
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ A new beginning}

\begin{itemize}
\item Fix $p$, $q$, $P$ and $f$. Assume that $R^{\ell}f$ is well-defined for
all necessary $\ell$. Let $a=f\left(  0\right)  $ and $b=f\left(  1\right)  $.
\pause


\item For any $x\in \widehat{P}$ and $\ell\in\mathbb{N}$, write%
\[
x_{\ell}:=\left(  R^{\ell}f\right)  \left(  x\right)  .
\]
Thus, $x_{0}=f\left(  x\right)  $ and $0_{\ell}=a$ and $1_{\ell}=b$. \pause


\item The definition of $R$ yields
\[
\left(  Rf\right)  \left(  v\right)  =\left(  \sum\limits_{u\lessdot
v}f\left(  u\right)  \right)  \cdot\overline{f\left(  v\right)  }%
\cdot\overline{\sum\limits_{u\gtrdot v}\overline{\left(  Rf\right)  \left(
u\right)  }}\ \ \ \ \ \ \ \ \ \ \text{for each }v\in P.
\]
(In both sums, $u$ ranges over $\widehat{P}$; this is implied from now on.)
\pause


\item In other words,%
\[
v_{1}=\left(  \sum\limits_{u\lessdot v}u_{0}\right)  \cdot\overline{v_{0}%
}\cdot\overline{\sum\limits_{u\gtrdot v}\overline{u_{1}}}%
\ \ \ \ \ \ \ \ \ \ \text{for each }v\in P.
\]

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Transition equation}

\begin{itemize}
\item We have just shown that
\[
v_{1}=\left(  \sum\limits_{u\lessdot v}u_{0}\right)  \cdot\overline{v_{0}%
}\cdot\overline{\sum\limits_{u\gtrdot v}\overline{u_{1}}}
\ \ \ \ \ \ \ \ \ \ \text{for each }v\in P.\text{ \pause}
\]


\item Similarly,
\[
v_{\ell+1}=\left(  \sum\limits_{u\lessdot v}u_{\ell}\right)  \cdot
\overline{v_{\ell}}\cdot\overline{\sum\limits_{u\gtrdot v}\overline{u_{\ell
+1}}}\ \ \ \ \ \ \ \ \ \ \text{for each }v\in P\text{ and }\ell\in\mathbb{N}.
\]
\pause

\item So far, we have just rewritten our
setup using the (more convenient)
$x_{\ell}:=\left(  R^{\ell}f\right)  \left(  x\right)$
notation.

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Simplifying the goal}

\begin{itemize}
\item We must prove:%
\begin{align*}
\text{\textbf{periodicity}}  &  \mathbf{:}\text{ }x_{p+q}=a\overline{b}\cdot
x_{0}\cdot\overline{a}b;\\
\text{\textbf{reciprocity}}  &  \mathbf{:}\text{ }x_{\ell}=a\cdot
\overline{y_{\ell-i-j+1}}\cdot b\\
&  \ \ \ \ \ \ \ \ \ \ \text{if }x=\left(  i,j\right)  \text{ and }y=\left(
p+1-i,q+1-j\right)  .\text{ \pause}%
\end{align*}


\item Periodicity follows from reciprocity: Indeed, if $x=\left(  i,j\right)
$ and $x^{\prime}=\left(  p+1-i,q+1-j\right)  $, then
\begin{align*}
x_{p+q} &  =a\cdot\overline{x_{p+q-i-j+1}^{\prime}}\cdot
b\ \ \ \ \ \ \ \ \ \ \left(  \text{by reciprocity}\right)  \\
&  =a\cdot\overline{a\cdot\overline{x_{0}}\cdot b}\cdot
b\ \ \ \ \ \ \ \ \ \ \left(  \text{by reciprocity again}\right)  \\
&  =a\overline{b}\cdot x_{0}\cdot\overline{a}b.\text{ }%
\end{align*}
\pause Thus, it suffices to prove reciprocity. \pause

\item Moreover, reciprocity in general follows from reciprocity for
$\ell=i+j-1$ (just apply it to $R^{\ell-\left(i+j-1\right)}f$ instead of $f$ otherwise).
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Paths, $\downslack$s and $\upslack$s}

\begin{itemize}
\item A \textbf{path} shall mean a sequence $\left(  v_{0}\gtrdot v_{1}%
\gtrdot\cdots\gtrdot v_{k}\right)  $ of elements of $\widehat{P}$. We call it
a path from $v_{0}$ to $v_{k}$.\pause


\item For each $v\in P$ and $\ell\in\mathbb{N}$, set
\[
\downslack_{\ell}^{v}:=v_{\ell}\cdot\overline{\sum\limits_{u\lessdot v}u_{\ell}%
}\ \ \ \ \ \ \ \ \ \ \text{and}\ \ \ \ \ \ \ \ \ \ \upslack_{\ell}%
^{v}:=\overline{\sum\limits_{u\gtrdot v}\overline{u_{\ell}}}\cdot
\overline{v_{\ell}}.
\]
Also, set $\downslack_{\ell}^{v}=\upslack_{\ell}^{v}=1$ when $v\in\left\{
0,1\right\}  $. \pause


\item For any path $\mathbf{p}=\left(  v_{0}\gtrdot v_{1}\gtrdot\cdots\gtrdot
v_{k}\right)  $, set
\[
\downslack_{\ell}^{\mathbf{p}}:=\downslack_{\ell}^{v_{0}}\downslack_{\ell}^{v_{1}}%
\cdots\downslack_{\ell}^{v_{k}}\ \ \ \ \ \ \ \ \ \ \text{and}%
\ \ \ \ \ \ \ \ \ \ \upslack_{\ell}^{\mathbf{p}}:=\upslack_{\ell}^{v_{0}}%
\upslack_{\ell}^{v_{1}}\cdots\upslack_{\ell}^{v_{k}}.\text{ \pause}%
\]


\item If $u$ and $v$ are elements of $\widehat{P}$, set%
\begin{align*}
\downslack_{\ell}^{u\rightarrow v}  &  :=\sum_{\mathbf{p}\text{ is a path from
}u\text{ to }v}\downslack_{\ell}^{\mathbf{p}}\ \ \ \ \ \ \ \ \ \ \text{and}\\
\upslack_{\ell}^{u\rightarrow v}  &  :=\sum_{\mathbf{p}\text{ is a path from
}u\text{ to }v}\upslack_{\ell}^{\mathbf{p}}.
\end{align*}

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Path formulas}

\begin{itemize}
\item \textbf{Path formulas:}

\begin{itemize}
\item[\textbf{(a)}] We have%
\[
u_{\ell}=\overline{\upslack_{\ell}^{1\rightarrow u}}\cdot
b\ \ \ \ \ \ \ \ \ \ \text{for each }u\in P.
\]


\item[\textbf{(b)}] We have%
\[
u_{\ell}=\downslack_{\ell}^{u\rightarrow0}\cdot a\ \ \ \ \ \ \ \ \ \ \text{for
each }u\in P.\text{ \pause}
\]

\end{itemize}

\only<2-6>{
\item \textit{Proof idea:} The subscript $\ell$ is constant throughout.
Hence, we omit it, writing $\upslack^v$ for $\upslack^v_\ell$. \medskip
}
\pause

\only<3-5>{
\textbf{(a)} Rewrite the claim as
$\upslack^{1\to u} = b \overline{u_\ell}$. \pause \\
Prove this by downwards induction on $u$. \pause \\
Induction step: Let $v \in P$ be such that
$\upslack^{1 \to u} = b \overline{u_\ell}$ holds for all $u \gtrdot v$.
Since any path $1 \to v$ passes through a unique $u \gtrdot v$,
we have
\begin{align*}
\upslack^{1\to v} &= \sum_{u \gtrdot v} \upslack^{1 \to u} \upslack^v 
= \sum_{u \gtrdot v} b \overline{u_\ell} \upslack^v
\qquad \left(\text{by induction hypothesis}\right) \\
& = b \overline{v_\ell} \qquad \left(\text{by definition of $\upslack^v$}\right),
\qquad \text{qed.}
\end{align*}
}
\pause

\only<6>{
\textbf{(b)} Analogous, but use upwards induction instead.}
\pause \pause \pause

\begin{itemize}
\item[\textbf{(c)}] We have%
\[
u_{\ell}=\overline{\upslack_{\ell}^{\left(p,q\right)\rightarrow u}}\cdot
b\ \ \ \ \ \ \ \ \ \ \text{for each }u\in P.
\]


\item[\textbf{(d)}] We have%
\[
u_{\ell}=\downslack_{\ell}^{u\rightarrow \left(1,1\right)}\cdot a\ \ \ \ \ \ \ \ \ \ \text{for
each }u\in P.\text{ \pause}
\]

\end{itemize}

\item \textit{Proof idea:} Each path $1 \to u$ begins with
the step $1 \gtrdot \left(p,q\right)$.
Thus,
$\upslack_{\ell}^{1\rightarrow u}
= \upslack_{\ell}^{\left(p,q\right)\rightarrow u}$
(since $\upslack_{\ell}^{1} = 1$).
Hence, \textbf{(c)} follows from \textbf{(a)}.
\pause

Similarly, \textbf{(d)} follows from \textbf{(b)}.

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Transition equation in $\downslack$-$\upslack$-form}

\begin{itemize}

\item \textbf{Transition equation in }$\downslack$\textbf{-}$\upslack$%
\textbf{-form:}%
\[
\upslack_{\ell+1}^{v}=\downslack_{\ell}^{v}\ \ \ \ \ \ \ \ \ \ \text{for each }%
v\in\widehat{P}\text{ and }\ell\in\mathbb{N}.\text{ \pause}%
\]

\item \textit{Proof idea:} Above we showed that%
\[
v_{\ell+1}=\left(  \sum\limits_{u\lessdot v}u_{\ell}\right)  \cdot
\overline{v_{\ell}}\cdot\overline{\sum\limits_{u\gtrdot v}\overline{u_{\ell
+1}}}.
\]
Take reciprocals on both sides, multiply by $\overline{\sum\limits_{u\gtrdot
v}\overline{u_{\ell+1}}}$ and rewrite using $\upslack_{\ell+1}^{v}$ and
$\downslack_{\ell}^{v}$.
\pause

\item \textbf{Corollary of the transition equation:}
\[
\upslack_{\ell+1}^{\mathbf{p}}=\downslack_{\ell}^{\mathbf{p}}%
\ \ \ \ \ \ \ \ \ \ \text{for each path }\mathbf{p}\text{ and each }\ell
\in\mathbb{N}.
\]
\pause Hence, $\upslack_{\ell+1}^{u\rightarrow v}=\downslack_{\ell}^{u\rightarrow
v}$ for any $u,v\in \widehat{P}$.

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Reciprocity at $(1, 1)$}

\begin{itemize}


\item Now, for the bottommost element $\left(  1,1\right)  $ of $P$, we have
\begin{align*}
\left(  1,1\right)  _{1}  &  =\overline{\upslack_{1}^{\left(p,q\right)
\rightarrow\left(
1,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{by path formula
\textbf{(c)}}\right) \\
&  =\overline{\downslack_{0}^{\left(p,q\right)\rightarrow \left(1,1\right)}}
\cdot b\ \ \ \ \ \ \ \ \ \ \left(
\text{since }\upslack_{\ell+1}^{u\rightarrow v}=\downslack_{\ell}^{u\rightarrow
v}\right) \\
&  =a\cdot\overline{\left(  p,q\right)  _{0}}\cdot
b\ \ \ \ \ \ \ \ \ \ \left(  \text{by path formula \textbf{(d)}}\right)  .
\end{align*}
Thus, reciprocity is proved for $i=j=1$. \pause


\item What now?
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ The case $j = 1$ suffices: part 1}

\begin{itemize}
\item We can simplify our goal a bit further. Consider the ``neighborhood''
of an element of our rectangle $P$:
\[%
%TCIMACRO{\TeXButton{5-X}{\xymatrix@R=0.6pc@C=1pc{
%u \are[rd] & & v \are[ld] && \text{(rank $k+1$)}\\
%& m \are[rd] \are[ld] &  && \text{(rank $k$)} \\
%s & & t && \text{(rank $k-1$)}
%}}}%
%BeginExpansion
\xymatrix@R=0.6pc@C=1pc{
u \are[rd] & & v \are[ld] && \text{(rank $k+1$)}\\
& m \are[rd] \are[ld] &  && \text{(rank $k$)} \\
s & & t && \text{(rank $k-1$)}
}%
%EndExpansion
\]
(where the \textbf{rank} of an $\left(  i,j\right)  \in P$ is defined to be
$i+j-1$).

Say we have shown (our ``induction hypotheses'')
that reciprocity holds for each of $s,t,m,u$; that is, we
have%
\begin{align*}
s_{\ell}  &  =a\cdot\overline{s_{\ell-\left(  k-1\right)  }^{\prime}}\cdot
b,\ \ \ \ \ \ \ \ \ \ t_{\ell}=a\cdot\overline{t_{\ell-\left(  k-1\right)
}^{\prime}}\cdot b,\\
m_{\ell}  &  =a\cdot\overline{m_{\ell-k}^{\prime}}\cdot
b,\ \ \ \ \ \ \ \ \ \ u_{\ell}=a\cdot\overline{u_{\ell-\left(  k+1\right)
}^{\prime}}\cdot b
\end{align*}
for all sufficiently high $\ell$, where $x^{\prime}$ denotes the antipode of
$x$ (that is, if $x=\left(  i,j\right)  $, then $x^{\prime}=\left(
p+1-i,q+1-j\right)  $). \pause


\textbf{Claim:} Then, reciprocity also holds for $v$; that is, we have
$v_{\ell}=a\cdot\overline{v_{\ell-\left(  k+1\right)  }^{\prime}}\cdot b$
for all $\ell \geq k+1$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ {\color{yellow}The case $j = 1$ suffices: part 2}}

\begin{itemize}
\item \textit{Proof idea.} Fix $\ell\geq k+1$, and compare the transition
equations%
\begin{align*}
m_{\ell}  &  =\left(  s_{\ell-1}+t_{\ell-1}\right)  \cdot\overline{m_{\ell-1}%
}\cdot\overline{\overline{u_{\ell}}+\overline{v_{\ell}}}%
\ \ \ \ \ \ \ \ \ \ \text{and}\\
m_{\ell-k}^{\prime}  &  =\left(  u_{\ell-k-1}^{\prime}+v_{\ell-k-1}^{\prime
}\right)  \cdot\overline{m_{\ell-k-1}^{\prime}}\cdot\overline{\overline
{s_{\ell-k}^{\prime}}+\overline{t_{\ell-k}^{\prime}}}%
\end{align*}
using the induction hypotheses
$m_{\ell}   =a\cdot\overline{m_{\ell-k}^{\prime}}\cdot b$,
\begin{align*}
s_{\ell-1}  &  =a\cdot\overline{s_{\ell-k}^{\prime}}\cdot
b,\ \ \ \ \ \ \ \ \ \ t_{\ell-1}=a\cdot\overline{t_{\ell-k}^{\prime}}\cdot
b,\\
m_{\ell-1}  &  =a\cdot\overline{m_{\ell-1-k}^{\prime}}\cdot
b,\ \ \ \ \ \ \ \ \ \ u_{\ell}=a\cdot\overline{u_{\ell-\left(  k+1\right)
}^{\prime}}\cdot b,
\end{align*}
noting that \vspace{-0.5pc}
\[%
%TCIMACRO{\TeXButton{5-X}{\scalebox{0.8}{
%\xymatrix@R=0.7pc@C=1pc{
%u \are[rd] & & v \are[ld] & & & t' \are[rd] & & s' \are[ld] \\
%& m \are[rd] \are[ld] & & \Longrightarrow& & & m' \are[ld] \are[rd] \\
%s & & t & & & v' & & u'\ \ .
%}
%}}}%
%BeginExpansion
\scalebox{0.8}{
\xymatrix@R=0.7pc@C=1pc{
u \are[rd] & & v \are[ld] & & & t' \are[rd] & & s' \are[ld] \\
& m \are[rd] \are[ld] & & \Longrightarrow& & & m' \are[ld] \are[rd] \\
s & & t & & & v' & & u'\ \ .
}
}%
%EndExpansion
\]


\pause \only<2>{After subtracting $u_{\ell}=a\cdot\overline{u_{\ell-\left(
k+1\right)  }^{\prime}}\cdot b$, out comes $v_{\ell}=a\cdot\overline
{v_{\ell-\left(  k+1\right)  }^{\prime}}\cdot b$.} \pause


\item This argument still works if $s$, $t$ or $u$ does not exist. \pause


\item Thus, in order to prove reciprocity for all $\left(  i,j\right)  $, it
suffices (by induction) to prove it in the case when $j=1$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Where are we?}

\begin{itemize}
\item So we have proved reciprocity for $i=j=1$, and we need to prove it for
$j=1$. \pause


\item The next case to try is $\left(  i,j\right)  =\left(  2,1\right)  $. We
need to show that%
\[
\left(  2,1\right)  _{2}=a\cdot\overline{\left(  p-1,q\right)  _{0}}\cdot
b.\text{ \pause}%
\]


\item Using the path formulas (as in the case $i=j=1$), we can boil this down
to%
\[
\downslack_{1}^{\left(  p,q\right)  \rightarrow\left(  2,1\right)  }=\upslack
_{1}^{\left(  p-1,q\right)  \rightarrow\left(  1,1\right)  }.\text{ \pause}%
\]
Note the lack of rowmotion in this formula! The $\ell$ here is constantly $1$,
so it is a property of a single labelling. Thus, we drop the subscripts.

\item \textbf{Our new goal:} Prove that%
\[
\downslack^{\left(  p,q\right)  \rightarrow\left(  2,1\right)  }=\upslack^{\left(
p-1,q\right)  \rightarrow\left(  1,1\right)  }.
\]

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ The conversion lemma}

\begin{minipage}[c][0.8\textheight]{\textwidth}
\begin{columns}[T]
\begin{column}{0.5\textwidth}

\begin{itemize}
\item More generally:

\item \textbf{Conversion lemma:}

Let $u$ and $u'$ be two adjacent elements on the top-right
edge of $P$ (that is, $u=\left(  k,q\right)$ and $u^{\prime}=\left(  k-1,q\right)$).
Let $d$ and $d'$ be two adjacent elements on the bottom-left edge of $P$ (that is, $d=\left(  i,1\right)$
 and $d^{\prime}=\left(  i-1,1\right)$). Then,
\[
\downslack_{\ell}^{u\rightarrow d}=\upslack_{\ell}^{u^{\prime}\rightarrow
d^{\prime}}\qquad\text{for each }\ell\in\mathbb{N}\text{.}
\]
In short:
\[
\downslack^{u\rightarrow d}=\upslack^{u^{\prime}\rightarrow
d^{\prime}}.
\]

\end{itemize}

\end{column}

\begin{column}{0.5\textwidth}
\vspace{0.5pc}
\begin{tikzpicture}[scale=0.6]
\coordinate (W) at (-3, 0);
\coordinate (N) at (0, 3);
\coordinate (E) at (6, -3);
\coordinate (S) at (3, -6);
\draw[thick] (W) -- (N) -- (E) -- (S) -- (W);
\coordinate [label=80:{$u$}] (u) at (1, 2);
\coordinate [label=80:{$u'$}] (u') at (1.5, 1.5);
\coordinate [label=190:{$d$}] (d) at (0.5, -3.5);
\coordinate [label=190:{$d'$}] (d') at (1, -4);
\fill (u) circle [radius=0.1];
\fill (u') circle [radius=0.1];
\fill (d) circle [radius=0.1];
\fill (d') circle [radius=0.1];
\draw[red, very thick] (u) -- (0.5, 1.5) -- (1, 1) -- (-0.5, -0.5) -- (0, -1) -- (-1, -2) -- (d);
\draw[blue, very thick] (u') -- (2.5, 0.5) -- (2, 0) -- (2.5, -0.5) -- (2, -1) -- (2.5, -1.5) -- (2, -2) -- (2.5, -2.5) -- (2, -3) -- (d');
\end{tikzpicture}
\end{column}
\end{columns}
\end{minipage}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Rowmotion begone, part 1}

\begin{itemize}
\item If we can prove the conversion lemma, we will obtain reciprocity not
only for $\left(  i,j\right)  =\left(  2,1\right)  $, but also for all
$\left(  i,j\right)  $ on the bottom-left edge of $P$ (that is, for the entire
case $j=1$), because we can argue as follows:
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Rowmotion begone, part 2}

\vspace{-1pc}%
\begin{align*}
\left(  i,1\right)  _{i}  &  =\overline{\upslack_{i}^{\left(p,q\right)
\rightarrow\left(
i,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{by path formula
\textbf{(c)}}\right) \\
&  =\overline{\downslack_{i-1}^{\left(  p,q\right)  \rightarrow\left(  i,1\right)
}}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{since }\upslack_{\ell+1}%
^{u\rightarrow v}=\downslack_{\ell}^{u\rightarrow v}\right) \\
&  =\overline{\upslack_{i-1}^{\left(  p-1,q\right)  \rightarrow\left(
i-1,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{by the conversion
lemma}\right) \\
&  =\overline{\downslack_{i-2}^{\left(  p-1,q\right)  \rightarrow\left(
i-1,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{since }\upslack
_{\ell+1}^{u\rightarrow v}=\downslack_{\ell}^{u\rightarrow v}\right) \\
&  =\overline{\upslack_{i-2}^{\left(  p-2,q\right)  \rightarrow\left(
i-2,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{by the conversion
lemma}\right) \\
&  =\cdots\\
&  =\overline{\upslack_{1}^{\left(  p-i+1,q\right)  \rightarrow\left(
1,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{by the conversion
lemma}\right) \\
&  =\overline{\downslack_{0}^{\left(  p-i+1,q\right)  \rightarrow\left(
1,1\right)  }}\cdot b\ \ \ \ \ \ \ \ \ \ \left(  \text{since }\upslack_{\ell
+1}^{u\rightarrow v}=\downslack_{\ell}^{u\rightarrow v}\right) \\
&  =a\cdot\overline{\left(  p-i+1,q\right)  _{0}}\cdot
b\ \ \ \ \ \ \ \ \ \ \left(  \text{by path formula \textbf{(d)}}\right)  .
\end{align*}

\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Rowmotion begone, part 3}

\begin{itemize}
\item This proves reciprocity
\[
\left(i,1\right)_\ell = a\cdot\overline{\left(  p-i+1,q\right)  _{\ell-i}}\cdot b
\]
for $\ell=i$. \pause

The case $\ell>i$ follows by applying this to $R^{\ell-i}f$ instead of $f$. \pause


\item Thus, we only need to prove the conversion lemma. We can now drop all
subscripts forever!
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Proving the conversion lemma: the intuition}

\begin{itemize}
\item Let us again look at the picture:
\[
\begin{tikzpicture}[scale=0.6]
\coordinate (W) at (-3, 0);
\coordinate (N) at (0, 3);
\coordinate (E) at (6, -3);
\coordinate (S) at (3, -6);
\draw[thick] (W) -- (N) -- (E) -- (S) -- (W);
\coordinate [label=80:{$u$}] (u) at (1, 2);
\coordinate [label=80:{$u'$}] (u') at (1.5, 1.5);
\coordinate [label=190:{$d$}] (d) at (0.5, -3.5);
\coordinate [label=190:{$d'$}] (d') at (1, -4);
\fill (u) circle [radius=0.1];
\fill (u') circle [radius=0.1];
\fill (d) circle [radius=0.1];
\fill (d') circle [radius=0.1];
\draw[red, very thick] (u) -- (0.5, 1.5) -- (1, 1) -- (-0.5, -0.5) -- (0, -1) -- (-1, -2) -- (d);
\draw[blue, very thick] (u') -- (2.5, 0.5) -- (2, 0) -- (2.5, -0.5) -- (2, -1) -- (2.5, -1.5) -- (2, -2) -- (2.5, -2.5) -- (2, -3) -- (d');
\end{tikzpicture}
\]
We must prove $\downslack^{u\rightarrow d}=\upslack^{u^{\prime}\rightarrow
d^{\prime}}$. \pause


\item How do we interpolate between paths $u\rightarrow d$ and paths
$u^{\prime}\rightarrow d^{\prime}$ ?
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Proving the conversion lemma: path-jump-paths}

\begin{itemize}
\item We define a \textbf{path-jump-path} to be a sequence
\[
\mathbf{p}=\left(  v_{0}\gtrdot v_{1}\gtrdot\cdots\gtrdot v_{i}%
\blacktriangleright v_{i+1}\gtrdot v_{i+2}\gtrdot\cdots\gtrdot v_{k}\right)
\]
of elements of $P$, where the relation $x\blacktriangleright y$ means
\textquotedblleft$y$ is one step down and some steps to the right of
$x$\textquotedblright\ (that is, if $x=\left(  r,s\right)  $, then $y=\left(
r-m,s+m-1\right)  $ for some $m>0$).

We say that this path-jump-path $\mathbf{p}$ has \textbf{jump at }$i$.
\pause

\only<2>{\textbf{Example} of a path-jump-path:
\[
\begin{tikzpicture}[scale=0.5]
\fill (0, 5) circle [radius=0.1];
\fill (1, 4) circle [radius=0.1];
\fill (2, 3) circle [radius=0.1];
\fill (1, 2) circle [radius=0.1];
\fill (2, 1) circle [radius=0.1];
\fill (1, 0) circle [radius=0.1];
\fill (4, -1) circle [radius=0.1];
\fill (5, -2) circle [radius=0.1];
\fill (4, -3) circle [radius=0.1];
\fill (3, -4) circle [radius=0.1];
\draw[very thick] (0, 5) -- (1, 4) -- (2, 3) -- (1, 2) -- (2, 1) -- (1, 0);
\draw[red, very thick] (1, 0) -- (4, -1);
\draw[very thick] (4, -1) -- (5, -2) -- (4, -3) -- (3, -4);
\end{tikzpicture}
\qquad \text{(The red edge is the jump.)}
\]
}
\pause


For any such path-jump-path $\mathbf{p}$, we set%
\[
E_{\mathbf{p}}:=\downslack^{v_{0}}\downslack^{v_{1}}\cdots\downslack^{v_{i-1}}%
v_{i}\overline{v_{i+1}}\upslack^{v_{i+2}}\upslack^{v_{i+3}}\cdots\upslack^{v_{k}} .
\]
\only<3>{(Here, we are omitting the $\ell$ subscripts -- so $v_{i}$ means
$\left(  v_{i}\right)  _{\ell}$ and $v_{i+1}$ means $\left(  v_{i+1}\right)
_{\ell}$.)} \pause


\item \only<4>{\vspace{-1pc}Now, if $k=\operatorname*{rank}u-\operatorname*{rank}%
\left(  d^{\prime}\right)  $, then%
\[
\downslack^{u\rightarrow d}=\sum_{\substack{\mathbf{p}\text{ is a path-jump-path
}u\rightarrow d^{\prime}\\\text{with jump at }k-1}}E_{\mathbf{p}},
\]
since $\downslack^{d}=d\overline{d^{\prime}}$, and similarly%
\[
\upslack^{u^{\prime}\rightarrow d^{\prime}}=\sum_{\substack{\mathbf{p}\text{ is
a path-jump-path }u\rightarrow d^{\prime}\\\text{with jump at }0}%
}E_{\mathbf{p}}.
\]
}

\end{itemize}
\vspace{9pc}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Proving the conversion lemma: moving the jump}

\begin{itemize}
\item So we need to show that%
\[
\sum_{\substack{\mathbf{p}\text{ is a path-jump-path }u\rightarrow d^{\prime
}\\\text{with jump at }k-1}}E_{\mathbf{p}}=\sum_{\substack{\mathbf{p}\text{ is a
path-jump-path }u\rightarrow d^{\prime}\\\text{with jump at }0}}E_{\mathbf{p}%
}.\text{ \pause}%
\]


\item Reasonable to expect that%
\[
\sum_{\substack{\mathbf{p}\text{ is a path-jump-path }u\rightarrow d^{\prime
}\\\text{with jump at }i}}E_{\mathbf{p}}=\sum_{\substack{\mathbf{p}\text{ is a
path-jump-path }u\rightarrow d^{\prime}\\\text{with jump at }i+1}%
}E_{\mathbf{p}}%
\]
for each $0\leq i<k-1$. \pause


\item This is indeed true and can be proved by a \textquotedblleft
local\textquotedblright\ argument
(rewriting two consecutive steps of the path).
 \pause


\item This is similar to the \textquotedblleft zipper
argument\textquotedblright\ in lattice models. (Is there a Yang--Baxter
equation lurking?)
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ \color{yellow}Proving the conversion lemma: the civilized version, part 1}

\begin{itemize}
\item Modulo the details omitted, this finishes the proof of the reciprocity
theorem. \pause


\item However, the path-jump-path argument is somewhat messy. We can make it
slicker by rewriting it in matrix notation: \pause


\item Define three $P\times P$-matrices $\bfdownslack$, $\bfupslack$ and $\mathbf{U}$ by%
\begin{align*}
\bfdownslack_{x,y}  &  :=\downslack^{x}\left[  x\gtrdot y\right]
,\ \ \ \ \ \ \ \ \ \ \bfupslack_{x,y}:=\upslack^{y}\left[  x\gtrdot y\right]  ,\\
\mathbf{U}_{x,y}  &  :=x\overline{y}\left[  x\blacktriangleright y\right]  
\ \ \ \ \ \ \ \ \  \ \ \text{for all } x, y \in P.
\end{align*}
Here, $\left[  \mathcal{A}\right]  $ is the Iverson bracket (i.e., truth
value) of a statement $\mathcal{A}$; the relation $x\blacktriangleright y$
means \textquotedblleft$y$ is one step down and some steps to the right of
$x$\textquotedblright\ as before. And again, we are omitting the $\ell$
subscripts, so $x\overline{y}$ actually means $x_{\ell}\overline{y_{\ell}}$.

\item Now, we claim that%
\[
\bfdownslack \mathbf{U} = \mathbf{U}\bfupslack.
\]

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ \color{yellow}Proving the conversion lemma: the civilized version, part 2}

\begin{itemize}
\item Now, we claim that $\bfdownslack \mathbf{U} = \mathbf{U}\bfupslack$.

Indeed, this follows easily from the following neat lemma: If%
\[%
%TCIMACRO{\TeXButton{4-square}{\xymatrix@R=0.6pc@C=0.6pc{
%& u \are[dl] \are[dr] \\
%v \are[dr] & & w \are[dl] \\
%& d
%}}}%
%BeginExpansion
\xymatrix@R=0.6pc@C=0.6pc{
& u \are[dl] \are[dr] \\
v \are[dr] & & w \are[dl] \\
& d
}%
%EndExpansion
\]
are four adjacent elements of $P$, then%
\[
\overline{w}\cdot\upslack^{d}\cdot d=\overline{u}\cdot\downslack^{u}\cdot
v\ \ \ \ \ \ \ \ \ \ \text{and}\ \ \ \ \ \ \ \ \ \ \overline{v}\cdot\upslack
^{d}\cdot d=\overline{u}\cdot\downslack^{u}\cdot w.
\]
\only<1>{(The $u$ and $d$ here are unrelated to the $u$ and $d$
from the conversion lemma!)}
\vspace{-0.5pc}
\pause

\item From $\bfdownslack \mathbf{U} = \mathbf{U}\bfupslack$, we easily obtain%
\[
\bfdownslack^{\circ k} \mathbf{U}
= \mathbf{U} \bfupslack^{\circ k}\ \ \ \ \ \ \ \ \ \ \text{for any }%
k\in\mathbb{N},
\]
where $A^{\circ k}$ means the $k$-th power of a matrix $A$. \pause


\item Setting $k=\operatorname*{rank}u-\operatorname*{rank}d$ and comparing
the $\left(  u,d^{\prime}\right)  $-entries of both sides, we quickly obtain
$\downslack^{u\rightarrow d}=\upslack^{u^{\prime}\rightarrow d^{\prime}}$ (since
$x\blacktriangleright d^{\prime}$ holds only for $x=d$, and since
$u\blacktriangleright x$ holds only for $x=u^{\prime}$). This proves the
conversion lemma again.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Is that all? Part 1: Semirings}

\begin{itemize}
\item We consider these new proofs to be simpler and nicer than our 2014 one
for the commutative case. \pause


\item However, in some sense they are still imperfect. \pause


\item \textbf{Recall:} Classical rowmotion is (a restriction of) birational
rowmotion on the tropical \textbf{semi}field. \pause

Semifields are not rings! (No subtraction.) \pause

In the \textbf{commutative} case, the theorems hold for semifields
(and, more generally, commutative semirings) because they hold for fields
and because they are ``essentially'' polynomial identities (once you
clear denominators). \pause

% Back in 2014, when we assumed commutativity, we could argue that because our
% theorems held for fields, they would also hold for semifields (and, more
% generally, commutative semirings) for \textquotedblleft permanence of
% identities\textquotedblright\ reasons (clear denominators, compare polynomial
% coefficients). \pause

This \textbf{fails} for noncommutative $\mathbb{K}$!

\item \textbf{Scary example} ({\color{red}{David Speyer,
\href{https://mathoverflow.net/a/401273/}{MathOverflow \#401273}}}): If $x$ and
$y$ are two elements of a ring such that $x+y$ is invertible, then%
\[
x\cdot\overline{x+y}\cdot y=y\cdot\overline{x+y}\cdot x.
\]
But this is not true if \textquotedblleft ring\textquotedblright\ is replaced
by \textquotedblleft semiring\textquotedblright!
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Is that all? Part 2: The semiring question}

\begin{itemize}
\item Thus, we are left with a
\end{itemize}

\begin{block}{\textbf{Question:}}
Are the periodicity and reciprocity theorems still true if ``ring'' is replaced by ``semiring''?
(I.e., we no longer require $\mathbb{K}$ to have a subtraction.)
\end{block}
\pause

\begin{itemize}
\item Note that the main hurdle is the argument that reduced the general case
to the $j=1$ case. That argument used subtraction!
\pause


\item We have partial results, e.g., for $p=q=3$ and for $p=2$.
\only<3>{
\vspace{10pc}
}

\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Is that all? Part 3: Other posets}

\begin{itemize}

\item Other posets remain to be studied.

\begin{block}{\textbf{Conjecture:}}
Let $P$ be the triangle-shaped poset $\Delta\tup{p}$ or
its reflection $\nabla\tup{p}$.
Let $f \in \mathbb{K}^{\widehat P}$ be a labelling such
that $R^p f$ exists.
Let $a = f\tup{0}$ and $b = f\tup{1}$.
Then, for each $x \in \widehat{P}$, we have
\[
\tup{R^pf}\tup{x} = a \overline b \cdot f\tup{x'}
\cdot \overline a b ,
\]
where $x'$ is the reflection of $x$ across the $y$-axis.
\end{block}
\only<1>{
\[
\xymatrixrowsep{0.9pc}\xymatrixcolsep{0.20pc}\xymatrix{
& & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \\
& \fullmoon \ar@{-}[ld] \ar@{-}[rd] & & \fullmoon \ar@{-}[ld] \ar@{-}[rd] & \\
\fullmoon & & \fullmoon & & \fullmoon
}
\]
}
\pause

\item We have a similar conjecture for other kinds
of triangles and (still unproved even in the commutative
case!) for trapezoids.
\pause

\only<3>{
\item As already mentioned, other simple posets such
as
\[
\xymatrixrowsep{1.5pc}\xymatrix{ \fullmoon \ar@{-}[dr] & \fullmoon \ar@{-}[d] & \fullmoon \ar@{-}[dl] \\ & \fullmoon }\phantom{xx}
\]
do not have periodic behavior for noncommutative
$\mathbb{K}$.
}
\pause

\begin{block}{\textbf{Question:}}
What other results like ours are known in the noncommutative case?
\end{block}

\end{itemize}
\vspace{10pc}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Acknowledgments}

\begin{itemize}
\item \textbf{Tom Roby}: collaboration

\item \textbf{Mathematisches Forschungsinstitut Oberwolfach}: hospitality in
July/August 2021

\item \textbf{Banff International Research Station}: 2021 conference where
this was first presented

\item \textbf{Anna Pun}: invitation

\item \textbf{Michael Joseph, Tim Campion, Max Glick, Maxim
Kontsevich, Gregg Musiker, Pace Nielsen, James Propp, Pasha Pylyavskyy, Bruce
Sagan, Roland Speicher, David Speyer, Hugh Thomas, and Jurij Volcic}: discussions

\item \textbf{Sage and Sage-combinat}: computations

\item \textbf{the birational combinatorics community}: keeping the subject
exciting since 2013

\item \textbf{you}: your patience
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ Some references}

\begin{itemize}
\item {\small David Einstein, James Propp, \textit{Combinatorial,
piecewise-linear, and birational homomesy for products of two chains}, 2013.
{\color{red} \url{http://arxiv.org/abs/1310.5294}} }

\item {\small David Einstein, James Propp, \textit{Piecewise-linear and
birational toggling}, 2014. {\color{red}
\url{https://arxiv.org/abs/1404.3455}} }

\item {\small Darij Grinberg, Tom Roby, \textit{Iterative properties of
birational rowmotion}, 2014. {\color{red}
\url{http://arxiv.org/abs/1402.6178}} }

\item {\small Michael Joseph, Tom Roby, \textit{Birational and noncommutative
lifts of antichain toggling and rowmotion}, 2019. {\color{red}
\url{https://arxiv.org/abs/1909.09658}} }

\item {\small Michael Joseph, Tom Roby, \textit{A birational lifting of the
Stanley--Thomas word on products of two chains}, 2020. {\color{red}
\url{https://arxiv.org/abs/2001.03811}} }

\item {\small Gregg Musiker, Tom Roby, \textit{Paths to Understanding
Birational Rowmotion on Products of Two Chains}, 2019. {\color{red}
\url{https://arxiv.org/abs/1801.03877}} }
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ \color{yellow}The Y-system connection}

\begin{itemize}

\item \textbf{Zamolodchikov periodicity conjecture in type AA} (proved by A.~Yu.~Volkov, {\red \arxiv{hep-th/0606094v1}}):
Let $r$ and $s$ be positive integers.
Let $Y_{i,\ j,\ k}$ be elements of a commutative ring for $i \in [r]$
and $j \in [s]$ and $k \in \ZZ$.
Assume that
\[
Y_{i,\ j,\ k+1} Y_{i,\ j,\ k-1}
=
\dfrac{(1 + Y_{i+1,\ j,\ k}) (1 + Y_{i-1,\ j,\ k})}{(1 + 1/Y_{i,\ j+1,\ k}) (1 + 1/Y_{i,\ j-1,\ k})}
\]
for all $i,\ j,\ k$, where sums involving ``off-grid'' points (e.g., $1 + Y_{0,\ j,\ k}$) are understood as $1$. \\
Then, $Y_{i,\ j,\ k+2(r+s+2)} = Y_{i,\ j,\ k}$ for all $i,\ j,\ k$.
\pause

\item \textbf{Observation (Max Glick and others, ca. 2015?):}
This is equivalent to periodicity of birational rowmotion ($R^{p+q} = 1$)
for $[p]\times [q]$, where $p = r+1$ and $q = s+1$, when the ring is commutative.
\only<2>{Explicitly,
\[
Y_{i,\ j,\ i+j-2k} = (R^k f)(i, j+1) \ \diagup\ (R^k f)(i+1, j) .
\]
(Fine points omitted.)
\vspace{9pc}}
\pause

\item \textbf{Disappointment:}
Zamolodchikov periodicity does not generalize to noncommutative rings
(no matter how we order the five factors).

\end{itemize}
\vspace{9pc}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ \color{yellow}Johnson and Liu 2022}

\begin{itemize}

\item A recent preprint by Joseph Johnson and Ricky Ini Liu
(\textit{Birational rowmotion and the octahedron recurrence},
{\red \arxiv{2204.04255}}) reproves the ``order $p+q$''
theorem for commutative $\mathbb{K}$ in a simpler way
(besides doing a number of other interesting things).
\pause

\item The main idea of their proof is to reduce
birational rowmotion to the octahedron recurrence, and
to prove that the latter is periodic using lattice paths
and the Lindstr\"om--Gessel--Viennot (LGV) lemma.
\pause

\item We don't know if the octahedron recurrence is
well-behaved for noncommutative $\mathbb{K}$ (too many
options to check), but the LGV lemma certainly is not available.
\pause

\item Lemma 4.1 in the Johnson--Liu preprint generalizes
our conversion lemma in the commutative case from
single paths to $k$-tuples of nonintersecting paths.
We don't know how this could be done in the noncommutative
case; it is unclear in what order to multiply labels
from different paths.

\end{itemize}
\vspace{9pc}
\end{frame}

\begin{frame}
\frametitle{\ \ \ \ \color{yellow}One more little result}

\begin{block}{\textbf{Proposition (2022, G \& Roby):}}
Let $P$ be any finite poset.
Let $f\in\mathbb{K}^{\widehat{P}}$. Then,
\[
f\tup{1}\cdot\sum_{\substack{u\in\widehat{P};\\u\gtrdot0}}\overline{\left(
Rf\right)  \left(  u\right)  }\cdot f\tup{0}
=\sum_{\substack{u\in\widehat{P};\\u\lessdot1}}f\left(  u\right),  
% \qquad \text{where }
% a=f\left( 0\right)  \text{ and } b=f\left(  1\right)  ,
\]
assuming that the inverses $\overline{\left(  Rf\right)  \left(  u\right)  }$
are well-defined.
\end{block}
\pause

\begin{block}{\textbf{Corollary (2022+, G \& Roby):}}
Let $P$ be any finite poset.
Let $f\in\mathbb{K}^{\widehat{P}}$ with
$f\left( 0\right) = f\left(  1\right) = 1$.
Then, the quantity
\[
\sum_{\substack{u,v\in\widehat{P};\\u\lessdot v}}
f \left(  u\right) \cdot \overline{f\left(v\right)}
\]
is unchanged under birational rowmotion (i.e., when we
replace $f$ by $Rf$).
\end{block}


% \begin{itemize}

% \end{itemize}
\vspace{9pc}
\end{frame}

\end{document}