\documentclass[fullpage, 10pt, openany]{book}
\usepackage{sectsty, fancyhdr, amsmath}
%%%%%%%%%%%Package for writing the answers at the back of the book
\usepackage{answers}

%%%%%%%%%%Assorted packages
\usepackage{makeidx}
\usepackage{hangcaption}
\usepackage{ntheorem}
\usepackage[colorlinks=true,linkcolor=red, pdftitle={linear algebra}, pdfauthor={david santos},  bookmarksopen, dvips]{hyperref}
\usepackage{pdflscape}
\usepackage{thumbpdf}
\usepackage[dvips]{graphicx}

%%%%%                   Postscript drawing packages
\usepackage{pstricks, pstricks-add}
\usepackage{pst-plot}
\usepackage{pst-3d}
\usepackage{pst-eucl}



%%%%%%%%%%%%%%CHAPTER HEADINGS
\usepackage[Lenny]{fncychap}


%%%%%                  FONTS
\usepackage[latin1]{inputenc}
\usepackage{t1enc}
\usepackage{bookman}
\usepackage{pifont, stmaryrd} %%for the dingautolists and the proofsymbol
\usepackage{euler} %%Knuth's euler math font
\mathversion{bold}
\usepackage{mathrsfs} %%for the fancy cursive mathscr
\usepackage{amsfonts}
\usepackage{amssymb}
% Use AMS Euler Fraktur for math-mode punctuation and delimiters so these
% characters match the Euler math fonts loaded above.
% NOTE(review): this appears to mirror the standard eufrak/euler setup
% (U/euf encoding); the hex numbers are glyph slots in the euf fonts --
% confirm against eufrak.sty before changing any slot.
 \DeclareSymbolFont{EulerFraktur}{U}{euf}{m}{n}
\SetSymbolFont{EulerFraktur}{bold}{U}{euf}{b}{n}
% Map each ASCII punctuation character to its EulerFraktur glyph slot,
% with the appropriate math atom class (\mathord, \mathbin, \mathrel, ...).
\DeclareMathSymbol{!}\mathord  {EulerFraktur}{"21}
\DeclareMathSymbol{(}\mathopen {EulerFraktur}{"28}
\DeclareMathSymbol{)}\mathclose{EulerFraktur}{"29}
\DeclareMathSymbol{+}\mathbin  {EulerFraktur}{"2B}
\DeclareMathSymbol{-}\mathbin  {EulerFraktur}{"2D}
\DeclareMathSymbol{=}\mathrel  {EulerFraktur}{"3D}
\DeclareMathSymbol{[}\mathopen {EulerFraktur}{"5B}
\DeclareMathSymbol{]}\mathclose{EulerFraktur}{"5D}
\DeclareMathSymbol{"}\mathord  {EulerFraktur}{"7D}
\DeclareMathSymbol{&}\mathord  {EulerFraktur}{"26}
\DeclareMathSymbol{:}\mathrel  {EulerFraktur}{"3A}
\DeclareMathSymbol{;}\mathpunct{EulerFraktur}{"3B}
\DeclareMathSymbol{?}\mathord  {EulerFraktur}{"3F}
\DeclareMathSymbol{^}\mathord  {EulerFraktur}{"5E}
\DeclareMathSymbol{`}\mathord  {EulerFraktur}{"12}
% Delimiter forms: text size taken from EulerFraktur, display/large sizes
% from the standard largesymbols font.
\DeclareMathDelimiter{(}{EulerFraktur}{"28}{largesymbols}{"00}
\DeclareMathDelimiter{)}{EulerFraktur}{"29}{largesymbols}{"01}
\DeclareMathDelimiter{[}{EulerFraktur}{"5B}{largesymbols}{"02}
\DeclareMathDelimiter{]}{EulerFraktur}{"5D}{largesymbols}{"03}


%%%%%%%%%%PAGE FORMATTING
% Manual page geometry (these plain-TeX length assignments are what the
% layout was tuned with; the geometry package would be the modern choice).
\topmargin -.7in \textheight 9.5in \oddsidemargin -.3in
\evensidemargin -.3in \textwidth 7in



%%%%%FLOAT PLACEMENT
% Loosen LaTeX's float-placement limits: allow floats to take up most of a
% page and let a float page be only two-thirds full.
\renewcommand{\topfraction}{.85}
\renewcommand{\bottomfraction}{.7}
\renewcommand{\textfraction}{.15}
\renewcommand{\floatpagefraction}{.66}
\renewcommand{\dbltopfraction}{.66}
\renewcommand{\dblfloatpagefraction}{.66}
% Permit up to 9 floats at the top/bottom of a page and 20 per page total.
\setcounter{topnumber}{9} \setcounter{bottomnumber}{9}
\setcounter{totalnumber}{20} \setcounter{dbltopnumber}{9}


% Turtle-graphics counters and macros that draw a recursive (Hilbert-type)
% space-filling curve; used inside the custom title page below.
% NOTE(review): \L, \R and \S override standard LaTeX commands (\L = Polish
% L-slash, \S = section sign) -- safe only if the originals are never used
% in this document; confirm before reusing these names elsewhere.
\newcount\n              % recursion depth
\newcount\x\newcount\y   % pen location
\newcount\xo\newcount\yo % old pen location
\newcount\dx\newcount\dy % next step to take
\newcount\t              % temp for swap
%
\def\swp{\t=\dx\dx=\dy\dy=\t}% swap step deltas
\def\L{\swp\multiply\dx by-1}% turn so step is pi/2 left of where it was
\def\R{\swp\multiply\dy by-1}% turn so step is pi/2 right of where it was
\def\S{%% take a step with pen down
  \xo=\x \yo=\y %
  \advance\x by\dx%
  \advance\y by\dy%
  \ln{\the\xo}{\the\yo}{\the\x}{\the\y}%
}
\def\ln#1#2#3#4{\qline(#1,#2)(#3,#4)}% draw a line from old to new pen position
% represent angle, which is +-90 degrees,
% as a [dx,dy] vector instead
% \h#1#2: recursive generator; #1/#2 are the two turn macros (\L and \R),
% exchanged on alternate recursion levels. Recursion stops when \n hits 0.
\def\h#1#2{\ifnum\n=0\relax\else%
  \advance\n by-1%
  #1\h#2#1\S#2\h#1#2\S\h#1#2#2\S\h#2#1#1%
  \advance\n by1\fi%
}



%%%%Title Page
% Custom title page: title flush left above a thick rule, author flush
% right, the recursive curve (drawn with the \h macros above) centered on
% the page, and a dated "REVISION" stamp near the bottom.
\makeatletter
\def\thickhrulefill{\leavevmode \leaders \hrule height 1pt\hfill \kern \z@}
\renewcommand{\maketitle}{\begin{titlepage}%
    \let\footnotesize\small
    \let\footnoterule\relax
    \parindent \z@
    \reset@font
    \null\vfil
    \begin{flushleft}
     \@title
    \end{flushleft}
    \par
    \hrule height 4pt
    \par
    \begin{flushright}
    \@author \par
    \end{flushright}
  \vskip 60\p@
  \vspace*{\stretch{2}}
% pstricks picture of the curve, on a 2mm grid.
\psset{unit=2mm}

\bigskip
\makebox[\textwidth]{%
  \begin{pspicture}(0,0)(63,63)
% start the pen at the origin heading right; recurse to depth 6.
    \x=0 \y=0
    \dx=1 \dy=0
    \n=6 \h\L\R
  \end{pspicture}%
}   \vskip 60\p@
    \vspace*{\stretch{2}}
    \begin{center}
\Large\textsf{\today\quad REVISION}
    \end{center}
  \end{titlepage}%
  \setcounter{footnote}{0}%
}


\makeatother

\title{\textcolor{red}{\Large Linear Algebra Notes
}}
\author{\textcolor{blue}{David A. SANTOS} \\ \href{mailto:dsantos@ccp.edu}{dsantos@ccp.edu}}
%%%%%%%


\setlength{\fboxrule}{1.5pt}

%%%%%%%%%%%%%%%%%INTERVALS
%%%%%%%% lo= left open, rc = right closed, etc.
% French-style interval notation with a semicolon separator:
%   \lcrc{a}{b} = [a ; b]  (closed),   \loro{a}{b} = ]a ; b[  (open).
\def\lcrc#1#2{\left[#1 \ ; #2 \right]}
\def\loro#1#2{ \left]#1 \ ; #2 \right[}
% Half-open intervals [a ; b[ and ]a ; b], balanced with empty \left./\right.
% pairs.
% NOTE(review): \lorc appears to open with "[" rather than "]" for a
% left-open interval -- verify the typeset output matches the intent.
\def\lcro#1#2{\left[#1 \ ; #2 \right. \left[ \right.}
\def\lorc#1#2{\left. \right[#1 \ ; #2 \left.\right]}



%%%%%                Non-standard commands and symbols
% Blackboard-bold shorthands for the standard number sets.
\newcommand{\BBZ}{\mathbb{Z}}
\newcommand{\BBR}{\mathbb{R}}
\newcommand{\BBN}{\mathbb{N}}
\newcommand{\BBC}{\mathbb{C}}
\newcommand{\BBQ}{\mathbb{Q}}
\newcommand{\BBF}{\mathbb{F}}
\newcommand{\dis}{\displaystyle}
% \fun{f}{x}{f(x)}{A}{B}: typeset a function with its domain, codomain and
% rule,   f : A -> B,  x |-> f(x).
\def\fun#1#2#3#4#5{\everymath{\displaystyle}{{#1} : \vspace{1cm}
\begin{array}{ccc}{#4} & \rightarrow &
{#5}\\
{#2} &  \mapsto & {#3} \\
\end{array}}}

% NOTE(review): this silently overrides amsmath's \binom with the plain-TeX
% \choose form; output is essentially identical, so it is kept, but the line
% could simply be deleted.
\def\binom#1#2{{#1\choose#2}}
\def\T#1#2{\mathscr{T}_{#1}{#2}}
\def\gl#1#2{{\bf GL}_{#1}(#2)}% general linear group GL_n(F)
\def\algebra#1#2{\langle #1,#2\rangle}% algebra <S, op>
\def\field#1#2#3{\langle #1,#2, #3\rangle}% field <F, +, *>
\def\vecspace#1#2#3#4{\langle #1,#2, #3, #4\rangle}% vector space <V,+,.,F>
% WARNING(review): \span is a TeX primitive used by \halign and by amsmath's
% alignment code; overriding it is risky -- confirm alignment environments
% still compile before relying on this.
\def\span#1{{\rm span}\left(#1\right)}
\def\rank#1{{\rm rank}\left(#1\right)}
\def\mat#1#2{{\bf M}_{#1}(#2)}% matrices over a ring: M_n(R)
\def\norm#1{{\left|\left|#1\right|\right|}}% norm ||.||
\newcommand{\bulletproduct}{{\scriptscriptstyle \stackrel{\bullet}{{}}}}
\def\dotprod#1#2{\v{#1} \bulletproduct \v{#2}}% dot product of two vectors
\newcommand{\cross}{{\boldsymbol\times}}
\def\crossprod#1#2{\v{#1}\cross \v{#2}}% cross product of two vectors
% FIX: \sgn was defined twice. The earlier definition ({\rm signum}(...))
% was dead code because this later one silently overrode it; the duplicate
% has been removed and only the winning definition is kept.
\def\sgn#1{{\rm sgn}(#1)}
\def\tr#1{{\rm tr}\left(#1\right)}% trace
\def\adj#1{{\rm adj}\left(#1\right)}% classical adjoint (adjugate)
\def\proj#1#2{{\rm proj} _{\v{#2}} ^{\v{#1}} }% projection of #1 onto #2
\def\vol#1{{\rm volume}(#1)}
\def\ball#1#2{B_{#2}({\bf #1})}% open ball of radius #2 centered at #1
\newcommand\zeropoint{O}
\def\v#1{{\bf \overrightarrow{#1}}}% boldface arrow vector
\def\vect#1{\overrightarrow{#1}}% plain arrow vector
\newcommand{\idefun}{{\bf Id\ }}% identity map
% Pull-quote with right-aligned attribution.
\newcommand{\QUOTEME}[2]{\begin{quote}{\it\textbf{#1}}\nolinebreak[1] \blue\hspace*{\fill} \mbox{-\textsl{#2}} \hspace*{\fill}\end{quote}}

\def\bipoint#1#2{[#1, #2]}% bi-point (ordered pair of points)
\def\anglebetween#1#2{\widehat{(\v{#1}, \v{#2})}}% angle between two vectors
% FIX: a second, byte-identical definition of \rank stood here; the
% redundant duplicate has been removed (the definition above remains).
% WARNING(review): \ker overrides LaTeX's log-like operator \ker and, unlike
% the original, takes a mandatory argument.
\def\ker#1{{\rm ker}\left(#1\right)}
\def\im#1{{\rm Im}\left(#1\right)}% image of a map
\def\lcm#1{{\rm lcm}\left(#1\right)}% least common multiple
\def\rowrank#1{{\rm row\ rank}\left(#1\right)}
\def\columnrank#1{{\rm column\ rank}\left(#1\right)}
% Allow amsmath matrix environments up to 15 columns (default is 10).
\setcounter{MaxMatrixCols}{15}
\setcounter{MaxMatrixCols}{15}
%%%%Jim Hefferon's Linear Algebra macros.
%--------grstep
% For denoting a Gauss' reduction step.
% Use as: \grstep{\rho_1+\rho_3} or \grstep[2\rho_5 \\ 3\rho_6]{\rho_1+\rho_3}
% Typesets a squiggly arrow with the row operation above it (mandatory
% argument) and any further operations stacked below it (optional argument).
\newcommand{\grstep}[2][\relax]{%
   \ensuremath{\mathrel{
       \mathop{\rightsquigarrow}\limits^{#2\mathstrut}_{
                                     \begin{subarray}{l} #1 \end{subarray}}}}}
% Row-interchange symbol, e.g. \rho_1 \swap \rho_2.
\newcommand{\swap}{\leftrightarrow}


%-------------augmatrix
% Augmented matrix.  Usage (note the argument does not count the aug col):
% \begin{augmatrix}{2}
%   1  2  3 \\  4  5  6
% \end{augmatrix}
% Implemented as a bracketed array with a vertical rule before the last
% column; the argument is the number of columns left of the rule.
\newenvironment{augmatrix}[1]{%
  \left[\begin{array}{@{}*{#1}{c}|c@{}}
}{%
  \end{array}\right]
}



%-------------bmat
% For matrices with arguments.
% Usage: \begin{bmat}{c|c|c} 1 &2 &3 \end{bmat}
% Like bmatrix, but the column specification (with optional rules) is
% supplied by the user.
\newenvironment{bmat}[1]{
  \left[\begin{array}{@{}#1@{}}
}{\end{array}\right] }

%------------colvec and rowvec
% Column vector and row vector.  Usage:
%  \colvec{1  \\ 2 \\ 3 \\ 4} and \rowvec{1  &2  &3}
% All three wrap amsmath matrix environments; \colpoint uses parentheses
% (for points/coordinates), the other two use square brackets.
\newcommand{\colvec}[1]{\begin{bmatrix} #1 \end{bmatrix}}
\newcommand{\colpoint}[1]{\begin{pmatrix} #1 \end{pmatrix}}
\newcommand{\rowvec}[1]{\begin{bmatrix} #1 \end{bmatrix}}
% Extra vertical room in arrays and tabulars throughout the book.
\renewcommand{\arraystretch}{1.2}



%%%%%                    THEOREM-LIKE ENVIRONMENTS
% End-of-proof marker: a Zapf Dingbats glyph (via pifont's \Pisymbol).
\newcommand{\proofsymbol}{\Pisymbol{pzd}{113}}
\theorembodyfont{\small}
% Problems are numbered within sections; the environments below all share
% the "thm" counter.
\newtheorem{pro}{Problem}[section]

\theorempreskipamount .5cm \theorempostskipamount .5cm
\theoremstyle{change} \theoremheaderfont{\sffamily\bfseries}
\theorembodyfont{\normalfont} \newtheorem{thm}{Theorem}
% NOTE(review): {\cal Example} uses the obsolete \cal font command inside a
% heading -- confirm it typesets as intended (plain "Example" may be meant).
\newtheorem{exa}[thm]{{\cal Example}}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{df}[thm]{Definition}
\newtheorem{lem}[thm]{Lemma}

% answers package: "answer" environments are written to the "linearans"
% solution file and typeset later as "Answer" entries (the file is opened
% with \Opensolutionfile after \begin{document}).
\Newassociation{answer}{Answer}{linearans}

\newtheorem{rul}[thm]{Rule}
% Proof, remark, notation and solution environments, each set as an italic
% quote with a distinctive opening marker.
\newenvironment{pf}[0]{\itshape\begin{quote}{\bf Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{rem}[0]{\begin{quote}{\huge\textcolor{red}{\Pisymbol{pzd}{43}}}\itshape }{\end{quote}}
\newenvironment{nota}[0]{\begin{quote}{\textcolor{red}{\bf Notation:\ }}\itshape }{\end{quote}}
\newenvironment{solu}[0]{\begin{quote}{\bf Solution:\ }$\blacktriangleright$ \itshape }{$\blacktriangleleft$\end{quote}}

\usepackage{multicol}
%%%%%%%%%%%%%%%%%%%%
\makeatletter
% \twocoltoc: typeset the table of contents in two ruled columns (multicol)
% instead of the standard single-column \tableofcontents; clears the running
% heads via \@mkboth and pulls the toc entries with \@starttoc.
\newcommand{\twocoltoc}{%
  \section*{\contentsname
      \@mkboth{%
        }{}}%
  \begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900
    \@starttoc{toc}%
  \end{multicols}}
\makeatother
%%%%%%%%%%%%
% Fixed-size bold Helvetica (phv) fonts for display headings.
% NOTE(review): the \fontencoding/\fontfamily{fmv}/\selectfont trio changes
% the current font only at this point in the preamble -- confirm it has the
% intended (document-wide) effect.
\fontencoding{T1}
    \fontfamily{fmv}
    \selectfont
\DeclareFixedFont{\bigghv}{T1}{phv}{b}{n}{1.2cm}
\DeclareFixedFont{\bighv}{T1}{phv}{b}{n}{0.6cm}
\DeclareFixedFont{\bigithv}{T1}{phv}{b}{sl}{0.8cm}
\DeclareFixedFont{\smallit}{T1}{phv}{b}{it}{9pt}


\makeindex
%%%%%%                  And voilà the document !!!





\begin{document}
\Opensolutionfile{linearans}[linearansC1]

\pagenumbering{roman}
\renewcommand{\headrulewidth}{1pt}
\renewcommand{\footrulewidth}{1pt}
\lhead{\nouppercase{\textcolor{blue}{\rightmark}}}
\rhead{\nouppercase{\textcolor{blue}{\leftmark}}}
\lhead[\rm\thepage]{\textcolor{blue}{\it \rightmark}} \rhead[\it
\textcolor{blue}{\leftmark}]{\rm\thepage} \cfoot{}
\thispagestyle{empty} \pagestyle{fancy}


% Front matter may follow here
\begin{frontmatter}
 \maketitle
{\small \twocoltoc{}}
 \end{frontmatter}

 \clearpage



\begin{quote}
    Copyright \copyright{}  2007  David Anthony SANTOS.
    Permission is granted to copy, distribute and/or modify this document
    under the terms of the GNU Free Documentation License, Version 1.2
    or any later version published by the Free Software Foundation;
    with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
    A copy of the license is included in the section entitled ``GNU
    Free Documentation License''.
\end{quote}

\clearpage







\clearpage


\chapter*{Preface}
\markboth{}{} \addcontentsline{toc}{chapter}{Preface}
\markright{Preface}


These notes started during the Spring of 2002, when John MAJEWICZ
and I each taught a section of Linear Algebra. I would like to thank
him for numerous suggestions on the written notes.

\bigskip

The students of my class were: Craig BARIBAULT, Chun CAO, Jacky
CHAN, Pho DO, Keith HARMON, Nicholas SELVAGGI, Sanda SHWE, and Huong
VU. I must also thank my former student William CARROLL for some
comments and for supplying the proofs of a few results.


\bigskip

John's students were David HERN\'{A}NDEZ, Adel JAILILI, Andrew KIM,
Jong KIM, Abdelmounaim LAAYOUNI, Aju MATHEW, Nikita MORIN,
Thomas NEGR\'{O}N, Latoya ROBINSON, and Saem SOEURN.

\bigskip

Linear Algebra is often a student's first introduction to abstract
mathematics. Linear Algebra is well suited for this, as it  has a
number of beautiful but elementary and easy to prove theorems. My
purpose with these notes is to introduce students to the concept of
proof in a gentle manner.





\bigskip

\hfill David A. Santos
\href{mailto:dsantos@ccp.edu}{dsantos@ccp.edu}

\vfill

 \section*{To the Student} \markboth{}{}
\addcontentsline{toc}{chapter}{To the Student} \markright{To the
Student} These notes are provided for your benefit as an attempt to
organise the salient points of the course. They are a {\em very
terse} account of the main ideas of the course, and are to be used
mostly to refer to central definitions and theorems. The number of
examples is minimal, and here you will  find few exercises. The {\em
motivation} or informal ideas of looking at a certain topic, the
ideas linking a topic with another,  the worked-out examples, etc.,
are given in class. Hence these notes are not a substitute for
lectures: {\bf you must always attend lectures}. The order of the
notes may not necessarily be the order followed in the class.

\bigskip
There is a certain algebraic fluency  that is necessary for a course
at this level. These algebraic prerequisites would be difficult to
codify here, as they vary depending on class response and the topic
lectured. If at any stage you stumble in Algebra, seek help! I am
here to help you!

\bigskip

Tutoring can sometimes help,  but bear in mind that whoever tutors
you may not be familiar with my conventions. Again, I am here to
help! On the same vein, other books may help, but the approach
presented here is at times unorthodox and finding alternative
sources might be difficult.

\bigskip

Here are more recommendations:
\begin{itemize}
\item Read a section before class discussion, in particular, read
the definitions.
\item Class provides the informal discussion, and you will profit
from the comments of your classmates, as well as gain confidence by
providing your insights and interpretations of a topic. {\bf Don't
be absent! }

\item Once the lecture of a particular topic has been given, take a fresh look at the notes of the lecture topic.
\item Try to understand a single example well, rather than
ill-digest multiple examples.
\item Start working on the distributed homework ahead of time.
\item {\bf Ask questions during the lecture.} There are two main  types of questions
that you are likely to ask.
\begin{enumerate}
\item {\em Questions of Correction: Is that a minus sign there?} If you think that, for example, I have
missed out a minus sign or wrote $P$ where it should have been
$Q$,\footnote{My doctoral adviser used to say ``I said $A$, I wrote
$B$, I meant $C$ and it should have been $D$!} then by all means,
ask. No one likes to carry an error till line XLV because the
audience failed to point out an error on line I. Don't wait till the
end of the class to point out an error. Do it when  there is still
time to correct it!
\item {\em Questions of Understanding: I don't get it!} Admitting that you do not understand something is
an act requiring utmost courage. But if you don't, it is likely that
many others in the audience also don't. On the same vein, if you
feel you can explain a point to an inquiring classmate, I will allow
you time in the lecture to do so.  The best way to ask a question is
something like: ``How did you get from the second step to the third
step?'' or ``What does it mean to complete the square?''
Asseverations like ``I don't understand'' do not help me answer your
queries. If I consider that you are asking the same questions too
many times, it may be that you need extra help, in which case we
will settle what to do outside the lecture.
\end{enumerate}
\item Don't fall behind! The sequence of topics is closely
interrelated, with one topic leading to another.
\item The use of calculators is allowed, especially in the occasional lengthy calculations.
However, when graphing, you will need to provide
algebraic/analytic/geometric support of your arguments. The
questions on assignments and exams will be posed in such a way that
it will be of no advantage to have a graphing calculator.
\item Presentation is critical. Clearly outline your ideas. When writing solutions, outline major steps and write in
complete sentences. As a guide, you may try to emulate the style
presented in the scant examples furnished in these notes.
\end{itemize}


\clearpage
\renewcommand{\arraystretch}{2.1}
\renewcommand{\chaptermark}{\markboth{\chaptername\ \thechapter}}
\renewcommand{\sectionmark}{\markright}
\chapter{Preliminaries}
\section{Sets and Notation}
\pagenumbering{arabic} \setcounter{page}{1}

\begin{df}We will mean
by a {\em set} \index{sets} a collection of well defined members
or {\em elements}.\end{df}

\begin{df} The following sets have special symbols.
\begin{center}
\begin{tabular}{ll}
$\BBN = \{0,1,2, 3, \ldots\}$ & denotes the set of natural
numbers. \\
$\BBZ = \{\ldots, -3,-2,-1,0,1,2, 3, \ldots\}$ & denotes the set
of integers. \\
$\BBQ$ & denotes the set of rational numbers. \\
$\BBR$ & denotes the set of real numbers. \\
$\BBC$ & denotes the set of complex numbers. \\
$\varnothing$ & denotes the empty set.\\
\end{tabular}
\end{center}
\end{df}
\begin{df}[Implications] The symbol $\implies$ is read ``implies'', and the symbol
$\Longleftrightarrow$ is read ``if and only if.''\end{df}

\begin{exa}
Prove that between any two rational numbers there is always a
rational number.
\end{exa}
\begin{solu} Let $(a, c)\in\BBZ^2$, $(b, d)\in (\BBN\setminus\{0\})^2$,
$\frac{a}{b}< \frac{c}{d}$. Then $da < bc$. Now
$$ab + ad < ab + bc  \implies a(b+d) < b(a+c) \implies \dfrac{a}{b} < \dfrac{a+c}{b+d},    $$
$$da + dc < cb + cd  \implies d(a+c) < c(b+d) \implies \dfrac{a+c}{b+d} < \dfrac{c}{d},    $$
whence the rational number $\dfrac{a+c}{b+d}$ lies between
$\frac{a}{b}$ and $\frac{c}{d}$.
\end{solu}
\begin{rem}
We can also argue that the average of two distinct numbers lies
between the numbers and so if $r_1<r_2$ are rational numbers, then
$\dfrac{r_1+r_2}{2}$ lies between them.
\end{rem}


\begin{df}Let $A$ be a set. If $a$ belongs to the set $A$, then we
write $a\in A$, read ``$a$ is an element of $A$.'' If $a$ does not
belong to the set $A$, we write $a\not\in A$, read ``$a$ is  not
an element of $A$.'' \end{df}

\begin{df}[Conjunction, Disjunction, and Negation]
The symbol $\vee$ is read ``or'' ({\em disjunction}), the symbol
$\wedge$ is read ``and'' ({\em conjunction}), and the symbol
$\neg$ is read ``not.''
\end{df}

\begin{df}[Quantifiers]
The symbol $\forall$ is read ``for all'' (the {\em universal
quantifier}), and  the symbol $\exists$ is read ``there exists''
(the {\em existential quantifier}). \index{quantifiers}
\index{quantifiers!universal} \index{quantifiers!existential}
\end{df}
We have
\begin{equation}
\neg (\forall x\in A, P(x)) \iff (\exists x\in A, \neg P(x))
\end{equation}
\begin{equation}
\neg(\exists x\in A,  P(x)) \iff  (\forall x\in A, \neg P(x))
\end{equation}

\begin{df}[Subset] If  $\forall a\in A$ we have $a\in B$, then we write $A \subseteq
B$, which we read ``$A$ is a subset of $B$.''\end{df} In
particular, notice that for any set $A$, $\varnothing \subseteq A$
and $A \subseteq A$.  Also \index{sets!subsets}
$$\BBN \subseteq\BBZ \subseteq\BBQ \subseteq\BBR \subseteq\BBC.$$
\begin{rem}
$A = B \iff (A \subseteq B) \wedge (B \subseteq A)$.
\end{rem}
\begin{df}
The {\em union} \index{sets!union} of two sets $A$ and $B$, is the
set
$$A\cup B = \{x:(x\in A)\ \vee\ (x\in B)\}.$$
This is read ``$A$ union $B$.'' See figure \ref{fig:a_union_b}.
\end{df}
\begin{df}
The {\em intersection} \index{sets!intersection} of two sets $A$
and $B$, is
$$A\cap B = \{x:(x\in A)\ \wedge \ (x\in B)\}.$$
This is read ``$A$ intersection $B$.'' See figure
\ref{fig:a_intersection_b}.
\end{df}
\begin{df}
The {\em difference} \index{sets!difference} of two sets $A$ and
$B$, is
$$A\setminus B = \{x:(x\in A)\ \wedge (x\not\in B)\}.$$
This is read ``$A$ set minus $B$.'' See figure
\ref{fig:a_minus_b}.
\end{df}

\vspace{1cm}
\begin{figure}[h]
\hfill
\begin{minipage}{3cm}$$\psset{unit=.7pc}
\pscircle[fillstyle=hlines,
fillcolor=red](-1,0){2}\pscircle[fillstyle=hlines,
fillcolor=red](1,0){2} \uput[d](-1,-2){A}\uput[d](1,-2){B}
$$\vspace{1cm}\footnotesize\hangcaption{$A\cup B$} \label{fig:a_union_b}
\end{minipage}
\hfill
\begin{minipage}{3cm}$$ \psset{unit=.7pc} \pscircle(-1,0){2}\pscircle(1,0){2}
\uput[d](-1,-2){A}\uput[d](1,-2){B}
\pscustom[fillstyle=solid,fillcolor=green]{\psarc(1,0){2}{120}{240}\psarc(-1,0){2}{270}{60}}
$$\vspace{1cm}\footnotesize\hangcaption{$A\cap B$} \label{fig:a_intersection_b}
\end{minipage}
\hfill
\begin{minipage}{3cm}$$\psset{unit=.7pc}
\pscircle[fillstyle=solid,fillcolor=blue](-1,0){2}\pscircle(1,0){2}
\uput[d](-1,-2){A}\uput[d](1,-2){B}
\pscustom[fillstyle=solid,fillcolor=white]{\psarc(1,0){2}{120}{240}\psarc(-1,0){2}{270}{60}}
$$\vspace{1cm}\footnotesize\hangcaption{$A\setminus B$} \label{fig:a_minus_b}
 \end{minipage} \hfill
\end{figure}
\vspace{1cm}

\begin{exa}
Prove by means of set inclusion that $$(A \cup B) \cap C = (A \cap
C) \cup (B \cap C).$$ \end{exa} \begin{solu} We have,
$$\begin{array}{lll} x \in (A \cup B) \cap C & \iff &
x\in (A \cup B) \wedge x\in C \\
& \iff & (x\in A \vee x\in B) \wedge x\in C \\
& \iff & (x\in A \wedge x \in C) \vee (x\in B \wedge x\in C) \\
& \iff & (x\in A\cap C) \vee (x\in B \cap C)\\
& \iff & x \in (A \cap C) \cup (B \cap C),
\end{array}$$which establishes the equality.
\end{solu}
\begin{df}
Let $A_1, A_2, \ldots, A_n$, be sets. The {\em Cartesian Product}
of these $n$ sets is defined and denoted by \index{sets!Cartesian
Product}
$$A_1\times A_2\times \cdots \times A_n =
\{(a_1, a_2, \ldots , a_n): a_k \in A_k\},$$ that is, the set of
all ordered $n$-tuples whose elements belong to the given sets.
\end{df}
\begin{rem}
In the particular case when all the $A_k$ are equal to a set $A$,
we write
$$A_1\times A_2\times \cdots \times A_n = A^n.$$If $a\in A$ and $b\in A$
we write $(a,b)\in A^2.$\end{rem}








\begin{df}\index{absolute value}
Let $x\in \BBR$. The {\em absolute value of $x$}---denoted by
$|x|$---is defined by
\begin{center}
 \fcolorbox{blue}{yellow}{ $ |x| =
\left\{
\begin{tabular}{ll}
$-x$       & \rm{if} \ $x < 0$, \\
$x$ & \rm{if} \ $x \geq 0$.
\end{tabular}
\right. $}
\end{center}
\end{df}
It follows from the definition that  for $x\in\BBR$,
\begin{equation}\label{eq:abs_val_interval}
-|x| \leq x \leq |x|.
\end{equation}
\begin{equation}\label{eq:sandwiched_abs_val}
t \geq 0 \implies (|x| \leq t \iff -t \leq x \leq t).
\end{equation}
\begin{equation}\label{eq:abs_val_to_sq_rt}
\forall a\in \BBR, \quad \sqrt{a^2} = |a|.
\end{equation}
\begin{thm}[Triangle Inequality] \index{inequality!triangle!for real numbers} Let $(a, b)\in \BBR^2$. Then
\begin{equation}\fcolorbox{blue}{yellow}{ $|a + b| \leq |a| + |b|$.} \label{ineq:triangle_real_numbers}\end{equation}

\end{thm}
\begin{pf}
From \ref{eq:abs_val_interval}, by addition,
$$-|a| \leq a \leq |a| $$to$$-|b| \leq b \leq |b| $$we obtain
$$-(|a| + |b| ) \leq a + b \leq (|a| + |b|),$$whence the theorem follows by \ref{eq:sandwiched_abs_val}.
\end{pf}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\section*{\psframebox{Homework}}

\begin{pro}
Prove that between any two rational numbers there is an irrational
number.
\end{pro}

\begin{pro}
Prove that $X \setminus (X\setminus A) = X\cap A$.
\begin{answer}
$$\begin{array}{lll}x\in X \setminus (X\setminus A) & \iff & x\in X \wedge x\not\in (X\setminus A) \\
& \iff & x\in X \wedge x\in A \\
& \iff & x\in X\cap A.   \end{array}$$
\end{answer}
\end{pro}
\begin{pro}
Prove that $$ X\setminus (A \cup B) = (X\setminus A) \cap
(X\setminus B). $$
\begin{answer}
$$\begin{array}{lll} x\in X\setminus (A \cup B) & \iff & x\in X\wedge (x\not\in (A \cup
B))\\
& \iff & x\in X \wedge (x\not\in A \wedge x\not\in B) \\ & \iff &
(x\in X \wedge x\not\in A) \wedge (x\in X\wedge x\not\in B) \\
& \iff & x\in (X\setminus A) \wedge x\in (X\setminus B) \\
&\iff & x\in (X\setminus A) \cap (X\setminus B).
\end{array}
$$
\end{answer}
\end{pro}
\begin{pro}
Prove that $$X\setminus (A \cap B) = (X\setminus A) \cup (X\setminus
B) .$$
\end{pro}
\begin{pro}
Prove that $$(A\cup B)\setminus (A \cap B) = (A\setminus B) \cup (B
\setminus A).$$
\end{pro}
\begin{pro} Write the union $A \cup B \cup C$ as a {\em
disjoint} union of sets.\begin{answer} One possible solution is
$$ A \cup B \cup C = A \cup (B \setminus A) \cup (C \setminus (A \cup
B)).$$
\end{answer}
\end{pro}
\begin{pro}
Prove that a set with $n\geq 0$ elements has $2^n$
subsets.
\end{pro}
\begin{pro}
 Let $(a, b)\in \BBR^2$. Prove that $$ ||a| - |b||  \leq |a - b|.$$
\begin{answer}
We have $$|a| = |a - b + b| \leq |a - b| + |b|,$$giving
$$|a| - |b| \leq |a - b|.$$Similarly,
$$|b| = |b - a + a| \leq |b - a| + |a| = |a - b| + |a|,$$gives
$$|b| - |a| \leq |a - b|.$$The stated inequality follows from this.
\end{answer}
\end{pro}
\end{multicols}




\section{Partitions and Equivalence Relations}
\begin{df}
Let $S \neq \varnothing$ be a set. A {\em partition} of $S$ is a
collection of non-empty, pairwise  disjoint subsets  of $S$ whose
union is $S$. \index{partition}
\end{df}
\begin{exa}
Let $$2\BBZ = \{ \ldots , -6, -4, -2, 0, 2, 4, 6, \ldots\}=
\overline{0}$$ be the set of even integers and let
$$2\BBZ + 1 = \{ \ldots , -5, -3, -1, 1, 3, 5, \ldots\}=
\overline{1}$$ be the set of odd integers. Then
$$(2\BBZ)\cup (2\BBZ + 1) = \BBZ, \ \ (2\BBZ)\cap (2\BBZ + 1) = \varnothing,$$
and so $\{2\BBZ, 2\BBZ + 1\}$ is a partition of $\BBZ$.
\label{ex:mod2}\end{exa}
\begin{exa}
Let $$3\BBZ = \{ \ldots, -9, -6, -3, 0, 3, 6, 9, \ldots\}  =
\overline{0}$$ be the integral multiples of $3$, let
$$3\BBZ + 1 = \{ \ldots , -8, -5, -2, 1, 4, 7, \ldots\}=
\overline{1}$$ be the integers leaving remainder $1$ upon division
by $3$, and let
$$3\BBZ + 2 = \{ \ldots , -7, -4, -1, 2, 5, 8, \ldots\} =
\overline{2}$$ be integers leaving remainder $2$ upon division by
$3$. Then

$$(3\BBZ)\cup (3\BBZ + 1) \cup (3\BBZ + 2) = \BBZ, $$ $$ (3\BBZ)\cap (3\BBZ + 1) = \varnothing, \ (3\BBZ)\cap (3\BBZ + 2) = \varnothing,
(3\BBZ + 1)\cap (3\BBZ + 2) = \varnothing,$$ and so $\{3\BBZ, 3\BBZ
+ 1, 3\BBZ + 2\}$ is a partition of $\BBZ$. \label{ex:mod3}\end{exa}
\begin{rem}
Notice that $\overline{0}$ and $\overline{1}$ do not mean the same
in examples \ref{ex:mod2} and  \ref{ex:mod3}. Whenever we make use
of this notation, the integral divisor must be made explicit.
\end{rem}
\begin{exa}
Observe $$\BBR = (\BBQ) \cup (\BBR \setminus \BBQ), \ \ \varnothing
= (\BBQ) \cap (\BBR \setminus \BBQ),$$which means that the real
numbers can be partitioned into the rational and irrational numbers.
\end{exa}
\begin{df}
Let $A, B$ be sets. A {\em relation} $R$ is a subset of the
Cartesian product $A\times B$. We write the fact that $(x, y)\in
R$ as $x \sim y.$ \index{relation}
\end{df}
\begin{df}
Let $A$ be a set  and $R$ be a relation on $A\times A$. Then $R$
is said to be
\begin{itemize}
\item {\bf reflexive} if $(\forall x\in A), x\sim
x$,\index{relation!reflexive} \item {\bf symmetric} if $(\forall
(x, y)\in A^2), x\sim y \implies   y \sim x$,
\index{relation!symmetric}\item {\bf anti-symmetric} if $(\forall
(x, y)\in A^2), (x\sim y) \wedge (y \sim x) \implies   x = y$,
\item {\bf transitive} if $(\forall (x, y, z)\in A^3), (x\sim
y)\wedge (y\sim z) \implies (x\sim z)$.
\index{relation!transitive}
\end{itemize}
A relation $R$ which is reflexive, symmetric and transitive is
called an {\em equivalence relation} on $A$. A relation $R$ which
is reflexive, anti-symmetric and transitive is called a {\em
partial order} on $A$. \index{relation!equivalence} \index{partial
order}
\end{df}
\begin{exa}
Let $S = $\{All Human Beings\}, and define $\sim$ on $S$ as $a\sim
b$ if and only if $a$ and $b$ have the same mother. Then $a\sim a$
since any human $a$ has the same mother as himself. Similarly,
$a\sim b \implies b\sim a$ and $(a\sim b)\wedge (b\sim c) \implies
(a\sim c)$. Therefore $\sim$ is an equivalence relation.
\end{exa}
\begin{exa}
Let $L$  be the set of all lines on the plane and write $l_1\sim
l_2$ if  $l_1 || l_2$ (the line $l_1$ is parallel to the line
$l_2$). Then $\sim$ is an equivalence relation on $L$.
\end{exa}

\begin{exa}
In $\BBQ$ define the relation $\frac{a}{b} \sim \frac{x}{y} \iff ay
= bx$, where we will always assume that the denominators are
non-zero. Then $\sim$ is an equivalence relation. For $\frac{a}{b}
\sim \frac{a}{b}$ since $ab = ab$. Clearly
$$\frac{a}{b} \sim \frac{x}{y} \implies
ay = bx \implies xb = ya \implies \frac{x}{y} \sim \frac{a}{b}.$$
Finally, if $\frac{a}{b} \sim \frac{x}{y}$ and $\frac{x}{y} \sim
\frac{s}{t}$ then we have $ay = bx$ and $xt = sy$. Multiplying
these two equalities $ayxt = bxsy$. This gives $$ayxt - bxsy = 0
\implies xy(at - bs) = 0.
$$Now if $x = 0$, we will have $a=s = 0$, in which case trivially
$at = bs$. Otherwise we must have $at - bs = 0$ and so
$\frac{a}{b} \sim \frac{s}{t}$.
\end{exa}
\begin{exa}
Let $X$ be a collection of sets. Write $A\sim B$ if $A \subseteq
B$. Then $\sim$ is a partial order on $X$.
\end{exa}
\begin{exa}
For $(a, b)\in\BBR^2$ define $$a\sim b \Leftrightarrow a^2 + b^2 >
2.$$ Determine, with proof, whether $\sim$ is reflexive, symmetric,
and/or transitive. Is $\sim$ an equivalence relation?

\end{exa}
\begin{solu} Since $0^2 + 0^2 \ngtr 2$, we have $0\nsim 0$ and so $\sim$ is
not reflexive. Now,
$$\begin{array}{lll} a \sim b & \Leftrightarrow & a^2 + b^2 > 2 \\ & \Leftrightarrow & b^2 + a^2 > 2 \\ & \Leftrightarrow & b \sim a,\end{array}$$
so $\sim$ is symmetric. Also $0 \sim 3$ since $0^2 + 3^2 > 2$ and
$3 \sim 1$ since $3^2 + 1^2 > 2$. But $0 \nsim 1$ since $0^2 + 1^2
\ngtr 2$. Thus the relation is not transitive. The relation,
therefore, is not an equivalence relation.

\end{solu}
\begin{df}
Let $\sim$ be an equivalence relation on a set $S$. Then the {\em
equivalence class of $a$} is defined and denoted by
$$[a] = \{x\in S: x\sim a\}   .$$
\end{df}



\begin{lem}
Let $\sim$ be an equivalence relation on a set $S$. Then two
equivalence classes are either identical or disjoint.
\label{lem:equiv_classes}\end{lem}
\begin{pf}
We prove that if $(a, b)\in S^2$, and $[a]\cap [b] \neq
\varnothing$ then $[a] = [b]$. Suppose that $x\in [a]\cap [b]$.
Now $x\in [a]\implies x\sim a \implies a\sim x$, by symmetry.
Similarly, $x\in [b]\implies x\sim b.$ By transitivity $$(a\sim
x)\wedge (x\sim b) \implies a\sim b.$$Now, if $y\in [b]$ then
$b\sim y$. Again by transitivity, $a\sim y$. This means that $y\in
[a]$. We have shewn that $y\in [b]\implies y\in [a]$ and so
$[b]\subseteq [a]$. In a similar fashion, we may prove that
$[a]\subseteq [b]$. This establishes the result.
\end{pf}

\begin{thm}
Let $S \neq \varnothing$ be a set. Any equivalence relation on $S$
induces a partition of $S$. Conversely, given a partition of $S$
into disjoint, non-empty subsets,  we can define an equivalence
relation on $S$ whose equivalence classes are precisely these
subsets. \label{thm:equiv_relation_yields_partition}\end{thm}
\begin{pf}
By Lemma \ref{lem:equiv_classes}, if $\sim$ is an equivalence
relation on $S$ then
$$ S = \bigcup _{a\in S} [a],$$ and $[a]\cap [b] = \varnothing$ if $a\nsim b$.
This proves the first half of the theorem.

\bigskip

Conversely, let $$ S = \bigcup _{\alpha} S_\alpha,  \ \ S_\alpha
\cap S_\beta = \varnothing\ \ {\rm if}\ \alpha \neq \beta,$$ be a
partition of $S$. We define the relation $\approx$ on $S$ by
letting $a\approx b$ if and only if they belong to the same
$S_\alpha$. Since the $S_\alpha$ are mutually disjoint, it is
clear that $\approx$ is an equivalence relation on $S$ and that
for $a\in S_\alpha,$ we have $[a] = S_\alpha$.
\end{pf}
\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
For $(a, b) \in(\BBQ\setminus \{0\} )^2$ define the relation $\sim$
as follows: $a\sim b \Leftrightarrow \frac{a}{b} \in \BBZ$.
Determine whether this relation is reflexive, symmetric, and/or
transitive.
\begin{answer} $a \sim a$ since $\frac{a}{a} = 1\in \BBZ$, and so
the relation is reflexive. The relation is not symmetric. For $2
\sim 1$ since $\frac{2}{1}\in \BBZ$ but $1 \nsim 2$ since
$\frac{1}{2} \not\in \BBZ$. The relation is transitive. For assume
$a\sim b$ and $b\sim c$. Then there exist $(m, n)\in \BBZ^2$ such
that $\frac{a}{b} = m, \frac{b}{c} = n$. This gives
$$\frac{a}{c} = \frac{a}{b}\cdot\frac{b}{c} = mn\in\BBZ,$$and so
$a\sim c.$
\end{answer}
\end{pro}
\begin{pro}
Give an example of a relation on $\BBZ\setminus \{0\} $ which is
reflexive, but is neither symmetric nor transitive.
\begin{answer} Here is one possible example: put $a\sim b
\Leftrightarrow \frac{a^2 + a}{b}\in \BBZ$. Then clearly if
$a\in\BBZ\setminus \{0\} $ we have $a\sim a$ since $\frac{a^2 +
a}{a} = a + 1 \in \BBZ$. On the other hand, the relation is not
symmetric, since $5\sim 2$ as $\frac{5^2 + 5}{2} = 15\in \BBZ$ but
$2\not\sim 5$, as $\frac{2^2 + 2}{5} = \frac{6}{5}\not\in \BBZ$. It
is not transitive either, since $\frac{5^2 + 5}{3}\in\BBZ\implies 5
\sim 3$ and $\frac{3^2 + 3}{12}\in\BBZ\implies    3 \sim 12$ but
$\frac{5^2 + 5}{12}\not\in\BBZ$ and so $5\nsim 12$.
\end{answer}
\end{pro}
\begin{pro}
Define the relation $\sim$ in $\BBR$ by $x\sim y \iff xe^y = ye^x$.
Prove that $\sim$ is an equivalence relation.
\end{pro}
\begin{pro}
Define the relation $\sim$ in $\BBQ$ by $x\sim y \iff \exists
h\in\BBZ$ such that $x = \dfrac{3y + h}{3}$. [A] Prove that $\sim$
is an equivalence relation. [B] Determine $[x]$, the equivalence class of
$x\in\BBQ$. [C] Is $\frac{2}{3}\sim\frac{4}{5}$?
\begin{answer} [B] $[x] = x + \dfrac{1}{3}\BBZ$. [C] No.  \end{answer}
\end{pro}
\end{multicols}
\section{Binary Operations}
\index{binary operation}
\begin{df}
Let $S, T$ be sets. A {\em binary operation} is a function
$$\fun{\otimes}{(a, b)}{a\otimes b}{S\times S}{T}.$$We usually
use the ``infix'' notation $a \otimes   b$ rather than the
``prefix'' notation $  \otimes (a, b)$. If $S = T$ then we say
that the binary operation is {\em internal} or {\em closed} and if
$S \neq T$ then we say that it is {\em external}.\index{binary
operation!internal} If
$$a\otimes b = b\otimes  a$$ then we say that the operation $\otimes$ is {\em
commutative}\index{binary operation!commutative} and if
$$a\otimes (b \otimes c) = (a\otimes b)\otimes c,$$we say that it
is {\em associative.} \index{binary operation!associative} If
$\otimes$ is associative, then we can write $$a\otimes (b \otimes
c) = (a \otimes b) \otimes c = a \otimes b \otimes c,$$without
ambiguity.
\end{df}\begin{rem}We usually omit the sign $\otimes$
and use juxtaposition to indicate the operation $\otimes$. Thus we
write $ab$ instead of $a\otimes b$.
\end{rem}
\begin{exa}
The operation $+$ (ordinary addition) on the set $\BBZ \times \BBZ$
is a commutative and associative closed binary operation.
\end{exa}
\begin{exa}
The operation $-$ (ordinary subtraction) on the set $\BBN \times
\BBN$ is a non-commutative, non-associative non-closed binary
operation.
\end{exa}


\begin{exa}
The operation $\otimes$ defined by $a \otimes b = 1 + ab$ on the set
$\BBZ\times \BBZ$ is a commutative but non-associative internal
binary operation. For
$$a \otimes b = 1 + ab = 1 + ba = b \otimes a,$$proving
commutativity. Also, $1 \otimes  (2 \otimes   3)  = 1  \otimes (7)
= 8$ and $(1  \otimes 2) \otimes  3 = (3)
 \otimes  3 = 10$, evincing non-associativity.
\end{exa}
\begin{df}
Let $S$ be a set and $ \otimes : S\times S \rightarrow S$ be a
closed binary operation. The couple $\algebra{S}{ \otimes }$ is
called an {\em algebra}.\index{algebra}
\end{df}
\begin{rem}
When we desire to drop the sign $\otimes$ and indicate the binary
operation by juxtaposition, we simply speak of the ``algebra
$S$.''
\end{rem}
\begin{exa}
Both $\algebra{\BBZ}{+}$ and $\algebra{\BBQ}{\cdot}$ are algebras.
Here $+$ is the standard addition of real numbers and $\cdot$ is the
standard multiplication.
\end{exa}
\begin{exa}
$\algebra{\BBZ}{-}$ is a non-commutative, non-associative algebra.
Here $-$ is the standard subtraction operation on the real numbers.
\end{exa}

\begin{exa}[Putnam Exam, 1972]
Let $S$ be a set and let $*$ be a binary operation on $S$
satisfying the laws $\forall (x, y)\in S^2$
\begin{equation}x*(x*y) = y,\label{eq:law_1}\end{equation}
\begin{equation}(y*x)*x = y.\label{eq:law_2}\end{equation} Shew that $*$ is commutative, but
not necessarily associative.

\end{exa}
\begin{solu} By (\ref{eq:law_2})
$$x*y = ((x*y)*x)*x.$$
By (\ref{eq:law_2}) again
$$((x*y)*x)*x = ((x*y)*((x*y)*y))*x.$$
By (\ref{eq:law_1})
$$((x*y)*((x*y)*y))*x = (y)*x = y*x,$$
which is what we wanted to prove.
\bigskip

  To shew that the operation is not necessarily associative,
  specialise
  $S = \BBZ$ and $x*y = -x-y$ (the negative of the sum of $x$ and $y$).
  Then clearly in this case $*$ is commutative, and satisfies (\ref{eq:law_1}) and (\ref{eq:law_2}) but
  $$0*(0*1) = 0*(-0-1) = 0*(-1) = -0-(-1) = 1,$$ and
  $$(0*0)*1 = (-0-0)*1 = (0)*1 = -0 - 1 = -1,$$
evincing that the operation is not associative.
\end{solu}
\begin{df}
Let $S$ be an algebra. Then $l\in S$ is called a {\em left
identity} if $\forall  s\in S$ we have $l s = s$. Similarly $r\in
S$ is called a {\em right identity} if $\forall s\in S$ we have $s
r = s$.
\end{df}
\begin{thm} If  an algebra $S$ possesses a left identity $l$ and a right
identity $r$ then $l = r.$
\end{thm}
\begin{pf} Since $l$ is a left identity
$$r = l r.$$Since $r$ is a right identity
$$l = l r.$$Combining these two, we gather
$$r = l r = l,$$whence the theorem follows.
\end{pf}
\begin{exa}
In $\algebra{\BBZ}{+}$ the element $0\in \BBZ$ acts as an identity,
and in $\algebra{\BBQ}{\cdot}$ the element $1\in \BBQ$ acts as an
identity.
\end{exa}
\begin{df}
Let $S$ be an algebra. An element $a\in S$ is said to be {\em
left-cancellable} or {\em left-regular} if $\forall (x, y) \in
S^2$
$$a x = a y \implies   x = y.$$Similarly,
element $b\in S$ is said to be {\em right-cancellable}  or {\em
right-regular} if $\forall (x, y) \in S^2$
$$x b = y b\implies   x = y.$$ Finally, we say an
element $c\in S$ is {\em cancellable} or {\em regular} if it is
both left and right cancellable.
\end{df}
\begin{df}
Let $\algebra{S}{ \otimes }$ and $\algebra{S}{\top}$ be algebras.
We say that $\top$ is {\em left-distributive} with respect to $
\otimes $ if
$$\forall (x, y, z)\in S^3, \ x\top (y  \otimes z) = (x\top y)\otimes(x\top
z).$$Similarly, we say that $\top$ is {\em right-distributive}
with respect to $ \otimes $ if
$$\forall (x, y, z)\in S^3, \ (y  \otimes z)\top x = (y\top x)\otimes (z\top
x).$$We say that  $\top$ is {\em distributive} with respect to $
\otimes $ if it is both left and right distributive with respect
to $ \otimes $.

\end{df}
\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Let $$S = \{x\in\BBZ : \exists (a, b, c)\in \BBZ^3, x= a^3 + b^3 + c^3
-3abc \}.$$ Prove that $S$ is closed under multiplication, that is,
if $x\in S$ and $y\in S$ then $xy \in S$.
\begin{answer}
Let $\omega = -\frac{1}{2} + i\frac{\sqrt{3}}{2}$. Then $\omega^2
+ \omega + 1 = 0$ and $\omega^3=1$. Then $$x= a^3 + b^3 + c^3
-3abc = (a +b+c)(a + \omega b + \omega^2 c)(a + \omega^2b +
c\omega), $$ $$y = u^3 + v^3 + w^3 - 3uvw = (u + v + w)(u + \omega
v + \omega^2 w)(u + \omega^2v+ \omega w).$$ Then
$$(a+b+c)(u+v+w) = au + av + aw + bu + bv + bw + cu + cv + cw,
  $$
  $$\begin{array}{lll}(a + \omega b + \omega^2 c)(u + \omega v + \omega^2 w) & = &
   au + bw + cv \\
 & & \qquad +\omega (av + bu + cw) \\
  & & \qquad +\omega^2 (aw+bv+cu), \\
  \end{array}$$
and
$$\begin{array}{lll}(a + \omega^2 b + \omega c)(u + \omega^2 v + \omega w) &  = &
au + bw + cv \\
& & \qquad +\omega (aw + bv + cu) \\
& & \qquad +\omega^2 (av + bu + cw). \\
\end{array}$$
This proves that $$\begin{array}{lll}xy & = &(au + bw + cv)^3 +
(aw + bv + cu)^3 + (av + bu + cw)^3\\ & &  -3(au + bw + cv)(aw +
bv + cu)(av + bu + cw), \\
\end{array}
$$which proves that $S$ is closed under multiplication.
\end{answer}

\end{pro}

\begin{pro}
Let $\algebra{S}{ \otimes }$ be an associative algebra, let $a\in
S$ be a fixed element and define the closed binary operation
$\top$ by
$$x\top y = x \otimes  a  \otimes  y.$$Prove that $\top$ is also
associative over $S\times S$. \begin{answer} We have
$$x \top (y \top z) = x\top (y  \otimes  a  \otimes  z) = (x)  \otimes  (a) \otimes   (y  \otimes  a  \otimes  z ) =
x  \otimes  a  \otimes  y  \otimes  a  \otimes  z,$$where we may
drop the parentheses since $ \otimes $ is associative. Similarly
$$(x \top y) \top z = (x  \otimes a  \otimes  y) \top z = (x   \otimes a  \otimes  y)  \otimes  (a)   \otimes  (z) =
x  \otimes  a  \otimes  y  \otimes  a  \otimes  z .$$ By virtue of
having proved
$$ x \top (y \top z) = (x \top y) \top z,$$associativity is
established.
\end{answer}
\end{pro}


\begin{pro}
On $\BBQ \cap ]-1;1[$ define the binary operation $ \otimes $
$$a \otimes  b = \frac{a + b}{1 + ab},$$where juxtaposition means ordinary
multiplication and $+$ is the ordinary addition of real numbers.
Prove that
\begin{dingautolist}{202}
\item Prove that $ \otimes $ is a closed binary operation on $\BBQ
\cap
]-1;1[$. \\

 \item Prove that $ \otimes $ is both commutative and
associative. \\


\item Find an element $e\in\BBR$ such that $(\forall a \in \BBQ \cap
]-1;1[)\ (e \otimes  a =
a)$. \\


\item Given $e$ as above and an arbitrary element $a\in \BBQ \cap
]-1;1[$, solve the equation $a \otimes  b = e$ for
$b$. \\

\end{dingautolist}
\begin{answer} We proceed in order.\begin{dingautolist}{202}  \item Clearly, if $a, b$ are
rational numbers, $$|a| < 1, |b|<1 \implies |ab| < 1 \implies -1 <
ab < 1 \implies 1 + ab > 0,$$whence the denominator never vanishes
and since sums, multiplications and divisions of rational numbers
are rational, $\dfrac{a + b}{1 + ab}$ is also rational. We must
prove now that $-1 < \dfrac{a + b}{1 + ab} < 1$ for $(a, b)\in
]-1; 1[^2$. We have
$$\begin{array}{lll} -1 < \dfrac{a + b}{1 + ab} <  1 & \Leftrightarrow &
-1 - ab < a + b < 1 + ab \\
 & \Leftrightarrow & -1 -ab - a - b < 0 < 1 + ab - a - b \\
& \Leftrightarrow & -(a + 1)(b + 1) < 0 < (a - 1)(b - 1).
\end{array}$$
Since $(a, b)\in ]-1; 1[^2$, $(a + 1)(b + 1) > 0$ and so $-(a +
1)(b + 1) < 0$ giving the sinistral inequality. Similarly $a - 1 <
0$ and $b - 1 < 0$ give $(a - 1)(b - 1) > 0$, the dextral
inequality. Since the steps are reversible, we have established
that indeed $-1 < \dfrac{a + b}{1 + ab} <  1$. \item Since $a
\otimes b = \dfrac{a + b}{1 + ab} = \dfrac{b + a}{1 + ba} = b
\otimes  a$, commutativity follows trivially. Now
$$\begin{array}{lll}a  \otimes (b \otimes  c) &  = &
 a \otimes \left(\dfrac{b + c}{1 + bc}\right) \vspace{2mm} \\ & = &
 \dfrac{a + \left(\dfrac{b + c}{1 + bc}\right)}{1 + a\left(\dfrac{b + c}{1 +
 bc}\right)} \vspace{2mm} \\
& = & \dfrac{a(1 + bc) + b + c}{1 + bc + a(b + c)} = \dfrac{a + b
+ c + abc}{1 + ab + bc + ca}. \end{array}$$On the other hand,
$$\begin{array}{lll} (a \otimes  b)  \otimes c &  = &  \left(\dfrac{a + b}{1 + ab}\right) \otimes c  \vspace{2mm} \\
& = & \dfrac{\left(\dfrac{a + b}{1 + ab}\right) + c}{1 +
\left(\dfrac{a + b}{1 + ab}\right)c} \vspace{2mm} \\ & = &
\dfrac{(a + b)+ c(1 + ab)}{1 + ab + (a + b)c} \vspace{2mm} \\ &  =
&  \dfrac{a + b + c + abc}{1 + ab + bc + ca},\end{array}$$ whence
$ \otimes $ is associative. \item If $a \otimes e = a$ then
$\dfrac{a + e}{1 + ae} = a$, which gives $a + e = a + ea^2$ or
$e(a^2 - 1) = 0$. Since $a \neq \pm 1$, we must have $e = 0$.
\item If $a  \otimes b = 0$, then $\dfrac{a + b}{1 + ab} = 0$,
which means that $b = -a$.
\end{dingautolist}
\end{answer}
\end{pro}

\begin{pro}
On $\BBR \setminus \{1\}$ define the binary operation $ \otimes $
$$a \otimes  b = a+b-ab,$$where juxtaposition means ordinary
multiplication and $+$ is the ordinary addition of real numbers.
Clearly $\otimes$ is a closed binary operation. Prove that
\begin{dingautolist}{202}
 \item Prove that $ \otimes $ is both commutative and
associative. \\


\item Find an element $e\in\BBR \setminus \{1\}$ such that $(\forall
a \in \BBR \setminus \{1\})\ (e \otimes  a =
a)$. \\


\item Given $e$ as above and an arbitrary element $a\in \BBR
\setminus \{1\}$, solve the equation $a \otimes  b = e$ for
$b$. \\

\end{dingautolist}
\begin{answer} We proceed in order.\begin{dingautolist}{202} \item Since $a
\otimes b = a+b-ab = b+a-ba = b \otimes a$, commutativity follows
trivially. Now
$$\begin{array}{lll}a  \otimes (b \otimes  c) &  = &
 a\otimes (b + c -bc) \\ & = &
 a + b+c -bc -a(b+c-bc)\\
& = & a+b+c-ab-bc-ca + abc.
\end{array}$$On the other hand,
$$\begin{array}{lll} (a \otimes  b)  \otimes c &  = &  \left(a+b-ab\right) \otimes c  \\
& = & a+b-ab + c - (a+b-ab)c
\\ & = & a+b + c -ab-bc -ca + abc, \\ \end{array}$$ whence $ \otimes $ is associative. \item If $a
\otimes e = a$ then $a+e-ae = a$, which gives $e(1-a) = 0$. Since
$a \neq 1$, we must have $e = 0$. \item If $a \otimes b = 0$, then
$a+b-ab = 0$, which means that $b(1-a) = -a$. Since $a\neq 1$ we
find $b = -\dfrac{a}{1-a}$.
\end{dingautolist}
\end{answer}
\end{pro}

\begin{pro}[{\red\bf Putnam Exam, 1971}] Let $S$ be a set and let $\circ$ be a
binary operation on $S$ satisfying the two laws
$$(\forall x\in S) (x \circ x = x),$$and
$$(\forall (x, y, z) \in S^3) ((x\circ y)\circ z = (y \circ z)\circ x). $$
Shew that $\circ$ is commutative. \begin{answer} We have
$$\begin{array}{lll}
x\circ y & = & (x\circ y)\circ (x\circ y) \\
& = & [y\circ (x \circ y)]\circ x \\
& = & [(x \circ y)\circ x] \circ y \\
& = & [(y \circ x)\circ x] \circ y \\
& = & [(x \circ x)\circ y] \circ y \\
& = & (y \circ y) \circ (x\circ x) \\
& = & y\circ x, \\
\end{array}$$proving commutativity.
\end{answer}
\end{pro}
\begin{pro}
Define the {\em symmetric difference} of the sets $A, B$ as $A
\triangle B = (A\setminus B) \cup (B\setminus A)$. Prove that
$\triangle$ is commutative and associative.
\end{pro}
\end{multicols}
\section{$\BBZ_n$}
\index{division algorithm}
\begin{thm}[Division Algorithm] Let $n > 0$ be an integer. Then
for any integer $a$ there exist unique integers $q$ (called the
{\em quotient}) and $r$ (called the {\em remainder}) such that $a
= qn + r$ and $0 \leq r < n$. \label{thm:division_algorithm}
\end{thm}
\begin{pf}
In the proof of this theorem, we use the following property of the
integers, called the {\em well-ordering principle}: any non-empty
set of non-negative integers has a smallest element.

\bigskip

Consider the set $$S = \{ a -bn: b\in \BBZ \wedge a \geq bn\}.$$
Then $S$ is a collection of nonnegative integers and $S \neq
\varnothing$ as $\pm a - 0\cdot n \in S$ and this is non-negative
for one choice of sign. By the Well-Ordering Principle, $S $ has a
least element, say $r$. Now, there must be some $q \in \BBZ$ such
that $r = a - qn$ since $r \in S $. By construction, $r \geq 0.$ Let
us prove that $r < n$. For assume that $r \geq n.$ Then $r
> r - n = a - qn - n = a - (q + 1)n \geq 0$, since $r - n \geq 0.$
But then $a - (q + 1)n \in S $ and $a - (q + 1)n < r$ which
contradicts the fact that $r$ is the smallest member of $S $. Thus
we must have $0 \leq r < n.$ To prove that $r$ and $q$ are unique,
assume that $q_1n + r_1 = a = q_2n + r_2$, $0 \leq r_1 < n$, $0
\leq r_2 < n. $ Then $r_2 - r_1 = n(q_1 - q_2)$, that is, $n$
divides $(r_2 - r_1)$. But $|r_2 - r_1| < n,$ whence $r_2 = r_1$.
From this it also follows that $q_1 = q_2.$ This completes the
proof.   \end{pf}

\begin{exa} If $n = 5$ the Division Algorithm says that we
 can arrange all the integers in five columns as follows:
$$\begin{array}{rrrrr}
\vdots & \vdots & \vdots & \vdots & \vdots \\
-10 & -9 & -8 & -7 & -6 \\
-5 & -4 & -3 & -2 & -1 \\
0 & 1 & 2 & 3 & 4 \\
5 & 6 & 7 & 8 & 9 \\
\vdots & \vdots & \vdots & \vdots & \vdots \\
\end{array}$$
The arrangement above shews that any integer comes in one of $5$
flavours: those leaving remainder $0$ upon division by $5$, those
leaving remainder $1$ upon division by $5$, etc. We let
$$5\BBZ = \{\ldots, -15, -10, -5, 0 , 5, 10, 15, \ldots\} = \overline{0},$$
$$5\BBZ + 1 = \{\ldots, -14, -9, -4, 1 , 6, 11, 16, \ldots\} = \overline{1},$$
$$5\BBZ  + 2= \{\ldots, -13, -8, -3, 2 , 7, 12, 17, \ldots\} = \overline{2},$$
$$5\BBZ  + 3= \{\ldots, -12, -7, -2, 3 , 8, 13, 18, \ldots\}=\overline{3},$$
$$5\BBZ + 4 = \{\ldots, -11, -6, -1, 4 , 9, 14, 19, \ldots\}=\overline{4},$$

and
$$\BBZ_5 = \{\overline{0}, \overline{1}, \overline{2}, \overline{3}, \overline{4} \}.$$
\end{exa}
Let $n $ be a fixed positive integer. Define the relation $\equiv$
by $x\equiv y$ if and only if they leave the same remainder upon
division by $n$. Then clearly $\equiv$ is an equivalence relation.
As such it partitions the set of integers $\BBZ$ into disjoint
equivalence classes by Theorem
\ref{thm:equiv_relation_yields_partition}. This motivates the
following definition.

\begin{df} Let $n $ be a positive integer. The $n$ {\em residue classes} upon
division by $n$ are
$$\overline{0} = n\BBZ, \ \ \overline{1} = n\BBZ + 1, \ \ \overline{2} = n\BBZ + 2, \ \ \ldots, \ \ \overline{n - 1} = n\BBZ + n - 1.$$
The {\em set of residue classes modulo} $n$ is
$$\BBZ_n = \{\overline{0}, \overline{1}, \ldots , \overline{n -
1}\}.$$\index{residue classes}
\end{df}
Our interest is now to define some sort of ``addition'' and some
sort of ``multiplication'' in $\BBZ_n$.
\begin{thm}[Addition and Multiplication Modulo $n$] Let $n$ be a
positive integer. For $(\overline{a\vphantom{b}}, \overline{b})\in
(\BBZ_n)^2$ define $\overline{a\vphantom{b}} + \overline{b} =
\overline{r}$, where $r$ is the remainder of $a + b$ upon division
by $n$, and $\overline{a\vphantom{b}}\cdot \overline{b} =
\overline{t}$, where $t$ is the remainder of $ab$ upon division by
$n$. Then these operations are well defined. \index{addition modulo
n}\index{multiplication modulo n}
\end{thm}
\begin{pf}
We need to prove that given  arbitrary representatives of the
residue classes, we always obtain the same result from our
operations. That is, if $\overline{a\vphantom{'}} = \overline{a'}$
and $\overline{b\vphantom{'}} = \overline{b'}$ then we have
$\overline{a\vphantom{b}} + \overline{b} = \overline{a'} +
\overline{b'}$ and $\overline{a\vphantom{b}}\cdot\overline{b} =
\overline{a'\vphantom{b'}}\cdot\overline{b'}$.

\bigskip
Now $$\begin{array}{l}\overline{a} = \overline{a}'\implies \exists
(q,q')\in\BBZ^2, r \in \BBN, a = qn + r, \ a' = q'n + r, \ 0 \leq r
< n, \\ \overline{b} = \overline{b}'\implies \exists
(q_1,q_1')\in\BBZ^2, r_1 \in \BBN, b = q_1n + r_1, \ b' = q_1'n +
r_1, \ 0 \leq r_1 < n. \end{array}$$ Hence $$a+b = (q + q_1)n + r +
r_1, \ \ \ a'+b' = (q' + q_1')n + r + r_1,
$$meaning that both $a+b$ and $a'+b'$ leave the same remainder
upon division by $n$, and therefore $$\overline{a\vphantom{b}} +
\overline{b} = \overline{a+b} = \overline{a'+b'} = \overline{a'} +
\overline{b'}.$$

\bigskip
Similarly $$ab = (qq_1n + qr_1 + rq_1)n + rr_1, \ \ \ a'b' =
(q'q_1'n + q'r_1 + rq_1')n + rr_1,
$$and so both $ab$ and $a'b'$ leave the same remainder
upon division by $n$, and therefore
$$\overline{a\vphantom{b}}\cdot\overline{b} = \overline{ab} = \overline{a'b'}
= \overline{a'\vphantom{b'}}\cdot\overline{b'}.$$ This proves the
theorem.
\end{pf}



\begin{exa}
 Let $$\BBZ_6 = \{\overline{0}, \overline{1}, \overline{2}, \overline{3},
\overline{4}, \overline{5}\}$$be the residue classes modulo $6$.
Construct the natural addition $+$ table for $\BBZ_6$. Also,
construct the natural multiplication $\cdot$ table for $\BBZ_6$.

\end{exa}
\begin{solu}The required tables are given in tables \ref{tab:add_z6}
and \ref{tab:mult_z6}.
\end{solu}
\begin{table}[h]\begin{minipage}{6cm}$$
\begin{array}{|c||c|c|c|c|c|c|}
\hline + & \overline{0}  & \overline{1}  & \overline{2}  &
\overline{3}  & \overline{4}  & \overline{5}    \\
\hline  \overline{0} & \overline{0}  & \overline{1}  &
\overline{2}
& \overline{3} & \overline{4}  & \overline{5}   \\
\hline  \overline{1} & \overline{1}  & \overline{2}  &
\overline{3}
& \overline{4} & \overline{5}  & \overline{0}    \\
\hline  \overline{2} & \overline{2}  & \overline{3}  &
\overline{4}
& \overline{5} & \overline{0}  & \overline{1}   \\
\hline  \overline{3} & \overline{3}  & \overline{4}  &
\overline{5}
& \overline{0} & \overline{1}  & \overline{2}   \\
\hline  \overline{4} & \overline{4}  & \overline{5}  &
\overline{0}
& \overline{1} & \overline{2}  & \overline{3}   \\
\hline  \overline{5} & \overline{5}  & \overline{0}  &
\overline{1}
& \overline{2} & \overline{3}  & \overline{4}   \\

\hline
\end{array}
$$\footnotesize\hangcaption{Addition table for $\BBZ_6$.} \label{tab:add_z6}\end{minipage}\hfill
\begin{minipage}{6cm}$$
\begin{array}{|c||c|c|c|c|c|c|}
\hline \cdot & \overline{0}  & \overline{1}  & \overline{2}  &
\overline{3}  & \overline{4}  & \overline{5}   \\
\hline  \overline{0} & \overline{0}  & \overline{0}  &
\overline{0}
& \overline{0} & \overline{0}  & \overline{0}   \\
\hline  \overline{1} & \overline{0}  & \overline{1}  &
\overline{2}
& \overline{3} & \overline{4}  & \overline{5}   \\
\hline  \overline{2} & \overline{0}  & \overline{2}  &
\overline{4}
& \overline{0} & \overline{2}  & \overline{4}   \\
\hline  \overline{3} & \overline{0}  & \overline{3}  &
\overline{0}
& \overline{3} & \overline{0}  & \overline{3}   \\
\hline  \overline{4} & \overline{0}  & \overline{4}  &
\overline{2}
& \overline{0} & \overline{4}  & \overline{2}   \\
\hline  \overline{5} & \overline{0}  & \overline{5}  &
\overline{4}
& \overline{3} & \overline{2}  & \overline{1}  \\
\hline
\end{array}
$$\footnotesize\hangcaption{Multiplication table for $\BBZ_6$.} \label{tab:mult_z6}\end{minipage}\hfill
\end{table}

\bigskip

We notice that even though $\overline{2} \neq \overline{0}$ and
$\overline{3} \neq \overline{0}$ we have $\overline{2}\cdot
\overline{3} = \overline{0}$ in $\BBZ_6$. This prompts the following
definition.
\begin{df}[Zero Divisor] An element $a\neq \overline{0}$ of
$\BBZ_n$ is called a {\em zero divisor} if $ab = \overline{0}$ for
some $b\in \BBZ_n$. \index{zero divisor!Zn}
\end{df}
We will extend the concept of zero divisor later on to various
algebras.


\begin{exa}
 Let $$\BBZ_7 = \{\overline{0}, \overline{1}, \overline{2}, \overline{3},
\overline{4}, \overline{5}, \overline{6}\}$$be the residue classes
modulo $7$. Construct the natural addition $+$ table for
$\BBZ_7$. Also, construct the natural multiplication $\cdot$ table for $\BBZ_7$.

\end{exa}
\begin{solu}The required tables are given in tables \ref{tab:add_z7}
and \ref{tab:mult_z7}.
\end{solu}
\begin{table}[h]
\begin{minipage}{6cm}
$$
\begin{array}{|c||c|c|c|c|c|c|c|}
\hline + & \overline{0}  & \overline{1}  & \overline{2}  &
\overline{3}  & \overline{4}  & \overline{5}  & \overline{6}  \\
\hline  \overline{0} & \overline{0}  & \overline{1}  &
\overline{2}
& \overline{3} & \overline{4}  & \overline{5}  & \overline{6} \\
\hline  \overline{1} & \overline{1}  & \overline{2}  &
\overline{3}
& \overline{4} & \overline{5}  & \overline{6}  & \overline{0} \\
\hline  \overline{2} & \overline{2}  & \overline{3}  &
\overline{4}
& \overline{5} & \overline{6}  & \overline{0}  & \overline{1} \\
\hline  \overline{3} & \overline{3}  & \overline{4}  &
\overline{5}
& \overline{6} & \overline{0}  & \overline{1}  & \overline{2} \\
\hline  \overline{4} & \overline{4}  & \overline{5}  &
\overline{6}
& \overline{0} & \overline{1}  & \overline{2}  & \overline{3} \\
\hline  \overline{5} & \overline{5}  & \overline{6}  &
\overline{0}
& \overline{1} & \overline{2}  & \overline{3}  & \overline{4} \\
\hline  \overline{6} & \overline{6}  & \overline{0}  &
\overline{1}
& \overline{2} & \overline{3}  & \overline{4}  & \overline{5} \\
\hline
\end{array} $$\footnotesize\hangcaption{Addition table for $\BBZ_7$.} \label{tab:add_z7}\end{minipage}\hfill
\begin{minipage}{6cm}$$
\begin{array}{|c||c|c|c|c|c|c|c|}
\hline \cdot & \overline{0}  & \overline{1}  & \overline{2}  &
\overline{3}  & \overline{4}  & \overline{5}  & \overline{6}  \\
\hline  \overline{0} & \overline{0}  & \overline{0}  &
\overline{0}
& \overline{0} & \overline{0}  & \overline{0}  & \overline{0} \\
\hline  \overline{1} & \overline{0}  & \overline{1}  &
\overline{2}
& \overline{3} & \overline{4}  & \overline{5}  & \overline{6} \\
\hline  \overline{2} & \overline{0}  & \overline{2}  &
\overline{4}
& \overline{6} & \overline{1}  & \overline{3}  & \overline{5} \\
\hline  \overline{3} & \overline{0}  & \overline{3}  &
\overline{6}
& \overline{2} & \overline{5}  & \overline{1}  & \overline{4} \\
\hline  \overline{4} & \overline{0}  & \overline{4}  &
\overline{1}
& \overline{5} & \overline{2}  & \overline{6}  & \overline{3} \\
\hline  \overline{5} & \overline{0}  & \overline{5}  &
\overline{3}
& \overline{1} & \overline{6}  & \overline{4}  & \overline{2} \\
\hline  \overline{6} & \overline{0}  & \overline{6}  &
\overline{5}
& \overline{4} & \overline{3}  & \overline{2}  & \overline{1} \\
\hline
\end{array}
$$ \footnotesize\hangcaption{Multiplication table for $\BBZ_7$.} \label{tab:mult_z7} \end{minipage}\hfill
\end{table}
\begin{exa}
 Solve the equation $$\overline{5}x =
\overline{3}$$in $\BBZ_{11}$. \label{exa:lineqmod11}\end{exa}
\begin{solu}Multiplying by $\overline{9}$ on both sides
$$\overline{45}x = \overline{27},$$that is,
$$x = \overline{5}.$$
\end{solu}


We will use the following result in the next section.
\begin{df}
Let  $a$, $b$ be integers with one of them different from $0$. The
greatest common divisor $d$ of $a, b$, denoted by $d = \gcd(a, b)$
is the largest positive integer that divides both $a$ and $b$.
\end{df}
\begin{thm}[Bachet-Bezout Theorem] The greatest common divisor of any
two  integers $a, b$ can be written as a linear combination of $a$
and $b$, i.e., there are integers $x, y$ with $$  \gcd(a, b) = ax
+ by.$$ \index{greatest common divisor} \label{thm:bachet_bezout}
\index{theorem!Bachet-Bezout}
\end{thm}
\begin{pf} Let $A = \{ ax + by :  ax + by
> 0, x, y \in \BBZ\}$. Clearly one of $\pm a, \pm b$ is in
$A$, as one of $a, b$ is not zero. By the Well Ordering Principle,
$A$ has a smallest element, say $d$. Therefore, there are $x_0,
y_0$ such that $d = ax_0 + by_0.$ We prove that $d = \gcd(a, b).$
To do this we prove that $d$ divides $a$ and $b$ and that if $t$
divides $a$ and $b$, then $t$ must also divide $d$.

\bigskip
 We first prove that $d$ divides $a.$ By the Division Algorithm, we
can find integers $q, r, 0 \leq r < d$ such that $a = dq + r.$
Then $$ r = a - dq = a(1 - qx_0) - bqy_0.$$If $r > 0,$ then $r \in
A$ is smaller than the smallest element of $A$, namely $d,$ a
contradiction. Thus $r = 0.$ This entails $dq = a,$ i.e. $d$
divides $a.$ We can similarly prove that $d$ divides $b.$

\bigskip

Assume that $t$ divides $a$ and  $b$. Then $a = tm, b = tn$ for
integers $m, n.$ Hence $d = ax_0 + by_0 = t(mx_0 + ny_0),$ that
is, $t$ divides $d.$ The theorem is thus proved.  \end{pf}


\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
Write the addition and multiplication tables of $\BBZ_{11}$ under
natural addition and multiplication modulo $11$.\begin{answer} The
tables appear in tables \ref{tab:add_z11} and \ref{tab:mult_z11}.
\begin{table}
\centering
\begin{minipage}{7cm}
$$ {\tiny
\begin{array}{|c||c|c|c|c|c|c|c|c|c|c|c|}
\hline + & \overline{0}  & \overline{1}  & \overline{2}  &
\overline{3}  & \overline{4}  & \overline{5}  & \overline{6}  &
\overline{7}  & \overline{8}  & \overline{9}  & \overline{10} \\
\hline  \overline{0} & \overline{0}  & \overline{1}  &
\overline{2} & \overline{3} & \overline{4}  & \overline{5}  &
\overline{6} &
\overline{7}  & \overline{8}  & \overline{9}  & \overline{10} \\
\hline  \overline{1} & \overline{1}  & \overline{2}  &
\overline{3} & \overline{4} & \overline{5}  & \overline{6}  &
\overline{7} &
\overline{8}  & \overline{9}  & \overline{10}  & \overline{0} \\
\hline  \overline{2} & \overline{2}  & \overline{3}  &
\overline{4} & \overline{5} & \overline{6}  & \overline{7}  &
\overline{8}&
\overline{9}  & \overline{10}  & \overline{0}  & \overline{1}  \\
\hline  \overline{3} & \overline{3}  & \overline{4}  &
\overline{5} & \overline{6} & \overline{7}  & \overline{8}  &
\overline{9} &
\overline{10}  & \overline{0}  & \overline{1}  & \overline{2} \\
\hline  \overline{4} & \overline{4}  & \overline{5}  &
\overline{6} & \overline{7} & \overline{8}  & \overline{9}  &
\overline{10} &
\overline{0}  & \overline{1}  & \overline{2}  & \overline{3} \\
\hline  \overline{5} & \overline{5}  & \overline{6}  &
\overline{7} & \overline{8} & \overline{9}  & \overline{10}  &
\overline{0}&
\overline{1}  & \overline{2}  & \overline{3}  & \overline{4}  \\
\hline  \overline{6} & \overline{6}  & \overline{7}  &
\overline{8} & \overline{9} & \overline{10}  & \overline{0}  &
\overline{1} &
\overline{2}  & \overline{3}  & \overline{4}  & \overline{5} \\
\hline  \overline{7} & \overline{7}  & \overline{8}  &
\overline{9} & \overline{10} & \overline{0}  & \overline{1}  &
\overline{2} &
\overline{3}  & \overline{4}  & \overline{5}  & \overline{6} \\
\hline  \overline{8}& \overline{8} & \overline{9}  & \overline{10}
& \overline{0} & \overline{1} & \overline{2}  & \overline{3}  &
\overline{4} &
\overline{5}  & \overline{6}  & \overline{7}   \\
\hline   \overline{9} &\overline{9} & \overline{10}  &
\overline{0} & \overline{1} & \overline{2} & \overline{3}  &
\overline{4}  & \overline{5}&
\overline{6}  & \overline{7}  & \overline{8}    \\
\hline  \overline{10} &  \overline{10}& \overline{0}  &
\overline{1} & \overline{2} & \overline{3} & \overline{4}  &
\overline{5}  & \overline{6} &
\overline{7}  & \overline{8}  & \overline{9}   \\
\hline
\end{array}}
$$ \footnotesize\hangcaption{Addition table for $\BBZ_{11}$.}
\label{tab:add_z11}
\end{minipage}\hspace{2cm}
\begin{minipage}{7cm}
$${\tiny
\begin{array}{|c||c|c|c|c|c|c|c|c|c|c|c|}
\hline \cdot & \overline{0}  & \overline{1}  & \overline{2}  &
\overline{3}  & \overline{4}  & \overline{5}  & \overline{6}  &
\overline{7}  & \overline{8}  & \overline{9}  & \overline{10}  \\
\hline  \overline{0} & \overline{0}  & \overline{0}  &
\overline{0} & \overline{0} & \overline{0}  & \overline{0}  &
\overline{0} &
\overline{0}  & \overline{0}  & \overline{0}  & \overline{0}  \\
\hline  \overline{1} & \overline{0}  & \overline{1}  &
\overline{2} & \overline{3} & \overline{4}  & \overline{5}  &
\overline{6}&
\overline{7}  & \overline{8}  & \overline{9}  & \overline{10}  \\
\hline  \overline{2} & \overline{0}  & \overline{2}  &
\overline{4} & \overline{6} & \overline{8}  & \overline{10}  &
\overline{1}&
\overline{3}  & \overline{5}  & \overline{7}  & \overline{9}  \\
\hline  \overline{3} & \overline{0}  & \overline{3}  &
\overline{6} & \overline{9} & \overline{1}  & \overline{4}  &
\overline{7} &
\overline{10}  & \overline{2}  & \overline{5}  & \overline{8} \\
\hline  \overline{4} & \overline{0}  & \overline{4}  &
\overline{8} & \overline{1} & \overline{5}  & \overline{9}  &
\overline{2} &
\overline{6}  & \overline{10}  & \overline{3}  & \overline{7} \\
\hline  \overline{5} & \overline{0}  & \overline{5}  &
\overline{10} & \overline{4} & \overline{9}  & \overline{3}  &
\overline{8} &
\overline{2}  & \overline{7}  & \overline{1}  & \overline{6} \\
\hline  \overline{6} & \overline{0}  & \overline{6}  &
\overline{1} & \overline{7} & \overline{2}  & \overline{8}  &
\overline{3} &
\overline{9}  & \overline{4}  & \overline{10}  & \overline{5} \\
\hline  \overline{7} & \overline{0}  & \overline{7}  &
\overline{3} & \overline{10} & \overline{6}  & \overline{2}  &
\overline{9} &
\overline{5}  & \overline{1}  & \overline{8}  & \overline{4} \\
\hline  \overline{8} & \overline{0}  & \overline{8}  &
\overline{5} & \overline{2} & \overline{10}  & \overline{7}  &
\overline{4} &
\overline{1}  & \overline{9}  & \overline{6}  & \overline{3} \\
\hline  \overline{9} & \overline{0}  & \overline{9}  &
\overline{7} & \overline{5} & \overline{3}  & \overline{1}  &
\overline{10}&
\overline{8}  & \overline{6}  & \overline{4}  & \overline{2}  \\
\hline  \overline{10} & \overline{0}  & \overline{10}  &
\overline{9} & \overline{8} & \overline{7}  & \overline{6}  &
\overline{5} &
\overline{4}  & \overline{3}  & \overline{2}  & \overline{1} \\
\hline
\end{array}}
$$  \footnotesize\hangcaption{Multiplication table for $\BBZ_{11}$.}\label{tab:mult_z11} \end{minipage}
\end{table}
\end{answer}
\end{pro}
\begin{pro}
Solve the equation $\overline{3}x^2 - \overline{5}x + \overline{1} =
\overline{0}$ in $\BBZ_{11}$.
\begin{answer}
Observe that $$\overline{3}x^2 - \overline{5}x + \overline{1} =
\overline{0} \implies \overline{4}(\overline{3}x^2 - \overline{5}x +
\overline{1}) = \overline{4}\overline{0} \implies x^2 +
\overline{2}x + \overline{1} + \overline{3} = \overline{0} \implies
(x+\overline{1})^2 = \overline{8}.$$ We need to know whether
$\overline{8}$ is a perfect square modulo $11$. Observe that
$(\overline{11}-\overline{a})^2 = \overline{a}^2$, so we just need to
check half the elements and see that
$$\overline{1}^2 = \overline{1};\quad  \overline{2}^2 = \overline{4};\quad
\overline{3}^2 = \overline{9};\quad \overline{4}^2 =
\overline{5};\quad \overline{5}^2 = \overline{3},      $$ whence
$\overline{8}$ is not a perfect square modulo $11$ and so there are
no solutions.
\end{answer}
\end{pro}
\begin{pro}Solve the equation
$$\overline{5}x^2 = \overline{3}$$in $\BBZ_{11}$.
\begin{answer} From example \ref{exa:lineqmod11}
$$x^2 = \overline{5}.$$
Now, the squares modulo $11$ are $\overline{0}^2 = \overline{0}$,
$\overline{1}^2 = \overline{1}$, $\overline{2}^2 = \overline{4}$,
$\overline{3}^2 = \overline{9}$, $\overline{4}^2 = \overline{5}$,
$\overline{5}^2 = \overline{3}$. Also, $(\overline{11 - 4})^2 =
\overline{7}^2 = \overline{5}$. Hence the solutions are $x =
\overline{4}$ or $x = \overline{7}$.
\end{answer}
\end{pro}
\begin{pro}
Prove that if $n > 0$ is a composite integer, $\BBZ_n$ has zero
divisors.
\end{pro}
\begin{pro}
How many solutions does the equation
$x^4+x^3+x^2+x+\overline{1}=\overline{0}$ have in $\BBZ_{11}$?
\begin{answer}
Put  $f(x)= x^4+x^3+x^2+x+\overline{1}$. Then
$$\begin{array} {l|l|l|}
f(0) =1 \equiv 1 \mod 11 & f(1) =5 \equiv 5 \mod 11 & f(2) =31 \equiv 9 \mod 11\\
f(3) =121 \equiv 0 \mod 11 & f(4) =341 \equiv 0 \mod 11 & f(5) =781 \equiv 0 \mod 11\\
f(6) =1555 \equiv 4 \mod 11 & f(7) =2801 \equiv 7 \mod 11 & f(8) =4681 \equiv 6 \mod 11\\
f(9) =7381 \equiv 0 \mod 11 & f(10) =11111 \equiv 1 \mod 11 & \\
\end{array}$$
\end{answer}
\end{pro}

\end{multicols}

\section{Fields}
\index{field}
\begin{df}
Let $\BBF$ be a set having at least two elements $0_{\BBF }$ and
$1_{\BBF }$ ($0_{\BBF } \neq 1_{\BBF }$) together with two
operations $\cdot$ (multiplication, which we usually represent via
juxtaposition) and $+$ (addition). A {\em field} $\field{ \BBF
}{\cdot}{+}$ is a triplet satisfying the following axioms $\forall
(a,b, c)\in \BBF ^3$:
\begin{enumerate}
\item[F1] Addition and multiplication are associative:
\begin{equation}(a + b)+c = a+(b+c), \ \ \ (ab)c = a(bc)\label{fa:associative}\end{equation} \item[F2]
Addition and multiplication are commutative:
\begin{equation}\label{fa:commutative}a + b = b + a, \ \ \ ab = ba\end{equation} \item[F3] The
multiplicative operation distributes over addition:
\begin{equation}\label{fa:distributive}a(b+c) = ab + ac\end{equation} \item[F4] $0_{\BBF }$ is the additive
identity: \begin{equation}\label{fa:existence_of_0}0_{\BBF } + a = a
+ 0_{\BBF } = a\end{equation} \item[F5] $1_{\BBF }$ is the
multiplicative identity:
\begin{equation}\label{fa:existence_of_1}1_{\BBF }a = a1_{\BBF } =
a\end{equation} \item[F6] Every element has an additive inverse:
\begin{equation}\label{fa:existence_of_additive_inverse}\exists -a\in \BBF, \ \ a
+ (-a) = (-a)+a = 0_{\BBF }\end{equation} \item[F7] Every non-zero
element has a multiplicative inverse: if $a\neq 0_{\BBF
}$\begin{equation}\label{fa:existence_of_multiplicative_inverse}\exists
a^{-1}\in \BBF, \ \ \ aa^{-1} = a^{-1}a =1_{\BBF }\end{equation}
\end{enumerate}
The elements of a field are called {\em scalars}. \index{scalar}
\end{df}
An important property of fields is the following.\index{zero
divisor!in a field}
\begin{thm}\label{thm:field_no_zero_divisors}
A field does not have zero divisors.
\end{thm}
\begin{pf}
Assume that $ab = 0_{\BBF }$. If $a\neq 0_{\BBF }$ then it has a
multiplicative inverse $a^{-1}$. We deduce $$a^{-1}ab =
a^{-1}0_{\BBF } \implies b = 0_{\BBF }.
$$This means that the only way of obtaining a zero product is if
one of the factors is $0_{\BBF }$.  \end{pf}
\begin{exa}
$\field{\BBQ}{\cdot}{+}$, $\field{\BBR}{\cdot}{+}$, and
$\field{\BBC}{\cdot}{+}$ are all fields. The multiplicative identity
in each case is $1$ and the additive identity is $0$.
\end{exa}
\begin{exa}
Let
$$\BBQ (\sqrt{2}) = \{a + \sqrt{2}b : (a, b)\in \BBQ^2\}$$
and define addition on this set as
$$(a + \sqrt{2}b) + (c + \sqrt{2}d) = (a + c) + \sqrt{2}(b +
d),$$and multiplication as
$$(a + \sqrt{2}b)  (c + \sqrt{2}d) = (ac + 2bd) + \sqrt{2}(ad +
bc).$$Then $\field{\BBQ (\sqrt{2})}{\cdot}{+}$ is a field. Observe
$0_{\BBF } = 0$, $1_{\BBF } = 1$, that the additive inverse of $a +
\sqrt{2}b$ is $-a - \sqrt{2}b$, and the multiplicative inverse of $a
+ \sqrt{2}b, (a, b) \neq (0, 0)$ is
$$(a + \sqrt{2}b)^{-1} = \frac{1}{a + \sqrt{2}b} = \frac{a - \sqrt{2}b}{a^2 - 2b^2} = \frac{a}{a^2 - 2b^2} -
\frac{\sqrt{2}b}{a^2 - 2b^2}. $$ Here $a^2 - 2b^2 \neq 0$ since
$\sqrt{2}$ is irrational.
\end{exa}
\begin{thm}\label{thm:Zmodp_is_a_field}
If $p$ is a prime,  $\field{\BBZ_p}{\cdot}{+}$ is a field under
$\cdot$  multiplication modulo $p$ and $+$  addition modulo $p$.
\end{thm}
\begin{pf}
Clearly the additive identity is $\overline{0}$ and the
multiplicative identity is $\overline{1}$. The additive inverse of
$\overline{a}$ is $\overline{p - a}$. We must prove that every
$\overline{a} \in \BBZ _p \setminus \{ \overline{0}\}$ has a
multiplicative inverse. Such an $a$ satisfies $\gcd (a, p) =1$ and
by the Bachet-Bezout Theorem \ref{thm:bachet_bezout}, there exist
integers $x, y$ with $px + ay = 1.$ In such case we have
$$\overline{1} = \overline{px + ay} = \overline{ay} = \overline{a}\cdot \overline{y},$$whence $(\overline{a})^{-1} =
\overline{y}.$
\end{pf}
\begin{df}
A field is said to be of  {\em characteristic} $p \neq 0$ if for
some positive integer $p$ we have $\forall a\in \BBF, pa = 0_{\BBF
}$, and no positive integer smaller than $p$ enjoys this
property.\end{df} If the field does not have characteristic $p \neq
0$ then we say that it is of {\em characteristic } $0$. Clearly
$\BBQ, \BBR$ and $\BBC$ are of characteristic $0$, while $\BBZ_p$
for prime $p$, is of characteristic $p$. \index{characteristic of a
field}

\begin{thm}
The characteristic of a field is either $0$ or a prime.
\end{thm}
\begin{pf}
If the characteristic of the field is $0$, there is nothing to
prove. Let $p$ be the least positive integer for which $\forall a\in
\BBF, pa = 0_{\BBF }$. Let us prove that $p$ must be a prime. Assume
that instead we had $p = st$ with integers $s>1, t>1$. Take
$a=1_{\BBF }$. Then we must have $(st)1_{\BBF } = 0_{\BBF }$, which
entails $(s1_{\BBF })(t1_{\BBF }) = 0_{\BBF }$. But in a field there
are no zero-divisors by Theorem \ref{thm:field_no_zero_divisors},
hence either $s1_{\BBF } = 0_{\BBF }$ or $t1_{\BBF } = 0_{\BBF }$.
But either of these equalities contradicts the minimality of $p$.
Hence $p$ is a prime.  \end{pf}

\section*{\psframebox{Homework}}
\begin{pro}
Consider the set of numbers $$\BBQ (\sqrt{2}, \sqrt{3}, \sqrt{6}) =
\{a + b\sqrt{2} + c\sqrt{3} + d\sqrt{6}: (a, b, c, d)\in\BBQ^4\}.
$$Assume that $\BBQ
(\sqrt{2}, \sqrt{3}, \sqrt{6})$ is a field under ordinary addition
and multiplication. What is  the multiplicative inverse of the
element $ \sqrt{2} + 2\sqrt{3} + 3\sqrt{6}$? \begin{answer} We have
$$\begin{array}{lll}   \dfrac{1}{\sqrt{2} + 2\sqrt{3} + 3\sqrt{6}} & = & \dfrac{ \sqrt{2} + 2\sqrt{3} - 3\sqrt{6}}{ (\sqrt{2} + 2\sqrt{3})^2 - (3\sqrt{6})^2} \\
& = & \dfrac{ \sqrt{2} + 2\sqrt{3} - 3\sqrt{6}}{2 + 12+ 4\sqrt{6}
- 54} \\
& = & \dfrac{ \sqrt{2} + 2\sqrt{3} - 3\sqrt{6}}{-40 + 4\sqrt{6}} \\
& = & \dfrac{ (\sqrt{2} + 2\sqrt{3} - 3\sqrt{6})(-40 - 4\sqrt{6})}{40^2 - (4\sqrt{6})^2} \\
& = & \dfrac{ (\sqrt{2} + 2\sqrt{3} - 3\sqrt{6})(-40 - 4\sqrt{6})}{1504} \\
& = & -\dfrac{16\sqrt{2}+22\sqrt{3}-30\sqrt{6}-18}{376}
\end{array}
$$
\end{answer}
\end{pro}
\begin{pro}
Let $\BBF$ be a field and $a, b$ two non-zero elements of $\BBF$.
Prove that $$-(ab^{-1}) = (-a)b^{-1} = a(-b^{-1}).$$
\begin{answer}
Since $$(-a)b^{-1} + ab^{-1} = (-a + a)b^{-1} = 0_{\BBF }b^{-1} =
0_{\BBF },
$$we obtain by adding $-(ab^{-1})$ to both sides that $$ (-a)b^{-1}  = -(ab^{-1}). $$
Similarly, from $$a(-b^{-1}) + ab^{-1} = a(-b^{-1}+ b^{-1}) =
a0_{\BBF } = 0_{\BBF },
$$we obtain by adding $-(ab^{-1})$ to both sides that $$ a(-b^{-1})  = -(ab^{-1}). $$
\end{answer}
\end{pro}
\begin{pro}
Let $\BBF$ be a field and $a\neq 0_{\BBF }$. Prove that $$(-a)^{-1}
= -(a^{-1}).$$
\end{pro}
\begin{pro}
Let $\BBF$ be a field and $a, b$ two non-zero elements of $\BBF$.
Prove that $$ab^{-1} = (-a)(-b^{-1}).$$
\end{pro}
\section{Functions}
\begin{df}
By a {\em function} or a {\em mapping} from one set to another, we
mean a rule or mechanism that assigns to every input element of
the first set a unique output element of the second set. We shall
call the set of inputs the {\em domain} of the function, the set
of {\em possible} outputs the {\em target set} of the function,
and the set of {\em actual} outputs the {\em image} of the
function.
\end{df}
We will generally refer to a function with the following notation:
$$\fun{f}{x}{f(x)}{D}{T}.$$
Here $f$ is the {\em name of the function}, $D$ is its domain, $T$
is its target set, $x$ is the name of a typical input and $f(x)$
is the output or {\em image of $x$ under $f$}. We call the
assignment $x \mapsto f(x)$ the {\em assignment rule} of the
function. Sometimes $x$ is also called the {\em independent
variable.} The set $f(D) = \{f(a)|a\in D\}$ is called the {\em
image} of $f$. Observe that $f(D) \subseteq T.$ \vspace{1cm}
\begin{figure}[htb]
\centering \begin{minipage}{6cm}
$$
\psset{unit=.75cm} \rput(-1.5,0){\rput(1.5, .8){$\alpha$} \rput(0,
.5){1} \psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(.2,.5)(2.9,.5) \rput(3,.5){2} \rput(0,0){2}
\psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(.2,0)(2.9,0) \rput(3,0){8} \rput(0, -.5){3}
\psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(0.2,-.5)(2.9,-.5) \rput(3,-.5){4}
\psellipse(1,2) \psellipse(3,0)(1,2)}
$$\vspace{1cm} \footnotesize\hangcaption{An injection.} \label{injection}
\end{minipage}
\begin{minipage}{6cm}$$ \psset{unit=.75cm} \rput(-1.5,0){\rput(1.5, .8){$\beta$} \psellipse(1,2)
\psellipse(3,0)(1,2) \rput(0,0){2} \psline[linewidth=.4pt,
labels=none, showpoints=true]{*->>}(.2,0)(2.9,0) \rput(3,0){2}
\rput(0, .5){1} \psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(.2,.5)(2.9,.5) \rput(3,.5){4} \rput(0,
-.5){3} \psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(0.2,-.5)(2.9,.5)}
$$\vspace{1cm} \footnotesize \hangcaption{Not an injection}
\label{not_injection}
\end{minipage}
\end{figure}
\begin{df}
A function $\dis{\fun{f}{x}{f(x)}{X}{Y}}$ is said to be {\em
injective} or {\em one-to-one} if $\forall (a, b) \in X^2,$ we
have
$$ a \neq b \implies   f(a) \neq f(b).$$ This is equivalent to saying that
$$f(a) = f(b) \implies   a = b.$$
\end{df}
\begin{exa}
The function  $\alpha$ in the diagram \ref{injection} is an
injective function. The function $\beta$ represented by the
diagram \ref{not_injection}, however, is not injective, $\beta (3)
= \beta (1) = 4$, but $3 \neq 1$.

\end{exa}
\begin{exa} Prove that $$\fun{t}{x}{\frac{x + 1}{x - 1}}{\BBR \setminus \{1\}}{\BBR \setminus \{1\}}
$$ is an injection.\end{exa} \begin{solu}
Assume $t(a) = t(b).$ Then
$${\everymath{\dis}\begin{array}{ccccccc}
t(a) & = & t(b) &
\implies   &  \frac{a + 1}{a - 1} & = & \frac{b + 1}{b - 1} \\
& & & \implies   & (a + 1)(b - 1) & = & (b + 1)(a - 1) \\
& & & \implies   & ab - a + b - 1 &  = & ab - b + a - 1 \\
& & & \implies   & 2a & = & 2b \\
& & & \implies   & a & = & b \\
\end{array}}
$$ We have proved that $t(a) = t(b) \implies a = b$, which shews
that $t$ is injective. \end{solu}\vspace{1cm}
\begin{figure}[h]
\centering
\begin{minipage}{6cm}
$$
\psset{unit=.75cm} \rput(-1.5,0){\rput(0,0){2} \rput(0, .5){1}
\rput(0, -.5){3} \rput(3,0){2} \rput(3,.5){4} \rput(1.5,
.9){$\beta$} \psellipse(1,2) \psellipse(3,0)(1,2)
\psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(.2,0)(2.9,0) \psline[linewidth=.4pt,
labels=none, showpoints=true]{*->>}(.2,.5)(2.9,.5)
\psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(0.2,-.5)(2.9,.5)}
$$\vspace{1cm}
\footnotesize\hangcaption{A surjection}
\label{surjection}\end{minipage}
\begin{minipage}{6cm}
$$
\psset{unit=.75cm} \rput(-1.5,0){\rput(3, -1){8} \rput(1.5,
.8){$\gamma$} \psellipse(1,2) \psellipse(3,0)(1,2) \rput(0,0){2}
\psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(.2,0)(2.9,0) \rput(3,0){2} \rput(0, .5){1}
\psline[linewidth=.4pt, labels=none,
showpoints=true]{*->>}(.2,.5)(2.9,.5) \rput(3,.5){4}}
$$\vspace{.3in}
\footnotesize\hangcaption{Not a surjection} \label{not_surjection}

\end{minipage}\end{figure}
\begin{df}
A function $f: A \rightarrow B$ is said to be {\em surjective} or
{\em onto} if  $(\forall b \in B) \ (\exists a \in A) : f(a) = b.$
That is, each element of $B$ has a  pre-image in $A$.

\end{df}
\begin{rem}A function is surjective if its image coincides with its target set.
It is easy to see that a graphical criterion for a function to be
surjective is that every horizontal line passing through a point
of the target set (a subset of the $y$-axis) of the function must
also meet the curve.
\end{rem}

\begin{exa}

The function $\beta$ represented by diagram \ref{surjection} is
surjective. The function $\gamma$ represented by  diagram
\ref{not_surjection} is not surjective as $8$ does not have a
preimage.
\end{exa}
\begin{exa}
Prove that $\fun{t}{x}{x^3}{\BBR}{\BBR}$ is a surjection.
\end{exa}
\begin{solu} Since the graph of $t$ is that of a cubic polynomial with only
one zero, every horizontal line passing through a point in $\BBR$
will eventually meet the graph of $t$, whence $t$ is surjective. To
prove this analytically, proceed as follows. We must prove that
$(\forall \ b \in\BBR) \ (\exists a)$ such that $t(a) = b.$ We
choose $a$ so that $a = b^{1/3}$. Then
$$t(a) = t(b^{1/3}) = (b^{1/3})^3 = b.$$Our
choice of $a$ works and hence the function is surjective.
\end{solu}
\begin{df}
A function is {\em bijective} if it is both injective and
surjective.
\end{df}

\section*{\psframebox{Homework}}
\begin{pro}
Prove that $$\fun{h}{x}{x^3}{\BBR}{\BBR}$$ is an injection.
\begin{answer} Assume $h(b) = h(a).$ Then
$$\begin{array}{lllll}
h(a) & = & h(b)&  \implies   & a^3 = b^3 \\
& & & \implies   & a^3 - b^3  = 0 \\
& & & \implies   & (a - b)(a^2 + ab + b^2)  =  0 \\
\end{array}
$$
 Now, $$b^2 + ab + a^2 = \left(b + \frac{a}{2}\right)^2 +
\frac{3a^2}{4}.$$ This shews that $b^2 + ab + a^2$ is positive
unless both $a$ and $b$ are zero. Hence $a -  b = 0$ in all cases.
We have shewn that $h(b) = h(a) \implies   a = b$, and the
function is thus injective.
\end{answer}
\end{pro}

\begin{pro}
Shew that
$$\fun{f}{x}{\frac{6x}{2x - 3}}{\BBR\setminus \left\{\frac{3}{2}\right\}}{\BBR\setminus
\{3\}}$$is a bijection. \begin{answer} We have
$$\begin{array}{lll} f(a) = f(b) & \iff & \dfrac{6a}{2a - 3} = \dfrac{6b}{2b - 3} \\
& \iff & 6a(2b - 3) = 6b(2a - 3) \\
& \iff & 12ab - 18a = 12ab - 18b \\ & \iff & -18a = -18b \\ & \iff
& a = b,
\end{array}$$
proving that $f$ is injective. Now, if $$f(x) = y, \ \ y\neq 3,
$$then
$$\frac{6x}{2x - 3} = y,$$that is $6x = y(2x - 3)$. Solving for
$x$ we find $$x = \frac{3y}{2y - 6}.$$Since $2y - 6 \neq 0$, $x$
is a real number, and so $f$ is surjective. On combining the
results we deduce that $f$ is bijective.

\end{answer}
\end{pro}
\chapter{Matrices and Matrix Operations}
\section{The Algebra of Matrices}
\begin{df}\index{matrix}
Let $ \field{ \BBF }{\cdot}{+}$ be a field.  An  $m\times n$ ($m$ by
$n$) {\em matrix} $A$ with $m$ rows and $n$ columns with entries
over $\BBF$ is a rectangular array of the form
$$A = \begin{bmatrix}a_{11} & a_{12} & \cdots & a_{1n} \cr a_{21} & a_{22} & \cdots & a_{2n} \cr
\vdots & \vdots & \cdots & \vdots \cr a_{m1} & a_{m2} & \cdots &
a_{mn} \cr \end{bmatrix},$$ where $\forall (i, j) \in \{1, 2, \ldots
, m \}\times \{1, 2, \ldots , n\}, \ \ a_{ij}\in \BBF$.
\index{matrix}
\end{df}
\begin{rem}
As a shortcut, we often use the notation $A = [a_{ij}]$ to denote
the matrix $A$ with entries $a_{ij}$. Notice that when we refer to
the matrix we put brackets---as in ``$[a_{ij}]$,'' and when we
refer to a specific entry we do not use the surrounding
brackets---as in ``$a_{ij}$.''
\end{rem}
\begin{exa}
$$A = \begin{bmatrix}0 & -1 & 1 \cr 1 & 2 & 3 \cr\end{bmatrix}$$is a $2\times 3$  matrix and
$$B = \begin{bmatrix}-2 & 1 \cr 1 & 2 \cr 0 & 3 \cr\end{bmatrix}$$ is a $3\times 2$
matrix.
\end{exa}
\begin{exa}
Write out explicitly the $4\times 4$ matrix $A = [a_{ij}]$ where
$a_{ij} = i^2 - j^2$.
\end{exa}
\begin{solu} This is $$A = \begin{bmatrix} 1^2 - 1^2 & 1^2 - 2^2 & 1^2 -
3^2 & 1^2 - 4^2 \cr 2^2 - 1^2 & 2^2 - 2^2 & 2^2 - 3^2 & 2^2 - 4^2
\cr 3^2 - 1^2 & 3^2 - 2^2 & 3^2 - 3^2 & 3^2 - 4^2 \cr 4^2 - 1^2 &
4^2 - 2^2 & 4^2 - 3^2 & 4^2 - 4^2 \cr
\end{bmatrix} =
\begin{bmatrix}   0 & -3& -8 & -15 \cr
 3 & 0 & -5 & -12 \cr
 8 & 5 & 0 & -7 \cr
 15 & 12 & 7 & 0 \cr\end{bmatrix}.   $$
 \end{solu}
\begin{df}
Let $ \field{ \BBF }{\cdot}{+}$ be a field. We denote by
$\mat{m\times n}{ \BBF }$ the set of all $m\times n$ matrices with
entries over $\BBF$. $\mat{n\times n}{ \BBF }$ is, in particular,
the set of all square matrices of size $n$ with entries over $\BBF$.
\end{df}

\begin{df}
The $m\times n$ {\em zero matrix} ${\bf 0}_{m\times
n}\in\mat{m\times n}{ \BBF }$ is the matrix with $0_{\BBF }$'s
everywhere,
$${\bf 0}_{m\times n} = \begin{bmatrix}0_{\BBF } & 0_{\BBF } & 0_{\BBF } & \cdots & 0_{\BBF } \cr 0_{\BBF } & 0_{\BBF } & 0_{\BBF } & \cdots & 0_{\BBF } \cr 0_{\BBF } & 0_{\BBF } & 0_{\BBF }
 & \cdots & 0_{\BBF } \cr
\vdots & \vdots & \vdots & \cdots & \vdots \cr 0_{\BBF } & 0_{\BBF }
& 0_{\BBF } & \cdots & 0_{\BBF } \cr\end{bmatrix}.$$ When $m = n$ we
write ${\bf 0}_n$ as a shortcut for ${\bf 0}_{n\times
n}$.\index{matrix!zero}
\end{df}
\begin{df} \index{matrix!identity}
The $n\times n$ {\em identity matrix} ${\bf I}_n\in\mat{n\times n}{
\BBF }$ is the matrix with $1_{\BBF }$'s on the  main diagonal and
$0_{\BBF }$'s everywhere else,
$${\bf I}_n = \begin{bmatrix}1_{\BBF } & 0_{\BBF } & 0_{\BBF } & \cdots & 0_{\BBF } \cr 0_{\BBF } & 1_{\BBF } & 0_{\BBF } & \cdots & 0_{\BBF } \cr 0_{\BBF } & 0_{\BBF } & 1_{\BBF }
& \cdots & 0_{\BBF } \cr \vdots & \vdots & \vdots & \cdots & \vdots
\cr 0_{\BBF } & 0_{\BBF } & 0_{\BBF } & \cdots & 1_{\BBF }
\cr\end{bmatrix}.$$
\end{df}

\begin{df}[Matrix Addition and Multiplication of a Matrix by a Scalar]\index{matrix!addition}\index{matrix!scalar multiplication}
Let $A = [a_{ij}] \in \mat{m\times n}{ \BBF }$, $B = [b_{ij}] \in
\mat{m\times n}{ \BBF }$ and $\alpha\in \BBF$. The matrix $A +
\alpha B$ is the matrix $C \in \mat{m\times n}{ \BBF }$ with entries
$C = [c_{ij}]$ where $c_{ij} = a_{ij}+\alpha  b_{ij}$.
\end{df}

\begin{exa}
For $A = \begin{bmatrix} 1 & 1 \cr -1 & 1 \cr 0 & 2 \cr
\end{bmatrix}$ and $B = \begin{bmatrix} -1 & 1 \cr 2 & 1 \cr 0 & -1 \cr
\end{bmatrix}$ we have $$A + 2B = \begin{bmatrix} -1 & 3 \cr 3 & 3 \cr 0 & 0 \cr
\end{bmatrix}.   $$
\end{exa}
\begin{thm}Let $(A, B, C)\in (\mat{m\times n}{ \BBF })^3$ and $(\alpha, \beta) \in
\BBF ^2$. Then
\begin{enumerate}
\item[M1] $\mat{m\times n}{ \BBF }$ is closed under matrix addition
and scalar multiplication
\begin{equation}\label{m:closure} A + B \in \mat{m\times n}{ \BBF }, \
\ \ \ \alpha A \in \mat{m\times n}{ \BBF }
\end{equation}
\item[M2] Addition of matrices is commutative
\begin{equation}\label{m:commutative} A + B = B + A
\end{equation}
\item[M3] Addition of matrices is associative
\begin{equation}\label{m:associative} A + (B + C) = (A+B)+C
\end{equation}
\item[M4]   There is a matrix ${\bf 0}_{m\times n}$ such that
\begin{equation}\label{m:existence0}A+ {\bf 0}_{m\times n} = {\bf 0}_{m\times n} + A = A
\end{equation}
\item[M5] There is a matrix $-A$ such that
\begin{equation}\label{m:existence_additive_inverse} A + (-A) = (-A) + A =
{\bf 0}_{m\times n}
\end{equation}
\item[M6] Distributive law
\begin{equation}\label{m:distributive_law1} \alpha (A + B) =
\alpha A + \alpha B
\end{equation}
\item[M7] Distributive law
\begin{equation}\label{m:distributive_law2} (\alpha + \beta)A =
\alpha A + \beta A
\end{equation}
\item[M8]   \begin{equation}\label{m:1A} 1_{\BBF }A = A
\end{equation}
\item[M9]   \begin{equation}\label{m:abA} \alpha (\beta A) =
(\alpha\beta)A
\end{equation}

\end{enumerate}
\end{thm}
\begin{pf}
The theorem follows at once by reducing each statement to an
entry-wise statement and appealing to the field axioms. \end{pf}

\section*{\psframebox{Homework}}



\begin{pro}
Write out explicitly the $3\times 3$ matrix $A = [a_{ij}]$ where
$a_{ij} = i^j$.
\begin{answer} $A = \begin{bmatrix} 1 & 1 & 1 \cr
2 & 4 & 8 \cr 3 & 9 & 27 \cr \end{bmatrix}.$  \end{answer}
\end{pro}
\begin{pro}
Write out explicitly the $3\times 3$ matrix $A = [a_{ij}]$ where
$a_{ij} = ij$.
\begin{answer} $A = \begin{bmatrix} 1 & 2 & 3 \cr
2 & 4 & 6 \cr 3 & 6 & 9 \cr \end{bmatrix}.$  \end{answer}
\end{pro}

\begin{pro}
Let $$M = \begin{bmatrix}a & -2a & c \cr  0 & -a & b \cr a + b & 0 &
-1 \cr\end{bmatrix},\ \ \ \ N = \begin{bmatrix}1 & 2a & c \cr  a & b
- a & -b \cr a - b & 0 & -1 \cr \end{bmatrix}$$ be square matrices
with entries over $\BBR$. Find $M+N$ and $2M$.
\begin{answer}
$M + N = \begin{bmatrix}a + 1 & 0 & 2c \cr  a &  b - 2a & 0 \cr 2a
& 0 & -2 \cr\end{bmatrix}, \ \ \ 2M = \begin{bmatrix}2a & -4a & 2c
\cr 0 & -2a & 2b \cr 2a + 2b & 0 & -2 \cr\end{bmatrix}.$
\end{answer}
\end{pro}
\begin{pro}
Determine $x$ and $y$ such that $$\begin{bmatrix} 3 & x & 1 \cr 1
& 2 & 0 \cr
\end{bmatrix} + 2\begin{bmatrix} 2 & 1 & 3 \cr 5 & x & 4 \cr\end{bmatrix} = \begin{bmatrix}
7 & 3 & 7 \cr 11 & y & 8 \cr\end{bmatrix}.$$
\begin{answer} $x = 1$ and $y = 4$.  \end{answer}
\end{pro}
\begin{pro}
Determine $2\times 2$ matrices $A$ and $B$ such that
$$2A - 5B = \begin{bmatrix} 1 & -2 \cr 0 & 1 \cr \end{bmatrix}, \ \ \ -2A + 6B = \begin{bmatrix} 4 & 2 \cr 6 & 0 \cr\end{bmatrix}.   $$
\begin{answer} $A = \begin{bmatrix}13 & -1 \cr 15 & 3 \cr \end{bmatrix}$, $B = \begin{bmatrix}5 & 0 \cr 6 & 1 \cr \end{bmatrix}$ \end{answer}
\end{pro}
\begin{pro}
Let $A=[a_{ij}]\in\mat{n\times n}{\BBR}$. Prove that
$$\min_j\max_ia_{ij}\geq\max_i\min_ja_{ij}.$$
\end{pro}
\begin{pro}
A person goes along the rows of a movie theater and asks the
tallest person of each row to stand up. Then he selects the
shortest of these people, who we will call the {\em shortest
giant}. Another person goes along the rows and asks the shortest
person to stand up and from these he selects the tallest, which we
will call the {\em tallest midget}. Who is taller, the tallest
midget or the shortest giant?
\end{pro}

\begin{pro}[{\red\bf Putnam Exam, 1959}]
Choose five elements from the matrix $$\begin{bmatrix}11 & 17 & 25
& 19 & 16 \cr 24 & 10 & 13 & 15 & 3 \cr 12 & 5 & 14 & 2 & 18 \cr
23 & 4 & 1 & 8 & 22 \cr 6 & 20 & 7 & 21 & 9
\end{bmatrix},$$ no two coming from the same row or column, so that
the minimum of these five elements is as large as possible.
\begin{answer}
The set of border elements is the union of two rows and two columns.
Thus we may choose at most four elements from the border, and at
least one from the central $3\times 3$ matrix. The largest element
of this $3\times 3$ matrix is $15$, so the minimum of any allowable choice does
not exceed $15$. The choice $25$, $15$, $18$, $23$, $20$ shews that
the largest minimum is indeed $15$.\end{answer}
\end{pro}


\section{Matrix Multiplication}
\begin{df}\index{matrix!multiplication of}
Let $A = [a_{ij}] \in \mat{m\times n}{ \BBF }$ and $B = [b_{ij}] \in
\mat{n\times p}{ \BBF }$. Then the matrix product $AB$ is defined as
the matrix $C = [c_{ij}]\in \mat{m\times p}{ \BBF }$ with entries
$c_{ij} = \sum _{l=1} ^{n} a_{il} b_{lj}$:
$$\begin{bmatrix}a_{11} & a_{12} & \cdots & a_{1n} \cr a_{21} & a_{22} & \cdots & a_{2n}
\cr \vdots & \vdots & \cdots & \vdots  \cr  {\red a_{i1}} & {\red
a_{i2}} & {\red \cdots} & {\red a_{in}}\cr  \vdots & \vdots &
\cdots & \vdots \cr a_{m1} & a_{m2} & \cdots & a_{mn} \cr
\end{bmatrix}\begin{bmatrix}b_{11} &\cdots & {\blue b_{1j}} & \cdots & b_{1p}
\cr b_{21} &\cdots & {\blue b_{2j}} & \cdots & b_{2p} \cr \vdots
&\cdots & {\blue \vdots} & \cdots & \vdots \cr b_{n1} &\cdots &
{\blue b_{nj}} & \cdots & b_{np} \cr
\end{bmatrix} = \begin{bmatrix} c_{11} & \cdots & c_{1p} \cr c_{21} & \cdots & c_{2p} \cr
 \vdots & \cdots & \vdots \cr
 \cdots & {\green c_{ij}} & \cdots \cr \vdots & \cdots & \vdots \cr c_{m1} & \cdots & c_{mp} \cr\end{bmatrix}.$$
\end{df}
\begin{rem}
Observe that we use juxtaposition rather than a special symbol to
denote matrix multiplication. This will simplify notation. In order
to obtain the $ij$-th entry of the matrix $AB$ we multiply
elementwise the $i$-th row of $A$ by the $j$-th column of $B$.
Observe that $AB$ is an $m\times p$ matrix.
\end{rem}


\begin{exa}
Let $M = \begin{bmatrix}1 & 2  \cr 3 & 4\cr\end{bmatrix}$ and $N =
\begin{bmatrix}5 & 6  \cr 7& 8 \cr \end{bmatrix}$ be matrices
over $\BBR$. Then
$$MN =\begin{bmatrix}1 & 2  \cr 3 & 4\cr\end{bmatrix}\begin{bmatrix}5 & 6  \cr 7& 8 \cr \end{bmatrix}
= \begin{bmatrix} 1\cdot 5 + 2\cdot 7 &  1\cdot 6 + 2\cdot 8 \cr
3\cdot 5 + 4\cdot 7 & 3\cdot 6 + 4 \cdot 8 \cr\end{bmatrix} =
\begin{bmatrix}19 & 22  \cr 43 & 50 \cr \end{bmatrix},$$ and
$$NM =
\begin{bmatrix}5 & 6  \cr 7& 8 \cr
\end{bmatrix}\begin{bmatrix}1 & 2  \cr 3 & 4\cr\end{bmatrix}=
\begin{bmatrix}5\cdot 1 + 6\cdot 3 &  5\cdot 2 + 6\cdot 4 \cr 7\cdot 1 + 8\cdot 3 & 7\cdot 2 + 8\cdot 4\cr
\end{bmatrix} = \begin{bmatrix}23 & 34  \cr 31& 46 \cr \end{bmatrix}.$$Hence, in particular, matrix
multiplication is not necessarily commutative.

\end{exa}

\begin{exa}
We have $$\begin{bmatrix} {1} & {1} & {1} \cr
 {1}  & {1} & 1 \cr
{1}  & {1} & {1} \cr
\end{bmatrix}
\begin{bmatrix}
{2}  & -{1} & -{1} \\ \vspace{1mm} -{1} & {2} & -{1}
\\ \vspace{1mm}
-{1}  & -{1} & {2} \\
\end{bmatrix}  = \begin{bmatrix}
0  & 0 & 0 \cr \vspace{1mm} 0  & 0 & 0 \cr \vspace{1mm} 0  & 0 & 0
\cr
\end{bmatrix},$$ over $\BBR$. Observe then that  the product of two non-zero matrices may be
the zero matrix.
\end{exa}
\begin{exa}
Consider the matrix
$$A = \begin{bmatrix} \overline{2} & \overline{1} & \overline{3} \cr
 \overline{0} & \overline{1} & \overline{1} \cr
  \overline{4} & \overline{4} & \overline{0} \cr
  \end{bmatrix}$$with entries over $\BBZ_5$. Then
 $$\begin{array}{lll}A^2 & = &
\begin{bmatrix} \overline{2} & \overline{1} & \overline{3} \cr
 \overline{0} & \overline{1} & \overline{1} \cr
  \overline{4} & \overline{4} & \overline{0} \cr
  \end{bmatrix}\begin{bmatrix} \overline{2} & \overline{1} & \overline{3} \cr
 \overline{0} & \overline{1} & \overline{1} \cr
  \overline{4} & \overline{4} & \overline{0} \cr
  \end{bmatrix} \vspace{2mm}\\
  & = & \begin{bmatrix} \overline{1} & \overline{0} & \overline{2} \cr
 \overline{4} & \overline{0} & \overline{1} \cr
  \overline{3} & \overline{3} & \overline{1} \cr
  \end{bmatrix}.
 \end{array} $$
\end{exa}

\begin{rem}
Even though matrix multiplication is not necessarily commutative,
it is associative.
\end{rem}
\begin{thm}
If $(A, B, C) \in \mat{m\times n }{ \BBF }\times\mat{n\times r}{
\BBF }\times\mat{r \times s}{ \BBF }$ we have $$(AB)C = A(BC),$$
i.e., matrix multiplication is associative.\end{thm}\begin{pf} To
shew this we only need to consider the $ij$-th entry of each side,
appeal to the associativity of the underlying field $\BBF$  and
verify that both sides are indeed equal to
$$\sum _{k = 1} ^{n}\sum _{k' = 1} ^{r} a_{ik}b_{kk'}c_{k'j}.$$


\end{pf}
\begin{rem}
By virtue of associativity, a square matrix commutes with its
powers, that is, if $A\in\mat{n\times n}{ \BBF }$, and $(r,
s)\in\BBN^2$, then $(A^r)(A^s) = (A^s)(A^r) = A^{r + s}$.
\end{rem}



\begin{exa}
Let $A\in\mat{3\times 3}{\BBR}$ be given by$$A =
\begin{bmatrix} 1 & 1 & 1 \cr 1 & 1 & 1 \cr 1 & 1 & 1 \cr
\end{bmatrix}.$$Demonstrate, using induction, that  $A^n = 3^{n - 1}A$ for $n \in \BBN , n \geq
1$.\end{exa} \begin{solu} The assertion is trivial for $n = 1$.
Assume its truth for $n -1$, that is, assume $A^{n - 1} = 3^{n -
2}A$. Observe that
$$A^2 = \begin{bmatrix} 1 & 1 & 1 \cr 1 & 1 & 1 \cr 1 & 1 & 1 \cr
\end{bmatrix}\begin{bmatrix} 1 & 1 & 1 \cr 1 & 1 & 1 \cr 1 & 1 & 1 \cr
\end{bmatrix} = \begin{bmatrix} 3 & 3 & 3 \cr 3 & 3 & 3 \cr 3 & 3 & 3 \cr
\end{bmatrix} = 3A.$$Now
$$A^n = AA^{n - 1} = A(3^{n - 2}A) = 3^{n - 2}A^2 = 3^{n - 2}3A = 3^{n - 1}A, $$
and so the assertion is proved by induction.
\end{solu}





\begin{thm}
Let $A\in\mat{n\times n}{ \BBF }$. Then there is a unique identity
matrix. That is, if $E\in\mat{n\times n}{ \BBF }$  is such that $AE
= EA = A$, then $E = {\bf I}_n$.
\end{thm}
\begin{pf}
It is clear that for any $A\in\mat{n\times n}{ \BBF }$, $A{\bf I}_n
= {\bf I}_nA = A$. Now because $E$ is an identity, $E{\bf I}_n =
{\bf I}_n$. Because ${\bf I}_n$ is an identity, $E{\bf I}_n =  E$.
Whence
$${\bf
I}_n = E{\bf I}_n =  E,$$demonstrating uniqueness.
\end{pf}

\begin{exa}Let $A = [a_{ij}]\in \mat{n\times n}{\BBR}$ be such that $a_{ij} = 0$
for $i > j$ and $a_{ij} = 1$ if $i \leq j$. Find $A^2$. \end{exa}
\begin{solu} Let $A^2 = B = [b_{ij}]$. Then
$$b_{ij} = \sum_{k = 1} ^n a_{ik}a_{kj}.$$Observe that the $i$-th
row of $A$ has $i - 1$ $0$'s followed by $n - i + 1$ $1$'s, and
the $j$-th column of $A$ has $j$ $1$'s followed by $n - j$ 0's.
Therefore if $i - 1 > j$, then $b_{ij} = 0$. If $i \leq j + 1$,
then $$b_{ij} = \sum_{k = i} ^{j} a_{ik}a_{kj} = j - i + 1.$$ This
means that $$A^2 = \begin{bmatrix} 1 & 2 & 3 & 4 & \cdots & n - 1
& n \cr 0 & 1 & 2 & 3 & \cdots & n - 2 & n - 1 \cr 0 & 0 & 1 & 2 &
\cdots & n - 3 & n - 2 \cr \vdots & \vdots & \vdots & \vdots &
\cdots &\vdots & \vdots \cr 0 & 0 & 0 & 0 & \cdots & 1 & 2 \cr 0 &
0 & 0 & 0 & \cdots & 0 & 1\cr
\end{bmatrix}.$$
\end{solu}




\section*{\psframebox{Homework}}




\begin{pro}
Determine the product $$\begin{bmatrix} 1 & -1 \cr 1 &
1\cr\end{bmatrix}\begin{bmatrix}-2 & 1 \cr 0 & -1 \cr
\end{bmatrix}\begin{bmatrix} 1 & 1 \cr 1 & 2 \cr
\end{bmatrix}.$$
\begin{answer}$\begin{bmatrix} 2 & 2 \cr 0 & -2 \cr
\end{bmatrix}$   \end{answer}
\end{pro}
\begin{pro}
Let  $A = \begin{bmatrix}1 & 0 & 0 \cr 1 & 1 & 0 \cr 1 & 1 & 1 \cr
\end{bmatrix},  \ \ B = \begin{bmatrix}
a & b & c \cr c & a & b \cr b & c & a \cr \end{bmatrix}$. Find
$AB$ and $BA$.
\begin{answer}  $$AB = \begin{bmatrix} a & b & c \cr c+a & a + b & b + c
\cr a + b + c & a + b + c & a + b + c \cr
 \end{bmatrix}, \ BA = \begin{bmatrix} a + b + c & b + c & c
 \cr a + b + c & a + b & b \cr a + b + c & c + a & a \cr
\end{bmatrix}
 $$ \end{answer}
\end{pro}
\begin{pro}
Find $a+b+c$ if $\begin{bmatrix}1 & 2 & 3 \cr 2 & 3 & 1 \cr 3 & 1 &
2 \cr \end{bmatrix}
\begin{bmatrix}1 & 1 & 1 \cr 2 & 2 & 2 \cr 3 & 3 & 3 \cr \end{bmatrix} =
\begin{bmatrix}a & a & a \cr b & b & b \cr c & c & c \cr \end{bmatrix}$.
\begin{answer}
$\begin{bmatrix}1 & 2 & 3 \cr 2 & 3 & 1 \cr 3 & 1 & 2 \cr
\end{bmatrix}
\begin{bmatrix}1 & 1 & 1 \cr 2 & 2 & 2 \cr 3 & 3 & 3 \cr \end{bmatrix} =
\begin{bmatrix}14 & 14 & 14 \cr 11 & 11 & 11 \cr 11 & 11 & 11 \cr \end{bmatrix}$, whence $a+b+c=36$.
\end{answer}
\end{pro}
\begin{pro}
Let $N = \begin{bmatrix}0 & -2  & -3 & -4 \cr 0 & 0 & -2 & -3  \cr
0 & 0 & 0 & -2 \cr 0 & 0 & 0 & 0 \cr
\end{bmatrix}$. Find $N^{2008}$.
\begin{answer}
An easy computation leads to  $N^2= \begin{bmatrix}0 & 0  & 4 & 12
\cr 0 & 0 & 0& 4  \cr  0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr
\end{bmatrix}$,  $N^3= \begin{bmatrix}0 & 0  & 0 & -8 \cr 0 & 0 & 0&
0  \cr  0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr
\end{bmatrix}$ and $ N^4=\begin{bmatrix}0 & 0  & 0 & 0 \cr 0 & 0 & 0&
0  \cr  0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr
\end{bmatrix}$. Hence any power---from the fourth on---is the zero matrix.
\end{answer}
\end{pro}
\begin{pro}
Let
$$ A = \begin{bmatrix}
\overline{2} & \overline{3} & \overline{4} & \overline{1} \cr
 \overline{1} & \overline{2} & \overline{3} & \overline{4} \cr
  \overline{4} & \overline{1} & \overline{2} & \overline{3} \cr
   \overline{3} & \overline{4} & \overline{1} & \overline{2} \cr
\end{bmatrix}, \ \ \ B = \begin{bmatrix}
\overline{1} & \overline{1} & \overline{1} & \overline{1} \cr
\overline{1} & \overline{1} & \overline{1} & \overline{1} \cr
\overline{1} & \overline{1} & \overline{1} & \overline{1} \cr
\overline{1} & \overline{1} & \overline{1} & \overline{1} \cr
\end{bmatrix}$$ be matrices in $\mat{4\times 4}{\BBZ_5}$. Find the
products $AB$ and $BA$.
\begin{answer}
$AB = BA = {\bf 0}_4$, since every row of $A$ and every column of
$A$ has sum $\overline{2}+\overline{3}+\overline{4}+\overline{1} =
\overline{10} = \overline{0}$ in $\BBZ_5$, and every entry of $B$
is $\overline{1}$.

\end{answer}
\end{pro}
\begin{pro}
Let $x$ be a real number, and put $$m(x) = \begin{bmatrix}1 & 0 &
x\cr -x & 1 & -\dfrac{x^2}{2} \cr 0 &0 &1\cr \end{bmatrix}.  $$If
$a, b$ are real numbers, prove that
\begin{enumerate}
\item $m(a)m(b)=m(a+b)$.
\item $m(a)m(-a)={\bf I}_3$, the $3\times 3$ identity matrix.
\end{enumerate}
\begin{answer}
For the first part, observe that $$\begin{array}{lll}m(a)m(b) & = &
\begin{bmatrix}1 & 0 & a\cr -a & 1 & -\dfrac{a^2}{2} \cr 0 &0 &1\cr
\end{bmatrix}\begin{bmatrix}1 & 0 & b\cr
-b & 1 & -\dfrac{b^2}{2} \cr 0 &0 &1\cr \end{bmatrix}  \\
& = & \begin{bmatrix}1 & 0 & a+b\cr -a-b & 1 &
-\dfrac{a^2}{2}-\dfrac{b^2}{2}-ab \cr 0 &0
&1\cr \end{bmatrix}\\
 \\
& = & \begin{bmatrix}1 & 0 & a+b\cr -(a+b) & 1 & -\dfrac{(a+b)^2}{2}
\cr 0 &0
&1\cr \end{bmatrix}\\

& = & m(a+b)\end{array}$$ For the second part, observe that using
the preceding part of the problem, $$m(a)m(-a)= m(a-a) = m(0)
=\begin{bmatrix}1 & 0 & 0\cr -0 & 1 & -\dfrac{0^2}{2} \cr 0 &0 &1\cr
\end{bmatrix} ={\bf I}_3,$$giving the result.
\end{answer}
\end{pro}
\begin{pro}
A square matrix $X$ is called {\em idempotent} if $X^2=X$. Prove
that if $AB=A$ and $BA=B$ then $A$ and $B$ are idempotent. \\
\begin{answer}
Observe that
$$A^2 = (AB)(AB) = A(BA)B = A(B)B = (AB)B = AB =A. $$
Similarly,
$$B^2 = (BA)(BA) = B(AB)A = B(A)A = (BA)A = BA =B. $$
\end{answer}
\end{pro}
\begin{pro}
Let $$ A = \begin{bmatrix} 0 & \dfrac{1}{2} & 0 \cr \dfrac{1}{2}  &
0 & 0 \cr 0 & 0 & \dfrac{1}{2} \cr \end{bmatrix}. $$ Calculate the
value of the infinite series
$${\bf I}_3 + A + A^2 +A^3 + \cdots .  $$
\begin{answer}
For this problem you need to recall that if $|r|<1$, then
$$a+ar + ar^2+ar^3+\cdots =\dfrac{a}{1-r}.  $$
This gives
$$ 1 + \tfrac{1}{4} +  \tfrac{1}{4^2}+  \tfrac{1}{4^3}+\cdots    = \dfrac{1}{1-\tfrac{1}{4}} = \dfrac{4}{3}, $$
$$\tfrac{1}{2} +  \tfrac{1}{2^3}+ \tfrac{1}{2^5}+\cdots  = \dfrac{\tfrac{1}{2}}{1-\tfrac{1}{4}} = \dfrac{2}{3},  $$
and
$$1 + \tfrac{1}{2} +  \tfrac{1}{2^2}+  \tfrac{1}{2^3}+\cdots  = \dfrac{1}{1-\tfrac{1}{2}}  = 2.  $$
 By looking at a few small cases, it is easy to establish by
induction that for $n\geq 1$
$$ A^{2n-1}= \begin{bmatrix} 0 & \dfrac{1}{2^{2n-1}} & 0 \cr \dfrac{1}{2^{2n-1}}  & 0 & 0 \cr 0 & 0 & \dfrac{1}{2^{2n-1}} \cr \end{bmatrix},
\qquad  A^{2n} = \begin{bmatrix}\dfrac{1}{2^{2n}} & 0  & 0 \cr 0 &
\dfrac{1}{2^{2n}} & 0 \cr 0 & 0 & \dfrac{1}{2^{2n}} \cr
\end{bmatrix}. $$
This gives
$${\bf I}_3 + A + A^2 +A^3 + \cdots = \begin{bmatrix}1 + \tfrac{1}{4} +  \tfrac{1}{4^2}+  \tfrac{1}{4^3}+\cdots   &  \tfrac{1}{2} +  \tfrac{1}{2^3}+ \tfrac{1}{2^5}+\cdots     & 0 \cr
\tfrac{1}{2} +  \tfrac{1}{2^3}+ \tfrac{1}{2^5}+\cdots   & 1 +
\tfrac{1}{4} +  \tfrac{1}{4^2}+  \tfrac{1}{4^3}+\cdots  & 0 \cr 0 &
0 & 1 + \tfrac{1}{2} +  \tfrac{1}{2^2}+  \tfrac{1}{2^3}+\cdots  \cr
\end{bmatrix} = \begin{bmatrix} \tfrac{4}{3} & \tfrac{2}{3} & 0 \cr \tfrac{2}{3} & \tfrac{4}{3} & 0 \cr
0 & 0 & 2 \end{bmatrix}.  $$
\end{answer}


\end{pro}

\begin{pro}
Solve the equation $$\begin{bmatrix} -4 & x \cr -x & 4
\end{bmatrix}^2 = \begin{bmatrix} -1 & 0 \cr 0 & -1
\end{bmatrix}$$ over $\BBR$. \begin{answer} Observe that $$
\begin{bmatrix} -4 & x \cr -x & 4
\end{bmatrix}^2 =\begin{bmatrix} -4 & x \cr -x & 4
\end{bmatrix}\begin{bmatrix} -4 & x \cr -x & 4
\end{bmatrix} =  \begin{bmatrix} 16 - x^2 & 0 \cr 0 & 16 - x^2 \end{bmatrix}, $$ and so we must have
$16 - x^2 = -1$ or $x = \pm\sqrt{17}$.
\end{answer}
\end{pro}
\begin{pro}
Prove or disprove! If  $(A,B)\in(\mat{n\times n}{ \BBF })^2$ are
such that $AB = {\bf 0}_n$, then also $BA = {\bf 0}_n.$
\begin{answer}
Disprove! Take $\dis{A = \left[\begin{array}{ll}0 & 1 \\ 0 & 0
\end{array}\right]}$ and $\dis{B = \left[\begin{array}{ll}1 & 0 \\ 0 & 0
\end{array}\right]}$. Then $AB = {\bf 0}_2$, but
$BA = A \neq {\bf 0}_2$.
\end{answer}
\end{pro}
\begin{pro}
Prove or disprove! For all matrices $(A,B)\in(\mat{n\times n}{ \BBF
})^2$,
$$(A+B)(A-B) = A^2 - B^2.$$
\begin{answer}
Disprove! Take for example $A = \begin{bmatrix} 0 & 0 \cr 1 & 1
\cr\end{bmatrix}$ and $B = \begin{bmatrix} 1 & 0 \cr 1 & 0
\cr\end{bmatrix}$. Then $$A^2 - B^2 = \begin{bmatrix} -1 & 0 \cr 0 &
1 \cr\end{bmatrix} \neq  \begin{bmatrix} -1 & 0 \cr -2 & 1
\cr\end{bmatrix}= (A+B)(A-B). $$
\end{answer}
\end{pro}
\begin{pro}
Consider the matrix $A=\begin{bmatrix}1 & 2 \cr 3 & x \end{bmatrix}$, where
$x$ is a real number. Find the value of $x$ such that there are non-zero $2\times 2 $ matrices $B$ such that $AB = \begin{bmatrix}0 & 0 \cr 0 & 0 \end{bmatrix}$.
\begin{answer}
$x=6$.
\end{answer}
\end{pro}

\begin{pro}
Prove, using mathematical induction, that $\begin{bmatrix}1 & 1
\cr 0 & 1 \cr
\end{bmatrix}^n=\begin{bmatrix}1 & n \cr 0 & 1 \cr
\end{bmatrix}$.
\end{pro}
\begin{pro}
Let $M = \begin{bmatrix} 1 & -1  \cr -1 & 1 \cr\end{bmatrix}$.
Find $M^6$.
\begin{answer} $ \begin{bmatrix} 32 & -32  \cr -32 & 32 \cr\end{bmatrix}$.\end{answer}
\end{pro}
\begin{pro}
Let $A = \begin{bmatrix} 0 & 3 \cr 2 & 0 \cr \end{bmatrix}$. Find,
with proof, $A^{2003}$.
\begin{answer}
$A^{2003} = \begin{bmatrix} 0 & 2^{1001}3^{1002} \cr
2^{1002}3^{1001} & 0 \cr
\end{bmatrix}$.
\end{answer}
\end{pro}
\begin{pro}
Let $(A, B, C)\in \mat{l\times m}{ \BBF }\times \mat{m\times n}{
\BBF }\times \mat{m\times n}{ \BBF }$ and $ \alpha\in \BBF$. Prove
that
$$A(B+C) = AB + AC,$$ $$(A+B)C = AC + BC,$$ $$\alpha (AB) = (\alpha A)B
= A(\alpha B).$$
\end{pro}

\begin{pro}
  Let $A \in \mat{2\times 2}{\BBR}$ be given by $$A =
\begin{bmatrix} \cos \alpha & -\sin \alpha \cr \sin \alpha & \cos \alpha\end{bmatrix}.$$Demonstrate, using induction,
that for $n \in \BBN , n \geq 1$,
$$A^n = \begin{bmatrix} \cos n\alpha & -\sin n\alpha \cr \sin n\alpha & \cos n\alpha \cr \end{bmatrix}.$$\begin{answer}The assertion is clearly true for $n = 1.$
Assume that it is true for $n$, that is, assume
$$A^{n} = \begin{bmatrix} \cos (n)\alpha & -\sin (n)\alpha \cr \sin (n)\alpha & \cos (n)\alpha \cr \end{bmatrix}.$$
Then $$\begin{array}{lll} A^{n+1} & = & AA^{n} \\
& = & \begin{bmatrix} \cos \alpha & -\sin \alpha \cr \sin \alpha &
\cos \alpha \cr \end{bmatrix}\begin{bmatrix} \cos (n)\alpha &
-\sin (n)\alpha \cr \sin (n)\alpha & \cos (n)\alpha \cr
\end{bmatrix}\vspace{2mm} \\
& = & \begin{bmatrix} \cos\alpha\cos (n)\alpha  - \sin\alpha\sin
(n)\alpha& -\cos\alpha\sin (n)\alpha  - \sin\alpha\cos
(n)\alpha\cr \sin\alpha\cos (n)\alpha  + \cos\alpha\sin (n)\alpha&
-\sin\alpha\sin (n)\alpha + \cos\alpha\cos (n)\alpha \cr
\end{bmatrix} \vspace{2mm} \\
& = & \begin{bmatrix} \cos (n+1)\alpha & -\sin (n+1)\alpha \cr
\sin (n+1)\alpha & \cos (n+1)\alpha \cr \end{bmatrix},
\end{array}$$and the result follows by induction.
\end{answer}
\end{pro}

\begin{pro}
A matrix $A = [a_{ij}]\in \mat{n\times n}{\BBR}$ is said to be {\em
checkered}  if $a_{ij} = 0$ when $(j - i)$ is odd. Prove that the
sum and the product of two checkered matrices is checkered.
\begin{answer}Let $A = [a_{ij}], B = [b_{ij}]$ be checkered $n\times n$
matrices. Then $A + B = (a_{ij} + b_{ij})$. If $j - i$ is odd,
then $a_{ij} + b_{ij} = 0 + 0 = 0$, which shows that $A + B$ is
checkered. Furthermore, let $AB = [c_{ij}]$ with $c_{ij} = \sum
_{k = 1} ^n a_{ik}b_{kj}$. If $i$ is even and $j$ odd, then
$a_{ik} = 0$ for odd $k$ and $b_{kj} = 0$ for even $k$. Thus
$c_{ij} = 0$ for $i$ even and $j$ odd. Similarly, $c_{ij} = 0$ for
odd $i$ and even $j$. This proves that $AB$ is checkered.
\end{answer}
\end{pro}

\begin{pro}
Let $A\in\mat{3\times 3}{\BBR}$, $$A = \begin{bmatrix} 1 & 1 & 1 \cr
0 & 1 & 1 \cr 0 & 0 & 1 \cr
\end{bmatrix}. $$Prove that
$$A^n = \begin{bmatrix} 1 & n & \frac{n(n + 1)}{2} \cr 0 & 1 & n \cr
0 & 0 & 1 \cr
\end{bmatrix}.$$
\begin{answer} Put $$ J =    \begin{bmatrix} 0 & 1 & 1 \cr 0 & 0 & 1 \cr 0 & 0 & 0 \cr
\end{bmatrix}. $$We first notice that
$$J^2 = \begin{bmatrix} 0 & 0 & 1 \cr 0 &  0 & 0 \cr 0 &  0 &  0 \cr\end{bmatrix}, \ \ J^3 = {\bf 0}_3.$$
This means that the sum in the binomial expansion $$A^n = ({\bf
I}_3 + J)^n = \sum _{k = 0} ^n \binom{n}{k}{\bf I}^{n - k}J^k$$ is
a sum of zero matrices for $k \geq 3.$ We thus have
$$\begin{array}{lll}A^n & = & {\bf I}^n _3 + n{\bf I}^{n - 1} _3J + \binom{n}{2}{\bf I}^{n - 2}_3J^2 \vspace{2mm}\\
&  = &
\begin{bmatrix} 1 & 0 & 0 \cr 0 &  1 & 0 \cr 0 &  0 &  1 \cr\end{bmatrix} +
\begin{bmatrix} 0 & n & n \cr 0 &  0 & n \cr 0 &  0 &  0 \cr\end{bmatrix}
+ \begin{bmatrix} 0 & 0 & \binom{n}{2} \cr 0 &  0 & 0 \cr 0 &  0 &
0 \cr\end{bmatrix} \vspace{2mm}\\
&  = &  \begin{bmatrix} 1 & n & \frac{n(n + 1)}{2} \cr 0 & 1 & n
\cr 0 & 0 & 1 \cr
\end{bmatrix}, \end{array}$$giving the result, since $\binom{n}{2} = \frac{n(n - 1)}{2}$ and $n + \binom{n}{2} = \frac{n(n + 1)}{2}$.
\end{answer}
\end{pro}


\begin{pro}
Let $(A,B)\in(\mat{n\times n}{ \BBF })^2$ and $k$ be a positive
integer such that $A^k = {\bf 0}_n$. If   $AB = B$ prove that $B =
{\bf 0}_n$.
\begin{answer}
Argue inductively, $$\begin{array}{l} A^2B = A(AB) = AB = B \\
A^3B = A(A^2B) = A(AB) = AB = B \\ \vdots \\ A^mB = AB = B.
\end{array}$$In particular, taking $m=k$, we obtain $B = A^kB = {\bf 0}_nB = {\bf 0}_n$.
\end{answer}
\end{pro}

\begin{pro} \label{pro:cayley_hamilton_2x2}
Let $A = \begin{bmatrix}a & b \cr c & d \cr \end{bmatrix}$.
Demonstrate that $$A^2 - (a + d)A + (ad - bc){\bf I}_2= {\bf 0}_2. $$
\end{pro}

\begin{pro}
Let $A\in\mat{2}{ \BBF }$ and let $k\in\BBZ, k >2$. Prove that $A^k
= {\bf 0}_2 $ if and only if $A^2 = {\bf 0}_2$.
\begin{answer}
Put $A =
\begin{bmatrix}a & b \cr c & d \cr \end{bmatrix}.$ Using
\ref{pro:cayley_hamilton_2x2}, deduce by iteration that $$A^k = (a
+d)^{k-1}A.
$$
\end{answer}
\end{pro}
\begin{pro}Find all matrices $A\in\mat{2\times 2}{\BBR}$ such that $A^2 = {\bf 0}_2$
\begin{answer} $\begin{bmatrix}a & b \cr c & -a \end{bmatrix}, \ bc = -a^2$\end{answer}
\end{pro}

\begin{pro}Find all matrices $A\in\mat{2\times 2}{\BBR}$ such that $A^2 = {\bf I}_2$
\begin{answer} $\pm {\bf I}_2, \begin{bmatrix}a & b \cr c & -a \end{bmatrix}, \ \ a^2 = 1 -bc$ \end{answer}
\end{pro}

\begin{pro}
Find a solution $X\in\mat{2\times 2}{\BBR}$ for
$$X^2 - 2X = \left[\begin{array}{rl} -1 & 0 \\ 6 & 3\end{array}\right].$$
\begin{answer}
We complete squares by putting $\dis{Y = \begin{bmatrix} a & b \\
c & d
\end{bmatrix} = X - I}$. Then
$$\begin{bmatrix} a^2 + bc & b(a + d) \\ c(a + d) & bc + d^2\end{bmatrix} = Y^2 = X^2 - 2X + I = (X - I)^2 =
\begin{bmatrix} -1 & 0 \\ 6 & 3\end{bmatrix} + I = \begin{bmatrix} 0 & 0 \\ 6 & 4\end{bmatrix}.$$
This entails $a = 0, b = 0, cd = 6, d^2 = 4.$  Using $X = Y + I$,
we find that there are two solutions,
$$\begin{bmatrix} 1 & 0 \\ 3 & 3\end{bmatrix},\ \  \begin{bmatrix} 1 & 0 \\ -3 &  -1\end{bmatrix}.$$
\end{answer}
\end{pro}
\begin{pro}
Find, with proof, a $4\times 4$ {\bf non-zero} matrix $A$ such that
$$A\begin{bmatrix}1 & 1 & 1 & 1 \cr 1 & 1 & 1 & 1 \cr 1 & 1 & 1 & 1 \cr 1 & 1 & 1 & 1 \cr \end{bmatrix} = \begin{bmatrix}1 & 1 & 1 & 1 \cr 1 & 1 & 1 & 1 \cr 1 & 1 & 1 & 1 \cr 1 & 1 & 1 & 1 \cr \end{bmatrix}A =
\begin{bmatrix}0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr \end{bmatrix}.
$$
\begin{answer}
The matrix $$A=\begin{bmatrix}1 & -1 & 1 & -1 \cr -1 & 1 & -1 & 1
\cr -1 & 1 & -1 & 1 \cr 1 & -1 & 1 & -1 \cr \end{bmatrix} $$ clearly
satisfies the conditions.
\end{answer}
\end{pro}
\begin{pro}
Let $X$ be a $2\times 2$ matrix with real number entries. Solve
the equation $$X^2 + X = \begin{bmatrix} 1& 1 \cr 1 & 1 \cr
\end{bmatrix}.
$$
\begin{answer}
Put $X = \begin{bmatrix} a & b \cr c & d \cr\end{bmatrix}$. Then
$$X^2 + X = \begin{bmatrix} 1& 1 \cr 1 & 1 \cr
\end{bmatrix} \iff \left\{\begin{array}{l} a^2 + bc + a = 1 \\  ab + bd + b =1\\ ca + dc + c
=1\\
cb + d^2 + d = 1 \end{array}\right. \iff \left\{\begin{array}{l} a^2
+ bc + a = 1 \\  b(a + d + 1) =1\\ c(a + d + 1)
=1\\
(d-a)(a+d+1) = 0 \end{array}\right. \iff \left\{\begin{array}{l} d =
a, \ a \neq -\dfrac{1}{2}
\\  c=b = \dfrac{1}{2a+1}\\ a^2 + \dfrac{1}{(2a+1)^2} +a =1\\
 \end{array}\right.
$$The last equation holds
$$\iff 4a^4 + 8a^3 +a^2-3a = 0 \iff a\in\{-\frac{3}{2},-1,0,\frac{1}{2}\}.
$$Thus the set of solutions is
$$ \{\begin{bmatrix} -1& -1 \cr -1 & -1 \cr
\end{bmatrix}, \begin{bmatrix} 0& 1 \cr 1 & 0 \cr
\end{bmatrix}, \begin{bmatrix} 1/2& 1/2 \cr 1/2 & 1/2 \cr
\end{bmatrix}, \begin{bmatrix} -3/2& -1/2 \cr -1/2 & -3/2 \cr
\end{bmatrix}\} $$
\end{answer}
\end{pro}
\begin{pro}
Prove, by means of induction, that for the following $n\times n$
matrix we have
$$\begin{bmatrix} 1 & 1 & 1 & \cdots & 1 \cr 0 & 1 & 1 & \cdots & 1 \cr
0 & 0 & 1 & \cdots & 1 \cr \vdots & \vdots & \vdots & \cdots &
\vdots \cr 0 & 0 & 0 & \cdots & 1 \cr
\end{bmatrix}^3 = \begin{bmatrix}
1 & 3 & 6 & \cdots & \frac{n(n+1)}{2} \cr 0 & 1 & 3 & \cdots &
\frac{(n-1)n}{2} \cr 0 & 0 & 1 & \cdots & \frac{(n-2)(n-1)}{2} \cr
\vdots & \vdots & \cdots & \cdots & \vdots \cr 0 & 0 & 0 & \cdots &
1 \cr\end{bmatrix}.
$$
\end{pro}
\begin{pro}
Let $$A = \begin{bmatrix} 1 & -1 & -1 \cr -1 & 1 & -1 \cr -1 & -1 &
1 \cr\end{bmatrix}.$$Conjecture a formula for $A^n$ and prove it
using induction.
\begin{answer}
Observe that $A = 2{\bf I}_3 - J$, where ${\bf I}_3$  is the
$3\times 3$ identity matrix and $$J = \begin{bmatrix} 1 & 1 & 1 \cr
1 & 1 & 1 \cr 1 & 1 & 1 \cr\end{bmatrix}.$$ Observe that $J^k =
3^{k-1}J$ for integer $k \geq 1$. Using the binomial theorem we have
$$\begin{array}{lll}A^n &= & (2{\bf I}_3 - J)^n \\
&= & \sum _{k=0} ^n \binom{n}{k} (2{\bf I}_3)^{n-k}(-1)^kJ^k \\
& = & 2^n{\bf I}_3+\dfrac{1}{3}J\sum _{k=1} ^n \binom{n}{k}
2^{n-k}(-1)^k3^{k} \\
& = & 2^n{\bf I}_3+ \dfrac{1}{3}J((-1)^n-2^n) \\
 & = & \dfrac{1}{3}\begin{bmatrix} (-1)^n+2^{n+1} & (-1)^n-2^n & (-1)^n-2^n \cr
(-1)^n-2^n& (-1)^n+2^{n+1} & (-1)^n-2^n \cr (-1)^n-2^n & (-1)^n-2^n
& (-1)^n+2^{n+1} \cr\end{bmatrix}.\end{array}$$
\end{answer}
\end{pro}


\section{Trace and Transpose}
\begin{df}
Let $A = [a_{ij}] \in \mat{n\times n}{ \BBF }$. Then the {\em trace}
of $A$, denoted by $\tr{A}$ is the sum of the diagonal elements of
$A$, that is
$$\tr{A} = \sum _{k = 1} ^n a_{kk}.$$ \index{trace}
\end{df}
\begin{thm}
Let $A = [a_{ij}] \in \mat{n\times n}{ \BBF }, B = [b_{ij}]\in
\mat{n\times n}{ \BBF }$. Then
\begin{equation}  \tr{A + B} = \tr{A} + \tr{B},\end{equation}
\begin{equation}  \tr{AB} = \tr{BA}.\end{equation}
\label{tracethm}\end{thm}
\begin{pf}
The first assertion is trivial. To prove the second, observe that
$AB = (\sum _{k = 1} ^n a_{ik}b_{kj})$ and $BA = (\sum _{k = 1} ^n
b_{ik}a_{kj})$. Then
$$\tr{AB} = \sum _{i = 1} ^n \sum _{k = 1} ^n a_{ik}b_{ki} =
 \sum_{k = 1} ^n \sum _{i = 1} ^n b_{ki}a_{ik} = \tr{BA},
 $$whence the theorem follows.
\end{pf}
\begin{exa}
Let $A\in \mat{n\times n}{\BBR}$. Shew that $A$ can be written as
the sum of two matrices whose trace is different from $0$.
\end{exa}
\begin{solu} Write $$A = (A - \alpha {\bf I}_n) + \alpha {\bf I}_n.$$ Now,
$\tr{A - \alpha {\bf I}_n}  = \tr{A} - n\alpha$ and $\tr{\alpha {\bf
I}_n} = n\alpha$. Thus it suffices to take $\alpha \neq
\dfrac{\tr{A}}{n}, \alpha \neq 0$. Since $\BBR$ has infinitely many
elements, we can find such an $\alpha$.

\end{solu}
\begin{exa}
Let $A, B$ be square matrices of the same size and over the same
field of characteristic $0$. Is it possible that $AB - BA = {\bf
I}_n$? Prove or disprove!
\end{exa}
\begin{solu} This is impossible. For if it were so, then taking traces on both sides,
$$0 = \tr{AB}-\tr{BA} = \tr{AB - BA} = \tr{{\bf I}_n} = n,$$ a
contradiction, since $n >0$.
\end{solu}
\begin{df} \index{transpose}
The {\em transpose} of a matrix $A = [a_{ij}] \in
\mat{m\times n}{ \BBF }$ is the matrix $A^T = B = [b_{ij}] \in
\mat{n\times m}{ \BBF }$, where $b_{ij} = a_{ji}$.
\index{matrix!transpose}
\end{df}
\begin{exa}
If $$M = \begin{bmatrix}a & b & c \cr  d & e & f \cr g & h & i
\cr\end{bmatrix},$$ with entries in $\BBR$, then
$$ M^T = \begin{bmatrix}a & d & g \cr  b & e & h \cr c & f & i
\cr\end{bmatrix}.$$
\end{exa}
\begin{thm}\label{thm:properties_transpose}
Let $$A = [a_{ij}] \in \mat{m\times n}{ \BBF },\  B = [b_{ij}] \in
\mat{m\times n}{ \BBF },\  C = [c_{ij}] \in \mat{n\times r}{ \BBF
},\ \alpha \in \BBF, u\in\BBN .$$Then
\begin{equation} A^{TT} = A,\end{equation}
\begin{equation} (A + \alpha B)^T =\  A^T + \alpha B^T,\end{equation}
\begin{equation} (AC)^T =\   C^TA^T,\end{equation}
\begin{equation} (A^{u})^T = (A^T)^u.\end{equation}

\end{thm}
\begin{pf}
The first two assertions are obvious, and the fourth follows from
the third by using induction. To prove the third put $A^T =
(\alpha_{ij}), \ \alpha _{ij} = a_{ji}$, $C^T = (\gamma_{ij}), \
\gamma _{ij} = c_{ji}$, $AC = (u_{ij})$ and $C^TA^T = (v_{ij})$.
Then
$$u_{ij} = \sum _{k = 1} ^n a_{ik}c_{kj} = \sum _{k = 1} ^n \alpha _{ki}\gamma _{jk} =
\sum _{k = 1} ^n \gamma _{jk}\alpha _{ki} = v_{ji},$$ whence the
theorem follows.
\end{pf}
\begin{df}\index{matrix!skew-symmetric} \index{matrix!symmetric}
A square matrix $A\in\mat{n\times n}{ \BBF }$ is {\em symmetric} if
$A^T = A.$ A matrix $B\in\mat{n\times n}{ \BBF }$ is {\em
skew-symmetric} if $B^T = -B.$
\end{df}


\begin{exa} Let $A, B$ be square matrices of the same size, with $A$
symmetric and $B$ skew-symmetric. Prove that the matrix $A^2BA^2$ is
skew-symmetric. \end{exa} \begin{solu} We have
$$(A^2BA^2)^T = (A^2)^T (B)^T(A^2)^T = A^2(-B)A^2 = -A^2BA^2.$$
\end{solu}
\begin{thm}
Let $\BBF$ be a field of characteristic different from $2$.  Then
any square matrix $A$ can be written as the sum of a symmetric and a
skew-symmetric matrix.
\end{thm}
\begin{pf} Observe that
$$(A + A^T)^T = A^T + A^{TT} = A^T + A,$$and so
$A + A^T$ is symmetric. Also,
$$(A - A^T)^T = A^T - A^{TT} = -(A - A^T),$$and so $A - A^T$ is
skew-symmetric. We only need to write $A$ as
$$A = (2^{-1})(A + A^T) +  (2^{-1})(A - A^T)$$to prove the
assertion.
\end{pf}
\begin{exa}
Find,  with proof, a square matrix $A$ with entries in $\BBZ _2$
such that $A$ is not the sum of a symmetric and an anti-symmetric
matrix. \end{exa}
\begin{solu}In $\BBZ_2$ every symmetric matrix is also
anti-symmetric, since $-x = x$. Thus it is enough to take a
non-symmetric matrix, for example,  take $A =
\begin{bmatrix} \overline{0} & \overline{1} \cr \overline{0} &
\overline{0} \cr
\end{bmatrix}$.
\end{solu}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Write $$ A =
\begin{bmatrix} 1 & 2 & 3 \cr 2 & 3 & 1 \cr 3 & 1 & 2
\cr\end{bmatrix}\in\mat{3\times 3}{\BBR}$$ as the sum of two
$3\times 3$ matrices ${\bf E}_1, {\bf E}_2$, with $\tr{{\bf E}_2} =
10$.
\begin{answer} There are infinitely many solutions. Here is one: $$A =
\begin{bmatrix} 1 & 2 & 3 \cr 2 & 3 & 1 \cr 3 & 1 & 2
\cr\end{bmatrix} =
\begin{bmatrix} -9 & 2 & 3 \cr 2 & 3 & 1 \cr 3 & 1 & 2
\cr\end{bmatrix} +  \begin{bmatrix} 10 & 0 & 0 \cr 0 & 0 & 0 \cr 0
& 0 & 0 \cr
\end{bmatrix}.$$\end{answer}
\end{pro}
\begin{pro}
Give an example of two matrices $A\in\mat{2\times 2}{\BBR}$ and
$B\in \mat{2\times
 2}{\BBR}$ that {\em simultaneously} satisfy the following
 properties:
 \begin{enumerate}
\item $A\neq \begin{bmatrix}0 & 0 \cr 0 & 0 \cr  \end{bmatrix}$ and $B\neq \begin{bmatrix}0 & 0 \cr 0 & 0 \cr
\end{bmatrix}$.
\item $AB=\begin{bmatrix}0 & 0 \cr 0 & 0 \cr  \end{bmatrix}$ and $BA=\begin{bmatrix}0 & 0 \cr 0 & 0 \cr
\end{bmatrix}$.
\item $\tr{A}=\tr{B}=2$.
\item $A=A^T$ and $B=B^T$.
 \end{enumerate}
\begin{answer}
There are infinitely many examples. One could take
$A=\begin{bmatrix} 1 & 1 \cr 1 & 1
\end{bmatrix}$ and $B=\begin{bmatrix} 1 & -1 \cr -1 & 1
\end{bmatrix}$. Another set is $A=\begin{bmatrix} 2 & 0 \cr 0 & 0
\end{bmatrix}$ and $B=\begin{bmatrix} 0 & 0 \cr 0 & 2
\end{bmatrix}$.
\end{answer}
\end{pro}

\begin{pro}

Shew that there are no matrices $(A, B, C, D)\in (\mat{n\times
n}{\BBR})^4$ such that
$$AC + DB = {\bf I}_n,$$
$$CA + BD = {\bf 0}_n.$$
\begin{answer}If such matrices existed, then  by the first equation
$$\tr{AC} + \tr{DB} = n.$$ By the second equation and by Theorem
\ref{tracethm}, $$0 = \tr{CA} + \tr{BD} = \tr{AC} + \tr{DB} = n,$$
a contradiction, since $n \geq 1.$
\end{answer}
\end{pro}
\begin{pro}
Let $(A, B) \in (\mat{2\times 2}{\BBR})^2$ be symmetric matrices.
Must their product $AB$ be symmetric? Prove or disprove!
\begin{answer}Disprove! This is not generally true. Take $A =
\dis{\begin{bmatrix} 1 & 1 \cr 1 & 2
\end{bmatrix}}$ and $B =
\dis{\begin{bmatrix} 3 & 0 \cr 0 & 1
\end{bmatrix}}$. Clearly $A^T = A$ and $B^T = B$. We have
$$AB  = \begin{bmatrix} 3 & 1 \cr 3 & 2\cr\end{bmatrix}$$but
$$(AB)^T  = \begin{bmatrix} 3 & 3 \cr 1 & 2\cr\end{bmatrix}.$$
\end{answer}
\end{pro}
\begin{pro}
Given square matrices $(A, B) \in (\mat{7\times 7}{\BBR})^2$ such
that $\tr{A^2} = \tr{B^2} = 1$, and
$$ (A - B)^2 = 3{\bf I}_7, $$find $\tr{BA}$.
\end{pro}
\begin{pro}
 Consider the matrix $A = \begin{bmatrix} a & b
\cr c & d \cr\end{bmatrix} \in \mat{2\times 2}{\BBR}$. Find
necessary and sufficient conditions on $a, b, c, d$ so that
$\tr{A^2} = (\tr{A})^2$. \begin{answer} We have $$\tr{A^2} =
\tr{\begin{bmatrix} a & b \cr c & d \cr\end{bmatrix}\begin{bmatrix}
a & b \cr c & d \cr\end{bmatrix}} = \tr{\begin{bmatrix} a^2 + bc &
ab + bd \cr ca + cd & d^2 + cb \cr\end{bmatrix}} = a^2 + d^2 + 2bc
$$and $$\left(\tr{\begin{bmatrix} a
& b \cr c & d \cr\end{bmatrix}}\right)^2 = (a+d)^2.  $$ Thus $$
\tr{A^2} = (\tr{A})^2 \iff a^2 + d^2 + 2bc = (a + d)^2 \iff bc =
ad,
$$is the condition sought.
\end{answer}
\end{pro}
\begin{pro}
Given a square matrix $A \in \mat{4\times 4}{\BBR}$ such that
$\tr{A^2} = -4$, and $$ (A - {\bf I}_4)^2 = 3{\bf I}_4, $$find
$\tr{A}$.
\begin{answer}
$$\begin{array}{lll}\tr{(A - {\bf I}_4)^2} & = &  \tr{A^2 - 2A + {\bf I}_4} \\
& = &  \tr{A^2} - 2\tr{A} + \tr{{\bf I}_4} \\
&  = &  -4 - 2\tr{A} + 4 \\
& = &  -2\tr{A}, \end{array}$$ and $\tr{3{\bf I}_4} = 12. $ Hence
$-2\tr{A} = 12$ or $\tr{A} = -6$.
\end{answer}
\end{pro}
\begin{pro}
Prove or disprove! If $A, B$ are square matrices of the same size,
then it is always true that $\tr{AB} = \tr{A}\tr{B}$.
\begin{answer} Disprove! Take $A = B = {\bf I}_n$ and $n>1$. Then $\tr{AB} = n< n^2 = \tr{A}\tr{B}$.  \end{answer}
\end{pro}

\begin{pro}
Prove or disprove! If $(A, B, C)\in(\mat{3\times 3}{ \BBF })^3$ then
$\tr{ABC} = \tr{BAC}$. \begin{answer}Disprove! Take $A
=\begin{bmatrix}1 & 0 & 0 \cr 0 & 0 & 0 \cr 0 & 0 & 0 \cr
\end{bmatrix}$, $B =\begin{bmatrix}0 & 1 & 0
\cr 0 & 0 & 0 \cr 0 & 0 & 0 \cr
\end{bmatrix}$, $C =\begin{bmatrix}0 & 0 & 0
\cr 1 & 0 & 0 \cr 0 & 0 & 0 \cr
\end{bmatrix}$. Then $\tr{ABC} = 1$ but $\tr{BAC} = 0$.
\end{answer}
\end{pro}

\begin{pro}
Let $A$ be a square matrix. Prove that the matrix $AA^T$ is
symmetric.
\begin{answer} We have
$$(AA^T)^T = (A^T)^TA^T = AA^T.$$
\end{answer}
\end{pro}
\begin{pro}
Let $A, B$ be square matrices of the same size, with $A$ symmetric
and $B$ skew-symmetric. Prove that the matrix $AB - BA$ is
symmetric. \begin{answer} We have
$$(AB - BA)^T = (AB)^T - (BA)^T = B^TA^T - A^TB^T = -BA - A(-B) = AB - BA.$$
\end{answer}
\end{pro}
\begin{pro}
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$. Prove that
$\tr{AA^T} = \sum _{i = 1} ^n \sum _{j = 1} ^n a_{ij} ^2$.
\end{pro}

\begin{pro}
Let $X\in\mat{n\times n}{\BBR}$. Prove that if $XX^T = {\bf 0}_n$
then $X={\bf 0}_n$.
\begin{answer}
Let $X=[x_{ij}]$ and put $XX^T = [c_{ij}]$. Then $$0 = c_{ii} =
\sum _{k = 1} ^n x_{ik}^2\implies x_{ik} = 0.$$
\end{answer}
\end{pro}
\begin{pro}
Let $m,n,p$ be positive integers and $A\in\mat{m\times n}{\BBR}$,
$B\in\mat{n\times p}{\BBR}$, $C\in\mat{n\times p}{\BBR}$. Prove that
$(AB)^TA = (AC)^TA \implies AB = AC$.
\end{pro}
\end{multicols}

\section{Special Matrices}
\begin{df}
 The {\em main diagonal} of a square matrix  $A = [a_{ij}]\in\mat{n\times n}{ \BBF }$ is  the set
 $\{a_{ii}: i\leq n\}$. The {\em counter diagonal} of a square matrix  $A = [a_{ij}]\in\mat{n\times n}{ \BBF }$ is  the set
 $\{a_{(n -i + 1)i}: i\leq n\}$.\end{df} \begin{exa}The
main diagonal of the matrix $$A = \begin{bmatrix} 0 & 1 & 5 \cr 3
& 2 & 4 \cr 9 & 8 & 7 \cr
\end{bmatrix}
$$is the set $\{0, 2, 7\}$. The
counter diagonal of  $A$ is the set $\{5, 2, 9\}$.
\end{exa}
\begin{df}
A square matrix is a {\em diagonal} matrix if every entry off its
main diagonal is $0_{\BBF }$. \index{matrix!diagonal}
\end{df}
\begin{exa}The
 matrix $$A = \begin{bmatrix} 1 & 0 & 0  \cr 0 & 2 & 0\cr 0 & 0 &
 3
\cr
\end{bmatrix}
$$is a diagonal matrix.
\end{exa}
\begin{df}
A square matrix is a {\em scalar} matrix if it is of the form
$\alpha {\bf I}_n$ for some scalar $\alpha$. \index{matrix!scalar}
\end{df}
\begin{exa}The
 matrix $$A = \begin{bmatrix} 4 & 0 & 0  \cr 0 & 4 & 0\cr 0 & 0 &
 4
\cr
\end{bmatrix} = 4{\bf I}_3
$$is a scalar matrix.
\end{exa}
 \begin{df} $A\in \mat{m\times n}{ \BBF }$ is said to be {\em
upper triangular} if $$(\forall (i, j)\in \{1, 2, \cdots , m\}\times\{1, 2, \cdots , n\}),
(i > j \implies a_{ij} = 0_{\BBF }),$$ that is, every element below the
main diagonal is $0_{\BBF }$. Similarly, $A$ is {\em lower
triangular} if
$$(\forall (i, j) \in \{1, 2, \cdots , m\}\times\{1, 2, \cdots , n\}), (i < j \implies a_{ij} = 0_{\BBF }),$$ that is, every element
above the main diagonal is $0_{\BBF }$.
\end{df}
\begin{exa}
The matrix $A\in\mat{3\times 4}{\BBR}$ shewn is upper triangular and
$B\in\mat{4\times 4}{\BBR}$ is lower triangular.
$$A = \begin{bmatrix} 1 & a & b & c \cr 0 & 2 & 3 & 0 \cr
0 & 0 & 0 & 1 \cr \end{bmatrix} \ \ B = \begin{bmatrix} 1 & 0 & 0&
0 \cr 1 & a & 0& 0 \cr 0 & 2 & 3 & 0 \cr 1 & 1 & t & 1 \cr
\end{bmatrix}$$
\end{exa}
\begin{df}\index{Kronecker delta}
The {\em Kronecker delta } $\delta _{ij}$ is defined by $$\delta
_{ij} = \left\{\begin{array}{ll} 1_{\BBF } & {\rm if}\ i = j\\
0_{\BBF } & {\rm if}\ i \neq j\end{array}\right.
$$
\end{df}
\begin{df}\index{matrix!elementary}
The set of matrices ${\bf E}_{ij}\in\mat{m\times n}{ \BBF }, {\bf
E}_{ij} = ( e_{rs})$ such that $e_{ij} = 1_{\BBF }$ and
$e_{i'j'}=0_{\BBF }, (i',j')\neq (i,j)$ is called the set of {\em
elementary matrices}. Observe that in fact $e_{rs} = \delta
_{ir}\delta _{sj}$.
\end{df}
Elementary matrices have interesting effects when we pre-multiply
and post-multiply a matrix by them.
\begin{exa}
Let $$A = \begin{bmatrix} 1 & 2 & 3 & 4\cr 5 & 6 & 7 & 8 \cr 9 &
10 & 11 & 12 \cr
\end{bmatrix}, \ \ \  {\bf E}_{23} = \begin{bmatrix} 0 & 0 & 0 \cr 0 & 0 & 1 \cr 0 & 0 & 0 \cr 0 & 0 & 0 \cr   \end{bmatrix}.
$$
Then $${\bf E}_{23}A =\begin{bmatrix}  0 & 0 & 0 & 0 \cr 9 & 10 &
11 & 12 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 \cr
\end{bmatrix}, \ \ \ A{\bf E}_{23} =\begin{bmatrix} 0 & 0 & 2 \cr 0 & 0 & 6 \cr 0 & 0 & 10\cr
\end{bmatrix}.
$$
\end{exa}
\begin{thm}[Multiplication by Elementary Matrices]\label{thm:mult_elem_matrix}
Let ${\bf E}_{ij}\in\mat{m\times n}{ \BBF }$ be an elementary
matrix, and let $A\in\mat{n\times m}{ \BBF }$. Then ${\bf E}_{ij}A$
has as its $i$-th row the  $j$-th row of $A$ and  $0_{\BBF }$'s
everywhere else. Similarly, $A{\bf E}_{ij}$ has as its $j$-th column
the $i$-th column of $A$ and  $0_{\BBF }$'s everywhere else.
\end{thm}
\begin{pf}
Put $(\alpha _{uv}) = {\bf E}_{ij}A$. To obtain ${\bf E}_{ij}A$ we
multiply the rows of ${\bf E}_{ij}$ by the columns of $A$. Now $$
\alpha _{uv} = \sum _{k = 1} ^n e_{uk}a_{kv} = \sum _{k = 1} ^n
\delta _{ui}\delta _{kj}a_{kv} = \delta _{ui}a_{jv}.$$Therefore, for
$u \neq i, \ \alpha _{uv} = 0_{\BBF }$, i.e., off of the $i$-th row
the entries of ${\bf E}_{ij}A$ are $0_{\BBF }$,  and $\alpha _{iv} =
a_{jv}$, that is, the $i$-th row of ${\bf E}_{ij}A$ is the
$j$-th row of $A$. The case for $A{\bf E}_{ij}$ is similarly
argued.\end{pf} The following corollary is immediate.
\begin{cor}
Let $({\bf E}_{ij}, {\bf E}_{kl})\in (\mat{n\times n}{ \BBF })^2$,
be square elementary matrices. Then
$${\bf
E}_{ij}{\bf E}_{kl} = \delta _{jk}{\bf E}_{il}.  $$
\end{cor}
\begin{exa}
Let $M\in\mat{n\times n}{ \BBF }$ be a matrix such that $AM = MA$
for all matrices $A\in\mat{n\times n}{ \BBF }$. Demonstrate that $M
= a{\bf I}_n$ for some $a\in \BBF$, i.e. $M$ is a scalar matrix.
\label{ex:scalarmatrix}\end{exa} \begin{solu} Assume $(s, t)\in\{1,
2, \ldots , n\}^2$.
 Let $M = (m_{ij})$
and ${\bf E}_{st}\in\mat{n\times n}{ \BBF }$. Since $M$ commutes
with ${\bf E}_{st}$ we have
$$
\begin{bmatrix} 0 & 0 & \ldots & 0 \cr \vdots & \vdots & \ldots &
\vdots \cr m_{t1} & m_{t2} & \ldots & m_{tn} \cr \vdots & \vdots &
\ldots & \vdots \cr 0 & 0 & \ldots & 0 \cr \end{bmatrix} = {\bf
E}_{st}M = M{\bf E}_{st} = \begin{bmatrix} 0 & 0 & \ldots & m_{1s}
& \ldots & 0\cr 0 & 0 & \vdots & m_{2s} & \vdots & 0\cr \vdots &
\vdots & \vdots & \vdots & \vdots & \vdots\cr 0 & 0 & \vdots &
m_{(n - 1)s} & \vdots & 0\cr 0 & 0 & \vdots & m_{ns} & \vdots &
0\cr
\end{bmatrix}$$ For arbitrary $s \neq t$ we have shown that  $m_{st}
= m_{ts} = 0$, and that $m_{ss} = m_{tt}$. Thus the entries off
the main diagonal are zero and the diagonal entries are all equal
to one another, whence $M$ is a scalar matrix.
\end{solu}

\begin{df}
Let $\lambda\in \BBF$ and ${\bf E}_{ij}\in \mat{n\times n}{ \BBF }$.
A square matrix in $\mat{n\times n}{ \BBF }$ of the form ${\bf I}_n
+ \lambda {\bf E}_{ij}$ is called a {\em transvection}.
\end{df}
\begin{exa}
The matrix $$T = {\bf I}_3 + 4{\bf E}_{13} = \begin{bmatrix}1 & 0
& 4 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix}
$$is a transvection. Observe that if $$A = \begin{bmatrix}1 & 1 & 1 \cr 5 & 6 & 7 \cr 1 &
2 & 3 \cr
\end{bmatrix}  $$then $$TA = \begin{bmatrix}1 & 0 & 4 \cr 0 & 1 & 0 \cr 0 & 0
& 1 \cr
\end{bmatrix}\begin{bmatrix}1 & 1 & 1 \cr 5 & 6 & 7 \cr 1 &
2 & 3 \cr
\end{bmatrix} = \begin{bmatrix}5 & 9 & 13 \cr 5 & 6 & 7 \cr 1 & 2
& 3 \cr
\end{bmatrix},$$that is, pre-multiplication by $T$ adds $4$ times
the third row of $A$ to the first row of $A$. Similarly, $$AT =
\begin{bmatrix}1 & 1 & 1 \cr 5 & 6 & 7 \cr 1 &
2 & 3 \cr
\end{bmatrix}\begin{bmatrix}1 & 0 & 4 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix} = \begin{bmatrix}1 & 1 & 5 \cr 5 & 6 & 27 \cr 1 & 2
& 7 \cr
\end{bmatrix},$$that is, post-multiplication by $T$ adds $4$ times
the first column of $A$ to the third column of $A$.
\end{exa}
In general, we have the following theorem.
\begin{thm}[Multiplication by a Transvection
Matrix]\label{thm:mult_by_transvection_matrix}\index{matrix!transvection}
Let ${\bf I}_n + \lambda {\bf E}_{ij}\in \mat{n\times n}{ \BBF }$ be
a transvection and let $A\in \mat{n\times m}{ \BBF }$. Then $({\bf
I}_n + \lambda {\bf E}_{ij})A$ adds $\lambda$ times the $j$-th row
of $A$ to its $i$-th row and leaves the other rows unchanged.
Similarly, if $B\in \mat{p\times n}{ \BBF }$, $B({\bf I}_n + \lambda
{\bf E}_{ij})$ adds $\lambda$ times the $i$-th column of $B$ to the
$j$-th column and leaves the other columns unchanged.
\end{thm}
\begin{pf}
Simply observe that $({\bf I}_n + \lambda {\bf E}_{ij})A = A +
\lambda {\bf E}_{ij}A$ and $A({\bf I}_n + \lambda {\bf E}_{ij}) =
A + \lambda A{\bf E}_{ij}$ and apply Theorem
\ref{thm:mult_elem_matrix}.
\end{pf}
Observe that the particular transvection ${\bf I}_n + (\lambda -
1_{\BBF }) {\bf E}_{ii}\in\mat{n\times n}{ \BBF }$ consists of a
diagonal matrix with $1_{\BBF }$'s everywhere on the diagonal,
except on the $ii$-th position, where it has a $\lambda$.
\begin{df}\index{matrix!dilatation}
 If $\lambda \neq 0_{\BBF }$, we call the matrix ${\bf I}_n
+ (\lambda - 1_{\BBF }) {\bf E}_{ii}$ a {\em dilatation matrix}.
\end{df}
\begin{exa}
The matrix $$S = {\bf I}_3 + (4 - 1){\bf E}_{11} =
\begin{bmatrix}4 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix}
$$is a dilatation matrix. Observe that if $$A = \begin{bmatrix}1 & 1 & 1 \cr 5 & 6 & 7 \cr 1 &
2 & 3 \cr
\end{bmatrix}  $$then $$SA = \begin{bmatrix}4 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0
& 1 \cr
\end{bmatrix}\begin{bmatrix}1 & 1 & 1 \cr 5 & 6 & 7 \cr 1 &
2 & 3 \cr
\end{bmatrix} = \begin{bmatrix}4 & 4 & 4 \cr 5 & 6 & 7 \cr 1 & 2
& 3 \cr
\end{bmatrix},$$that is, pre-multiplication by $S$ multiplies by $4$
the first row of $A$. Similarly, $$AS =
\begin{bmatrix}1 & 1 & 1 \cr 5 & 6 & 7 \cr 1 &
2 & 3 \cr
\end{bmatrix}\begin{bmatrix}4 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix} = \begin{bmatrix}4 & 1 & 1 \cr 20 & 6 & 7  \cr 4 & 2
& 3 \cr
\end{bmatrix},$$that is, post-multiplication by $S$ multiplies by $4$
the first column of $A$.
\end{exa}
\begin{thm}[Multiplication by a Dilatation Matrix]\label{thm:mult_by_dilatation_matrix}
Pre-multiplication of the~matrix $A\in\mat{n\times m}{ \BBF }$ by
the~dilatation~matrix ${\bf I}_n + (\lambda - 1_{\BBF }) {\bf
E}_{ii}\in\mat{n\times n}{ \BBF }$ multiplies the $i$-th row of $A$
by $\lambda$ and leaves the other rows of $A$ unchanged. Similarly,
if $B\in \mat{p\times n}{ \BBF }$ post-multiplication of $B$ by
${\bf I}_n + (\lambda - 1_{\BBF }) {\bf E}_{ii}$ multiplies the
$i$-th column of $B$ by $\lambda$ and leaves the other columns of
$B$ unchanged.
\end{thm}
\begin{pf}
This follows by direct application of Theorem
\ref{thm:mult_by_transvection_matrix}. \end{pf}

\begin{df}
We write ${\bf I}^{ij} _n$ for the matrix which permutes the
$i$-th row  with the $j$-th row of the identity matrix. We call
${\bf I}^{ij} _n$ a {\em transposition matrix}.
\end{df}
\begin{exa}
We have $${\bf I}^{(23)} _4 =  \begin{bmatrix}1 & 0 & 0 & 0 \cr 0
& 0 & 1 & 0 \cr 0 & 1 & 0 & 0 \cr 0 & 0 & 0 & 1 \cr
\end{bmatrix}.
$$ If $$A = \begin{bmatrix} 1 & 2 & 3 & 4\cr 5 & 6 & 7 & 8 \cr 9 &
10 & 11 & 12 \cr 13 & 14 & 15 & 16 \cr
\end{bmatrix},   $$then $${\bf I}^{(23)} _4A =  \begin{bmatrix} 1 & 2 & 3 & 4\cr  9 &
10 & 11 & 12 \cr 5 & 6 & 7 & 8 \cr 13 & 14 & 15 & 16 \cr
\end{bmatrix}, $$and
 $$A{\bf I}^{(23)} _4 =  \begin{bmatrix} 1 & 3 & 2 & 4\cr  5 & 7 & 6 & 8 \cr 9 & 11
& 10 & 12 \cr 13 & 15 & 14 & 16 \cr
\end{bmatrix}.$$
\end{exa}
\begin{thm}[Multiplication by a Transposition
Matrix]\label{thm:mult_by_transposition_matrix}\index{matrix!transposition}
If $A\in \mat{n\times m}{ \BBF }$, then ${\bf I}^{ij} _nA$ is the
matrix obtained from $A$ by permuting the $i$-th row with the
$j$-th row of $A$. Similarly, if $B\in \mat{p\times n}{ \BBF }$,
then $B{\bf I}^{ij} _n$ is the matrix obtained from $B$ by permuting
the $i$-th column with the $j$-th column of $B$.\end{thm}
\begin{pf}
 We must prove that ${\bf I}^{ij} _n A$ exchanges the $i$-th and $j$-th rows but leaves the
other rows unchanged. But this  follows upon observing that
$${\bf I}^{ij} _n = {\bf I}_n + {\bf E}_{ij} + {\bf E}_{ji} - {\bf
E}_{ii} - {\bf E}_{jj}$$and appealing to Theorem
\ref{thm:mult_elem_matrix}.

\end{pf}
\begin{df}\index{matrix!elimination}
A square matrix which is either a transvection matrix, a
dilatation matrix or a transposition matrix is called an {\em
elimination matrix.}
\end{df}
\begin{rem}
In a very loose way, we may associate pre-multiplication of a
matrix $A$ by another matrix with an operation on the  rows of
$A$, and post-multiplication of a matrix $A$ by another with an
operation on the columns of $A$.
\end{rem}



\section*{\psframebox{Homework}}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900



\begin{pro}
Consider the matrices
$$A = \begin{bmatrix} 1 & 0 & 1 & 0 \cr 0 & 1 & 0 & 1 \cr -1 & 1 & 1 &  1 \cr 1 & -1 & 1 & 1 \cr   \end{bmatrix}, \ \ \ \
B =  \begin{bmatrix} 4 & -2 & 4 & 2 \cr 0 & 1 & 0 & 1 \cr 1 & 1 &
-1 & 1 \cr 1 & -1 & 1 & 1\cr
\end{bmatrix}.   $$Find   a specific dilatation matrix $D$,
a specific transposition matrix $P$, and a specific transvection
matrix $T$ such that $B = TDAP$.\\
\begin{answer} Here is one possible approach.  If we perform $C_1 \leftrightarrow C_3$
on $A$ we obtain

$$A_1 = \begin{bmatrix} 1 & 0 & 1 & 0 \cr 0 & 1 & 0 & 1 \cr 1 & 1 & -1 &  1 \cr 1 & -1 & 1 & 1 \cr
\end{bmatrix} \ \ \ \mathrm{so\ take\ } \ \ \ P =  \begin{bmatrix} 0 & 0 & 1 & 0 \cr 0 & 1 & 0 & 0 \cr 1 & 0 & 0 &  0 \cr 0 & 0 & 0 & 1 \cr   \end{bmatrix}. $$
Now perform $2R_1 \rightarrow R_1$ on $A_1$ to obtain
$$A_2 = \begin{bmatrix} 2 & 0 & 2 & 0 \cr 0 & 1 & 0 & 1 \cr 1 & 1 & -1 &  1 \cr 1 & -1 & 1 & 1 \cr
\end{bmatrix}  \ \ \ \mathrm{so\ take\ } \ \ \ D =  \begin{bmatrix} 2 & 0 & 0 & 0 \cr 0 & 1 & 0 & 0 \cr 0 & 0 & 1 &  0 \cr 0 & 0 & 0 & 1 \cr   \end{bmatrix}. $$
Finally, perform $R_1 + 2R_4 \rightarrow R_1$ on $A_2$ to obtain
$$B = \begin{bmatrix} 4 & -2 & 4 & 2 \cr 0 & 1 & 0 & 1 \cr 1 & 1 & -1 &  1 \cr 1 & -1 & 1 & 1 \cr
\end{bmatrix}  \ \ \ \mathrm{so\ take\ } \ \ \ T =  \begin{bmatrix} 1 & 0 & 0 & 2 \cr 0 & 1 & 0 & 0 \cr 0 & 0 & 1 &  0 \cr 0 & 0 & 0 & 1 \cr   \end{bmatrix}. $$
\end{answer}
\end{pro}

\begin{pro}
The matrix
$$ A =
\begin{bmatrix} a & b & c \cr d & e & f \cr g & h & i \cr
\end{bmatrix}
$$ is transformed into the matrix
 $$ B =  \begin{bmatrix} h - g & g & i \cr e - d & d & f \cr 2b - 2a
 & 2a & 2c \cr
\end{bmatrix}
$$by a series of row  and column operations. Find  explicit permutation
matrices $P, P'$, an explicit dilatation matrix $D$, and an
explicit transvection matrix $T$ such that $$B = DPAP'T.   $$
\begin{answer} Here is one possible approach.
\begin{eqnarray*}
\begin{bmatrix} a & b & c \cr d & e & f \cr g & h & i \cr
\end{bmatrix}  & \grstep{P: \rho_3 \swap \rho_1} &      \begin{bmatrix}   g & h & i
\cr d & e & f \cr a & b & c \cr
\end{bmatrix} \\
   & \grstep{P': C_1 \swap C_2} &   \begin{bmatrix}   h & g & i \cr e
& d & f \cr b & a & c \cr
\end{bmatrix} \\ &  \grstep{T: C_1 - C_2 \longrightarrow C_1} &   \begin{bmatrix} h -
g & g & i \cr e - d & d & f \cr b - a & a & c \cr
\end{bmatrix} \\
   & \grstep{D: 2\rho_3 \longrightarrow \rho_3} &   \begin{bmatrix} h
- g & g & i \cr e - d & d & f \cr 2b - 2a & 2a & 2c \cr
\end{bmatrix} \end{eqnarray*}
Thus we take
$$P = \begin{bmatrix} 0 & 0 & 1 \cr 0 & 1 & 0 \cr 1 & 0 & 0 \cr    \end{bmatrix}, \ \ P' = \begin{bmatrix} 0 & 1 & 0 \cr 1 & 0 & 0 \cr 0 & 0 & 1 \cr    \end{bmatrix} ,      $$
$$T = \begin{bmatrix} 1 & 0 & 0 \cr -1 & 1 & 0 \cr 0 & 0 & 1 \cr  \end{bmatrix}, \ \ \ D = \begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 2 \cr     \end{bmatrix}.      $$

\end{answer}
\end{pro}
\begin{pro}
Let $(A, B)\in (\mat{n\times n}{ \BBF })^2$. Prove that if $$(\forall X\in
\mat{n\times n}{ \BBF }), \ (\tr{AX} =  \tr{BX}),$$ then $A = B$.
\begin{answer} Let ${\bf
E}_{ij}\in\mat{n\times n}{ \BBF }$.  Then
$$
A{\bf E}_{ij} = \begin{bmatrix} 0 & 0 & \ldots & a_{1i} & \ldots &
0\cr 0 & 0 & \vdots & a_{2i} & \vdots & 0\cr \vdots & \vdots &
\vdots & \vdots & \vdots & \vdots\cr 0 & 0 & \vdots & a_{n - 1i} &
\vdots & 0\cr 0 & 0 & \vdots & a_{ni} & \vdots &
0\cr\end{bmatrix}, $$where the entries appear on the $j$-column.
Then we see that $\tr{A{\bf E}_{ij}} = a_{ji}$ and similarly, by
considering $B{\bf E}_{ij}$, we see that $\tr{B{\bf E}_{ij}} =
b_{ji}.$ Therefore $\forall i,j, \ \ a_{ji} = b_{ji}$, which
implies that $A = B$.
\end{answer}
\end{pro}
\begin{pro}
Let $A\in \mat{n\times n}{\BBR}$ be such that $$(\forall X\in
\mat{n\times n}{\BBR}),\ ((XA)^2 = {\bf 0}_{n}).$$ Prove that $A =
{\bf 0}_{ n}$.
\begin{answer} Let ${\bf E}_{ij}\in\mat{n\times n}{\BBR}$. Then
$$ {\bf E}_{ij}A =
\begin{bmatrix} 0 & 0 & \ldots & 0 \cr \vdots & \vdots & \ldots & \vdots
\cr a_{j1} & a_{j2} & \ldots & a_{jn} \cr \vdots & \vdots & \ldots
& \vdots \cr 0 & 0 & \ldots & 0 \cr\end{bmatrix},$$where the
entries appear on the $i$-th row. Thus
$$ ({\bf E}_{ij}A)^2 =  \begin{bmatrix} 0 & 0 &
\ldots & 0 \cr \vdots & \vdots & \ldots & \vdots \cr a_{ji}a_{j1}
& a_{ji}a_{j2} & \ldots & a_{ji}a_{jn} \cr \vdots & \vdots &
\ldots & \vdots \cr 0 & 0 & \ldots & 0 \cr\end{bmatrix},$$which
means that $\forall\, i, j, k,\ a_{ji}a_{jk} = 0$. In particular,
$a_{ji} ^2 = 0,$ which means that $\forall i, j,  a_{ji} = 0$,
i.e., $A = {\bf 0}_{n}.$
\end{answer}
\end{pro}




\end{multicols}



\section{Matrix Inversion}
\begin{df}
Let $A\in\mat{m\times n}{ \BBF }$. Then $A$ is said to be {\em
left-invertible} if $\exists L\in\mat{n\times m}{ \BBF }$ such that
$LA = {\bf I}_{n}$. $A$ is said to be {\em right-invertible} if
$\exists R\in\mat{n\times m}{ \BBF }$ such that $AR = {\bf I}_{m}$.
A matrix is said to be {\em invertible} if it possesses a right and
a left inverse. A matrix which is not invertible is said to be {\em
singular}.
\end{df}
\begin{exa}
The matrix $A\in \mat{2\times 3}{\BBR}$
$$A = \begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr  \end{bmatrix}$$
has infinitely many right-inverses of the form
$$ R_{(x, y)} = \begin{bmatrix} 1 & 0 \cr 0
& 1 \cr x & y \cr
\end{bmatrix}.
$$For $$\begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr  \end{bmatrix}
\begin{bmatrix} 1 & 0 \cr 0
& 1 \cr x & y \cr
\end{bmatrix} = \begin{bmatrix} 1 & 0 \cr 0 & 1
\end{bmatrix},$$regardless of the values of $x$ and $y$. Observe,
however, that $A$ does not have a left inverse, for
$$\begin{bmatrix}a & b \cr c & d\cr f & g \cr  \end{bmatrix}    \begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr  \end{bmatrix}
= \begin{bmatrix}a & b & 0 \cr c & d & 0 \cr f & g & 0
\end{bmatrix},$$which will never give ${\bf I}_3$ regardless of
the values of $a,b,c,d,f,g$.
\end{exa}
\begin{exa}
If $\lambda \neq 0$, then the scalar matrix  $\lambda {\bf I}_n$
is invertible, for $$ \left(\lambda {\bf
I}_n\right)\left(\lambda^{-1} {\bf I}_n\right)  = {\bf I}_n=
\left(\lambda^{-1} {\bf I}_n\right)\left(\lambda {\bf
I}_n\right).$$
\end{exa}

\begin{exa}
The zero matrix ${\bf 0}_n$ is singular.
\end{exa}
\begin{thm}\label{thm:inverse_square_matrices}
Let $A\in\mat{n\times n}{ \BBF }$ be a square matrix possessing a left
inverse $L$ and a right inverse $R$. Then $L = R$. Thus an
invertible square matrix possesses a unique inverse.
\end{thm}
\begin{pf}
Observe that we have $LA = {\bf I}_n = AR$. Then
$$L = L{\bf I}_n = L(AR) = (LA)R = {\bf I}_nR = R.
$$
\end{pf}
\begin{df}
The subset of $\mat{n\times n}{ \BBF }$ of all invertible $n\times
n$ matrices is denoted by $\gl{n}{ \BBF }$, read ``the linear group
of rank $n$ over $\BBF$.''
\end{df}
\begin{cor}\label{cor:inverse_of_product_of_matrices}
Let $(A, B)\in (\gl{n}{ \BBF })^2$. Then $AB$ is also invertible and
$$(AB)^{-1} = B^{-1}A^{-1}.
$$
\end{cor}
\begin{pf}
Since $AB$ is a square matrix, it suffices to notice that
$$B^{-1}A^{-1}(AB) = (AB)B^{-1}A^{-1} = {\bf I}_n$$ and that since
the inverse of a square matrix is unique, we must have
$B^{-1}A^{-1} = (AB)^{-1}.$
\end{pf}
\begin{cor}
If a square matrix $S\in\mat{n\times n}{ \BBF }$ is invertible, then
$S^{-1}$ is also invertible and $(S^{-1})^{-1} = S$, in view of the
uniqueness of the inverses of square matrices.
\end{cor}
\begin{cor}
If a square matrix $A\in\mat{n\times n}{ \BBF }$ is invertible, then
$A^T$ is also invertible and $(A^T)^{-1} = (A^{-1})^T$.
\end{cor}
\begin{pf}
We claim that $(A^T)^{-1}=(A^{-1})^T$. For $$AA^{-1} = {\bf I}_n
\implies (AA^{-1})^T = {\bf I}_n ^T \implies (A^{-1})^TA^T = {\bf
I}_n,
$$where we have used Theorem \ref{thm:properties_transpose}.
\end{pf}
The next few theorems will prove that elimination matrices are
 invertible matrices.
\begin{thm}[Invertibility of Transvections] \label{thm:inverse_transvections}
Let ${\bf I}_n + \lambda {\bf E}_{ij}\in \mat{n\times n}{ \BBF }$ be
a transvection, and let $i \neq j$. Then $$({\bf I}_n + \lambda {\bf
E}_{ij})^{-1} = {\bf I}_n -\lambda  {\bf E}_{ij}.$$
\end{thm}
\begin{pf}
Expanding the product $$\begin{array}{lll} ({\bf I}_n + \lambda
{\bf E}_{ij})({\bf I}_n -\lambda  {\bf E}_{ij}) & = & {\bf I}_n +
\lambda {\bf E}_{ij} -\lambda  {\bf E}_{ij} -\lambda ^2 {\bf
E}_{ij}{\bf E}_{ij} \\
& = & {\bf I}_n  -\lambda^2\delta _{ij}{\bf E}_{ij}\\
& = & {\bf I}_n,\end{array}$$since $i \neq j$.
\end{pf}
\begin{exa}
By Theorem \ref{thm:inverse_transvections}, we have
$$\begin{bmatrix}1 & 0 & 3 \cr 0 & 1 & 0 \cr 0 & 0 & 1    \end{bmatrix}\begin{bmatrix}1 & 0 & -3 \cr 0 & 1 & 0 \cr 0 & 0 & 1    \end{bmatrix}
=\begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1
\end{bmatrix}.   $$
\end{exa}
\begin{thm}[Invertibility of Dilatations]\label{thm:inver_dilatation}
Let $\lambda \neq 0_{\BBF }$. Then $$({\bf I}_n + (\lambda - 1_{\BBF
}) {\bf E}_{ii})^{-1} = {\bf I}_n + (\lambda^{-1} - 1_{\BBF }) {\bf
E}_{ii}.$$\end{thm}
\begin{pf}
Expanding the product, and using ${\bf E}_{ii}{\bf E}_{ii} = {\bf E}_{ii}$,
$$\begin{array}{lll}({\bf I}_n + (\lambda -
1_{\BBF }) {\bf E}_{ii})({\bf I}_n + (\lambda^{-1} - 1_{\BBF }) {\bf
E}_{ii}) & = & {\bf I}_n + (\lambda - 1_{\BBF }) {\bf
E}_{ii} \\
& & \quad + (\lambda^{-1} - 1_{\BBF }) {\bf E}_{ii} \\ & & \qquad + (\lambda - 1_{\BBF })(\lambda^{-1} - 1_{\BBF }){\bf E}_{ii} \\
& = & {\bf I}_n  + (\lambda - 1_{\BBF } + \lambda^{-1} - 1_{\BBF }
\\ &
& \qquad + 1_{\BBF } - \lambda  - \lambda^{-1} + 1_{\BBF }){\bf E}_{ii}\\
 & = & {\bf I}_n,\end{array}$$since the coefficient of ${\bf E}_{ii}$
vanishes, proving the assertion.
\end{pf}
\begin{exa}
By Theorem \ref{thm:inver_dilatation}, we have
$$\begin{bmatrix}1 & 0 & 0 \cr 0 & 2 & 0 \cr 0 & 0 & 1    \end{bmatrix}\begin{bmatrix}1 & 0 & 0 \cr 0 & \frac{1}{2} & 0 \cr 0 & 0 & 1    \end{bmatrix}
=\begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1
\end{bmatrix}.   $$
\end{exa}
Repeated applications of Theorem \ref{thm:inver_dilatation} give
the following corollary.
\begin{cor}
If $\lambda_1\lambda_2\lambda_3 \cdots \lambda_n \neq 0_{\BBF },$
then
$$\begin{bmatrix}\lambda_1 & 0 & 0 & 0 & \cdots & 0 \cr
0 & \lambda_2 & 0 & 0 & \cdots & 0 \cr 0 & 0 & \lambda_3 & 0 &
\cdots & 0 \cr \vdots & \vdots & \vdots & \vdots & \cdots & \vdots
\cr 0 & 0 & 0 & 0 & \cdots & \lambda_n \cr
 \end{bmatrix} $$is invertible and
 $$\begin{bmatrix}\lambda_1 & 0 & 0 & 0 & \cdots & 0 \cr
0 & \lambda_2 & 0 & 0 & \cdots & 0 \cr 0 & 0 & \lambda_3 & 0 &
\cdots & 0 \cr \vdots & \vdots & \vdots & \vdots & \cdots & \vdots
\cr 0 & 0 & 0 & 0 & \cdots & \lambda_n \cr
 \end{bmatrix}^{-1} = \begin{bmatrix}\lambda_1 ^{-1} & 0 & 0 & 0 & \cdots & 0 \cr
0 & \lambda_2 ^{-1} & 0 & 0 & \cdots & 0 \cr 0 & 0 & \lambda_3
^{-1}& 0 & \cdots & 0 \cr \vdots & \vdots & \vdots & \vdots &
\cdots & \vdots \cr 0 & 0 & 0 & 0 & \cdots & \lambda_n ^{-1} \cr
 \end{bmatrix}  $$
\end{cor}
\begin{thm}[Invertibility of Permutation Matrices]\label{thm:inverse_permutation_matrix}
Let ${\bf I}^{ij} _n$ be a transposition matrix.  Then$$ ({\bf I}^{ij} _n)^{-1}
 = ({\bf I}^{ij} _n)^T.  $$
\end{thm}
\begin{pf}
By Theorem \ref{thm:mult_by_transposition_matrix}
pre-multiplication of ${\bf I}^{ij} _n$ by ${\bf I}^{ij} _n$
exchanges the $i$-th row with the $j$-th row, meaning that they
return to the original position in ${\bf I}_n$. Observe in
particular that ${\bf I}^{ij} _n = ({\bf I}^{ij} _n)^T$, and so
${\bf I}^{ij} _n({\bf I}^{ij} _n)^T = {\bf I}_n$.
\end{pf}
\begin{exa}
By Theorem \ref{thm:inverse_permutation_matrix}, we have
$$\begin{bmatrix}1 & 0 & 0 \cr 0 & 0 & 1 \cr 0 & 1 & 0    \end{bmatrix}\begin{bmatrix}1 & 0 & 0 \cr 0 & 0 & 1 \cr 0 & 1 & 0
\end{bmatrix}
=\begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1
\end{bmatrix}.   $$
\end{exa}
\begin{cor}
If a square matrix can be represented as the product of
elimination matrices of the same size, then it is invertible.
\end{cor}
\begin{pf}This follows from  Corollary \ref{cor:inverse_of_product_of_matrices},
and  Theorems \ref{thm:inverse_transvections},
\ref{thm:inver_dilatation}, and
\ref{thm:inverse_permutation_matrix}.
\end{pf}
\begin{exa}
Observe that $$A = \begin{bmatrix} 1 & 0 & 0 \cr 0 & 3 & 4 \cr 0 &
0  & 1 \cr
\end{bmatrix}
$$is the transvection ${\bf I}_3 + 4{\bf E}_{23}$ followed by the dilatation of the second
column of this transvection by $3$. Thus
$$\begin{bmatrix} 1 & 0 & 0 \cr 0 & 3 & 4 \cr 0 &
0  & 1 \cr
\end{bmatrix} = \begin{bmatrix} 1 & 0 & 0 \cr 0 & 1 & 4 \cr 0 &
0  & 1 \cr
\end{bmatrix}\begin{bmatrix} 1 & 0 & 0 \cr 0 & 3 & 0 \cr 0 &
0 &  1 \cr
\end{bmatrix},   $$and so

$$\begin{array}{lll}\begin{bmatrix} 1 & 0 & 0 \cr 0 & 3 & 4 \cr 0 &
 0 & 1 \cr
\end{bmatrix}^{-1} & = & \begin{bmatrix} 1 & 0 & 0 \cr 0 & 3 & 0 \cr 0 &
0 & 1 \cr
\end{bmatrix}^{-1}\begin{bmatrix} 1 & 0 & 0 \cr 0 & 1 & 4 \cr 0 &
0 &  1 \cr
\end{bmatrix}^{-1} \vspace{2mm} \\
& = & \begin{bmatrix} 1 & 0 & 0 \cr 0 & \frac{1}{3} & 0 \cr 0 & 0
& 1 \cr
\end{bmatrix}\begin{bmatrix} 1 & 0 & 0 \cr 0 & 1 & -4 \cr 0 &
0 &  1 \cr
\end{bmatrix}\vspace{2mm}\\
& = & \begin{bmatrix} 1 & 0 & 0 \cr 0 & \frac{1}{3} & -\frac{4}{3}
\cr 0 & 0 &  1 \cr
\end{bmatrix}.

\end{array}$$

\end{exa}

\begin{exa}
We have $$\begin{bmatrix}1 & 1 & 1 \cr 0 & 1 & 1 \cr 0 & 0 & 1 \cr
\end{bmatrix} = \begin{bmatrix}1 & 1 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix}\begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 1 \cr 0 & 0 & 1 \cr
\end{bmatrix},
$$hence
$$\begin{array}{lll}\begin{bmatrix}1 & 1 & 1 \cr 0 & 1 & 1 \cr 0 & 0 & 1 \cr
\end{bmatrix}^{-1} & = & \begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & 1 \cr 0 & 0 & 1 \cr
\end{bmatrix}^{-1}\begin{bmatrix}1 & 1 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix}^{-1}\vspace{2mm}\\
& = & \begin{bmatrix}1 & 0 & 0 \cr 0 & 1 & -1 \cr 0 & 0 & 1 \cr
\end{bmatrix}\begin{bmatrix}1 & -1 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix}\vspace{2mm}\\
& = &\begin{bmatrix}1 & -1 & 0 \cr  0 & 1 & -1 \cr 0 & 0 & 1 \cr
\end{bmatrix}.
\end{array}
$$

\end{exa}\bigskip

In the next section we will give a general method that will permit
us to find the inverse of a square matrix when it exists.
\begin{exa}Let $\dis{T = \begin{bmatrix}a & b \cr c & d
\cr\end{bmatrix}\in\mat{2\times 2}{\BBR}}$. Then $$ \begin{bmatrix}a
& b \cr c & d \cr\end{bmatrix}\begin{bmatrix} d & -b \cr -c & a
\cr\end{bmatrix} = (ad - bc)\begin{bmatrix}1 & 0 \cr 0 & 1
\cr\end{bmatrix}$$Thus if $ad - bc \neq 0$ we see that
$$T^{-1} = \begin{bmatrix}\frac{d}{ad - bc} & -\frac{b}{ad - bc} \cr -\frac{c}{ad - bc} & \frac{a}{ad - bc} \cr \end{bmatrix}.$$
\end{exa}
\begin{exa}
If $$A = \begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix},    $$
then $A$ is invertible, for an easy computation shews that
$$ A^2 = \begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix}^2 = 4{\bf I}_4,     $$whence the inverse sought is
$$ A^{-1} = \frac{1}{4}A = \frac{1}{4}\begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix}  = \begin{bmatrix} 1/4 & 1/4 & 1/4 & 1/4  \cr
1/4 & 1/4 & -1/4 & -1/4 \cr 1/4 & -1/4 & 1/4 & -1/4  \cr 1/4 &
-1/4 & -1/4 & 1/4  \cr\end{bmatrix}. $$

\end{exa}
\begin{exa}\index{matrix!nilpotent}
A matrix $A\in\mat{n\times n}{\BBR}$  is said to be {\em nilpotent}
of index $k$ if it satisfies $A \neq {\bf 0}_n, A^2 \neq {\bf 0}_n,
\ldots , A^{k - 1} \neq {\bf 0}_n$ and $A^k = {\bf 0}_n$ for integer
$k \geq 1.$ Prove that if $A$ is nilpotent, then ${\bf I}_n - A$ is
invertible and find its inverse.
\end{exa}
\begin{solu} To motivate the solution, think that instead of a matrix, we
had a real number $x$ with $|x| < 1$. Then the inverse of $1 - x$ is
$$(1 - x)^{-1} = \frac{1}{1 - x} = 1 + x + x^2 + x^3 + \cdots
.$$Notice now that since $A^{k} = {\bf 0}_n,$ then $A^p = {\bf 0}_n$
for $p \geq k$. We conjecture thus that
$$({\bf I}_n - A)^{-1} =  {\bf I}_n + A + A^2 + \cdots + A^{k - 1}.$$The
conjecture is easily verified, as
$$\begin{array}{lll}({\bf I}_n - A)({\bf I}_n + A + A^2 + \cdots + A^{k - 1}) &  = &
 {\bf I}_n + A + A^2 + \cdots + A^{k - 1} \\
 & & \qquad - (A + A^2 + A^3 + \cdots + A^{k}) \\
 &  = & {\bf I}_n\end{array}$$
and
$$\begin{array}{lll}({\bf I}_n + A + A^2 + \cdots + A^{k - 1})({\bf I}_n - A) &
=& {\bf I}_n - A + A - A^2  + A^2 - A^3 + \cdots  \\
& & \qquad \cdots + A^{k - 2} - A^{k - 1} + A^{k - 1} - A^{k} \\
& = &{\bf I}_n.
\end{array}$$
\end{solu}
\begin{exa}
The inverse of $A\in\mat{3\times 3}{\BBZ_5}$,
$$A = \begin{bmatrix}\overline{2} & \overline{0} & \overline{0} \cr
\overline{0} & \overline{3} & \overline{0} \cr \overline{0} &
\overline{0} & \overline{4} \cr \end{bmatrix}$$ is
$$A^{-1} = \begin{bmatrix}\overline{3} & \overline{0} & \overline{0} \cr
\overline{0} & \overline{2} & \overline{0} \cr \overline{0} &
\overline{0} & \overline{4} \cr \end{bmatrix},$$as
$$AA^{-1} = \begin{bmatrix}\overline{2} & \overline{0} & \overline{0} \cr
\overline{0} & \overline{3} & \overline{0} \cr \overline{0} &
\overline{0} & \overline{4} \cr
\end{bmatrix}\begin{bmatrix}\overline{3} & \overline{0} &
\overline{0} \cr \overline{0} & \overline{2} & \overline{0} \cr
\overline{0} & \overline{0} & \overline{4} \cr \end{bmatrix} =
\begin{bmatrix}\overline{1} & \overline{0} & \overline{0} \cr
\overline{0} & \overline{1} & \overline{0} \cr \overline{0} &
\overline{0} & \overline{1} \cr \end{bmatrix}$$


\end{exa}
\begin{exa}[Putnam Exam, 1991] Let $A$ and $B$ be different $n \times n$ matrices with
real entries. If $A^3 = B^3$ and $A^2B = B^2A$, prove that $A^2 +
B^2$ is not invertible.
\end{exa}
\begin{solu}Observe that $$ (A^2 + B^2)(A - B) = A^3 - A^2B + B^2A -
B^3 = {\bf 0}_n.
$$If $A^2 + B^2$ were invertible, then we would have
$$  A - B =  (A^2 + B^2)^{-1}(A^2 + B^2)(A - B) = {\bf
0}_n,$$contradicting the fact that $A$ and $B$ are different
matrices.
\end{solu}


\begin{lem}\label{lem:inverse_and_zero_row}
If $A\in\mat{n\times n}{ \BBF }$ has a row or a column consisting
all of $0_{\BBF }$'s, then $A$ is singular.
\end{lem}
\begin{pf}
If $A$ were invertible, the $(i,i)$-th entry of the product of its
inverse with $A$ would be $1_{\BBF }$. But if the $i$-th row of $A$
is all $0_{\BBF }$'s, then $\sum _{k = 1} ^n a_{ik}b_{ki} = 0_{\BBF
}$, so the $(i,i)$ entry of any matrix product with $A$ is $0_{\BBF
}$, and never $1_{\BBF }$.
\end{pf}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
The inverse of the matrix $A = \begin{bmatrix}1 & 1& 1 \cr 1 & 1 &
2 \cr 1 & 2 & 3 \cr
\end{bmatrix}$ is the matrix $A^{-1} = \begin{bmatrix}
a &  1& -1 \cr 1 & b & 1 \cr -1 & 1 & 0 \cr
\end{bmatrix}$. Determine $a$ and $b$. \begin{answer} $a = 1$, $b=-2$.
\end{answer}
\end{pro}
\begin{pro}
A square matrix $A$ satisfies $A^3 \neq {\bf 0}_n$
 but $A^4 = {\bf 0}_n$. Demonstrate that ${\bf I}_n + A$ is
 invertible and find, with proof, its inverse. \\
\begin{answer} Claim: $({\bf I}_n + A)^{-1} = {\bf I}_n - A + A^2 - A^3.$ For observe
that, since $A^4 = {\bf 0}_n$,
$$({\bf I}_n +  A)({\bf I}_n - A + A^2 - A^3) = {\bf I}_n - A + A^2 - A^3 + A - A^2 + A^3 - A^4 = {\bf I}_n,    $$
proving the claim.
\end{answer}
\end{pro}
\begin{pro}
Prove or disprove! If $(A,B,A+B)\in(\gl{n}{\BBR})^3$ then
$(A+B)^{-1} = A^{-1} + B^{-1}$.
\begin{answer}
Disprove! It is enough to take $A=B = 2{\bf I}_n$. Then $(A+
B)^{-1} = (4{\bf I}_n)^{-1} = \frac{1}{4}{\bf I}_n$ but $A^{-1} +
B^{-1} = \frac{1}{2}{\bf I}_n + \frac{1}{2}{\bf I}_n = {\bf I}_n$.
\end{answer}
\end{pro}
\begin{pro}
Let $S\in\gl{n}{ \BBF }$, $(A,B)\in(\mat{n\times n}{ \BBF })^2$, and
$k$ a positive integer. Prove that if $B = SAS^{-1}$ then $B^k
=SA^kS^{-1}$.
\end{pro}
\begin{pro}
Let $A\in\mat{n\times n}{ \BBF }$ and let $k$ be a positive integer.
Prove that $A$ is invertible if and only if $A^k$ is invertible.
\end{pro}
\begin{pro}
Let $S\in\gl{n}{\BBC}$, $A\in\mat{n\times n}{\BBC}$ with $A^k = {\bf
0}_n$ for some positive integer $k$. Prove that both ${\bf I}_n -
SAS^{-1}$
 and ${\bf I}_n - S^{-1}AS$ are invertible and find their
 inverses.
\end{pro}
\begin{pro}
Let $A$ and $B$ be square matrices of the same size such that both
$A - B$ and $A+B$ are invertible. Put $C = (A-B)^{-1} +
(A+B)^{-1}$. Prove that $$ACA - ACB +BCA - BCB = 2A.   $$
\end{pro}
\begin{pro}
Let $A, B, C$ be non-zero square matrices of the same size over
the same field and such that $ABC = {\bf 0}_n$. Prove that at
least two of these three matrices are not invertible.
\begin{answer}
We argue by contradiction. If exactly one of the matrices is not
invertible, the identities
$$ A =A{\bf I}_n = (ABC)(BC)^{-1} = {\bf 0}_n,  $$
$$ B ={\bf I}_nB{\bf I}_n = (A)^{-1}(ABC)C^{-1} = {\bf 0}_n,  $$
$$ C ={\bf I}_nC = (AB)^{-1}(ABC) = {\bf 0}_n,  $$
shew a contradiction depending on which of the matrices are
invertible. If all the matrices are invertible then

$${\bf 0}_n = {\bf 0}_nC^{-1}B^{-1}A^{-1}= (ABC)C^{-1}B^{-1}A^{-1} = {\bf I}_n,$$
also gives a contradiction.
\end{answer}
\end{pro}
\begin{pro}
Let $(A,B)\in (\mat{n\times n}{ \BBF })^2$ be such that $A^2 = B^2 =
(AB)^2 = {\bf I}_n$. Prove that $AB = BA$.
\begin{answer} Observe that $A, B, AB$ are invertible. Hence
 $$\begin{array}{lll} A^2B^2 = {\bf I}_n  = (AB)^2&
\implies & AABB = ABAB \\
& \implies & AB = BA,
\end{array}$$by cancelling $A$ on the left and $B$ on the right.
One can also argue that $A = A^{-1}$, $B= B^{-1}$, and so $AB =
(AB)^{-1} = B^{-1}A^{-1} = BA$.
\end{answer}
\end{pro}
\begin{pro}
Let $A = \begin{bmatrix}a & b & b & \cdots & b \cr b & a & b &
\cdots & b \cr b & b & a & \cdots & b \cr \vdots & \vdots & \vdots
& \cdots & \vdots \cr b & b & b & \cdots & a \cr
\end{bmatrix}\in\mat{n\times n}{ \BBF }$, $n > 1$, $(a, b)\in \BBF ^2$.
Determine when $A$ is invertible and find this inverse when it
exists.
\begin{answer}
Observe that $A = (a - b){\bf I}_n + bU$, where $U$ is the $n\times
n$ matrix with $1_{\BBF }$'s everywhere. Prove that $$A^2 = (2(a-b)
+ nb)A - ((a-b)^2 + nb(a-b)){\bf I}_n.$$
\end{answer}
\end{pro}
\begin{pro} \index{magic square matrices}
Let $(A,B)\in (\mat{n\times n}{ \BBF })^2$ be matrices such that
$A+B =AB$. Demonstrate that $A - {\bf I}_n$ is invertible and find
this inverse.
\begin{answer}
Compute $(A - {\bf I}_n)(B - {\bf I}_n)$.
\end{answer}
\end{pro}
\begin{pro}
Let $S\in\gl{n}{ \BBF }$ and $A\in\mat{n\times n}{ \BBF }$. Prove
that $\tr{A} = \tr{SAS^{-1}}$.
\begin{answer}By Theorem \ref{tracethm} we have $\tr{SAS^{-1}} = \tr{S^{-1}SA} =
\tr{A}$.
\end{answer}
\end{pro}
\begin{pro}
Let $A\in\mat{n\times n}{\BBR}$  be a skew-symmetric matrix. Prove
that ${\bf I}_n + A$ is invertible. Furthermore, if $B = ({\bf I}_n
- A)({\bf I}_n + A)^{-1}$, prove that $B^{-1} = B^T$.
\end{pro}
\begin{pro}
A matrix $A\in\mat{n\times n}{ \BBF }$ is said to be a {\em magic
square} if the sum of each individual row equals the sum of each
individual column. Assume that $A$ is a magic square and invertible.
Prove that $A^{-1}$ is also a magic square.
\end{pro}

\end{multicols}
\section{Block Matrices}

\begin{df}
Let $A\in\mat{m\times n}{ \BBF }$, $B\in\mat{m\times s}{ \BBF }$,
$C\in\mat{r\times n}{ \BBF }$, $D\in\mat{r\times s}{ \BBF }$. We use
the notation
$$L = \begin{bmat}{c|c}A & B \cr\hline C & D  \end{bmat} $$for the {\em block matrix}
$L\in\mat{(m + r)\times (n + s)}{ \BBF }$.
\end{df}
\begin{rem}
If $(A,A')\in (\mat{m}{ \BBF })^2$, $(B,B')\in (\mat{m\times n}{
\BBF })^2$, $(C,C')\in (\mat{n\times m}{ \BBF })^2$,  $(D,D')\in
(\mat{m}{ \BBF })^2$, and
$$S = \begin{bmat}{c|c}A & B \cr\hline C
& D
\end{bmat}, \ \ \ T = \begin{bmat}{c|c}A' & B' \cr\hline C'
& D'
\end{bmat},
$$then it is easy to verify that
$$ ST = \begin{bmat}{c|c}AA' + BC' & AB' + BD' \cr\hline CA' + DC'
& CB' + DD'
\end{bmat}. $$
\end{rem}

\begin{lem}\label{lem:inverse_block_matrices}
Let $L\in\mat{(m + r)\times (m + r)}{ \BBF }$ be the square block
matrix $$L  = \begin{bmat}{c|c}A & C \cr\hline {\bf 0}_{r\times m} &
B
\end{bmat},
$$with square matrices $A\in\mat{m}{ \BBF }$ and
$B\in\mat{r\times r}{ \BBF }$, and a matrix $C\in\mat{m\times r}{
\BBF }$. Then $L$ is invertible if and only if $A$ and $B$ are, in
which case
$$L^{-1} =    \begin{bmat}{c|c}A^{-1} & -A^{-1}CB^{-1} \cr\hline {\bf 0}_{r\times m} &
B^{-1}
\end{bmat}  $$
\end{lem}
\begin{pf}
Assume first that $A$, and $B$ are invertible. Direct calculation
yields
$$\begin{array}{lll}\begin{bmat}{c|c}A & C \cr\hline {\bf 0}_{r\times m} & B
\end{bmat}\begin{bmat}{c|c}A^{-1} & -A^{-1}CB^{-1} \cr\hline {\bf 0}_{r\times m} &
B^{-1}
\end{bmat}  & = & \begin{bmat}{c|c}AA^{-1} & -AA^{-1}CB^{-1} + CB^{-1} \cr\hline {\bf 0}_{r\times m} &
BB^{-1} \cr
\end{bmat} \vspace{2mm} \\
& = &  \begin{bmat}{c|c}{\bf I}_m & {\bf 0}_{m\times r} \cr\hline
{\bf 0}_{r\times m} & {\bf I}_{r} \cr
\end{bmat}\\
& = & {\bf I}_{m + r}.  \end{array}  $$ Assume now that $L$ is
invertible, $L^{-1} =\dis{\begin{bmat}{c|c}E & H \cr\hline J& K
\end{bmat}} $, with  $E\in\mat{m}{ \BBF }$ and
$K\in\mat{r\times r}{ \BBF }$, but that, say,  $B$ is singular. Then
$$\begin{array}{lll}
\begin{bmat}{c|c}{\bf I}_m & {\bf 0}_{m\times r} \cr\hline {\bf
0}_{r\times m} & {\bf I}_{r}
\end{bmat} & = & LL^{-1}
\\
& = & \begin{bmat}{c|c}A & C \cr\hline {\bf 0}_{r\times m} & B
\end{bmat}\begin{bmat}{c|c}E & H \cr\hline J& K
\end{bmat} \vspace{2mm} \\
& = & \begin{bmat}{c|c}AE + CJ& AH + CK \cr\hline BJ& BK
\end{bmat},
\end{array}  $$which gives $BK = {\bf I}_r$, i.e., $B$ is
invertible, a contradiction.
\end{pf}





\section{Rank of a Matrix}
\begin{df}
Let $(A, B)\in (\mat{m\times n}{ \BBF })^2$. We say that $A$ is {\em
row-equivalent} to $B$ if there exists a matrix $R\in\gl{m}{ \BBF }$
such that $B = RA$. Similarly, we say that $A$ is {\em
column-equivalent} to $B$ if there exists a matrix $C\in\gl{n}{ \BBF
}$ such that $B = AC$. We say that $A$ and $B$ are {\em equivalent}
if $\exists (P,Q)\in\gl{m}{ \BBF }\times \gl{n}{ \BBF }$ such that
$B = PAQ$.
\end{df}
\begin{thm}
Row equivalence, column equivalence, and equivalence are
equivalence relations.
\end{thm}
\begin{pf}
We prove the result for row equivalence. The result for column
equivalence, and equivalence are analogously proved.

\bigskip
Since ${\bf I}_m\in\gl{m}{ \BBF }$ and $A = {\bf I}_mA$, row
equivalence is a reflexive relation. Assume $(A, B)\in (\mat{m\times
n}{ \BBF })^2$ and that $\exists P\in\gl{m}{ \BBF }$ such that $B =
PA$. Then $A = P^{-1}B$ and since $P^{-1}\in\gl{m}{ \BBF }$, we see
that row equivalence is a symmetric relation. Finally assume $(A, B,
C)\in (\mat{m\times n}{ \BBF })^3$ and that $\exists P\in\gl{m}{
\BBF }, \ \exists P'\in\gl{m}{ \BBF }$ such that $A = PB, B = P'C$.
Then $A = PP'C$. But $PP'\in\gl{m}{ \BBF }$ in view of Corollary
\ref{cor:inverse_of_product_of_matrices}. This completes the proof.
\end{pf}
\begin{thm}\label{thm:normal_form}\index{Hermite normal form}
Let $A\in\mat{m\times n}{ \BBF }$. Then $A$ can be reduced, by means
of pre-multiplication and post-multiplication by elimination
matrices, to a  unique matrix of the form
\begin{equation}D_{m,n,r} =
\begin{bmat}{c|c}{\bf I}_r & {\bf 0}_{r\times (n - r)} \cr\hline
{\bf 0}_{(m - r) \times r} & {\bf 0}_{(m - r)\times (n - r)} \cr
\end{bmat},\end{equation}called the  {\em Hermite normal form} of $A$. Thus there exist $P\in \gl{m}{ \BBF }, Q\in
\gl{n}{ \BBF }$ such that $D_{m,n,r} = PAQ$. The integer $r\geq 0$
is called the {\em rank} of the matrix $A$ which we denote by
$\rank{A}$. \index{rank}
\end{thm}
\begin{pf}
If $A$ is the $m\times n$ zero matrix, then the theorem is
obvious, taking $r = 0$. Assume hence that $A$ is not the zero
matrix. We proceed as follows using the {\em Gau\ss-Jordan
Algorithm}.
\begin{enumerate}
\item[{\bf GJ-1}] Since $A$ is a non-zero matrix,  it has a
non-zero column. By means of permutation matrices we move this
column to the first column. \item[{\bf GJ-2}] Since this column is a
non-zero column, it must have an entry  $a \neq 0_{\BBF }$. Again,
by means of permutation matrices, we move the row on which this
entry is to the first row. \item[{\bf GJ-3}] By means of a
dilatation matrix with scale factor $a^{-1}$, we make this new
$(1,1)$ entry into a $1_{\BBF }$. \item[{\bf GJ-4}] By means of
transvections (adding various multiples of row $1$ to the other
rows) we now annihilate every entry below the entry $(1,1)$.
\end{enumerate}
This process ends up in a matrix of the form
\begin{equation}P_1AQ_1 = \begin{bmat}{c|cccc} 1_{\BBF } &
* & * & \cdots & * \cr\hline 0_{\BBF } &  b_{22} & b_{23} & \cdots & b_{2n}
\cr 0_{\BBF } &  b_{32} & b_{33} & \cdots & b_{3n}   \cr 0_{\BBF } &
\vdots & \vdots & \cdots & \vdots \cr 0_{\BBF } &  b_{m2} & b_{m3} &
\cdots & b_{mn}   \cr
\end{bmat}.   \end{equation}
Here the asterisks represent unknown entries. Observe that the
$b$'s form a $(m - 1)\times (n - 1)$ matrix.
\begin{enumerate}
\item[{\bf GJ-5}] Apply {\bf GJ-1} through {\bf GJ-4} to the
matrix of the $b$'s.
\end{enumerate}
Observe that this results in a matrix of the form
\begin{equation}P_2AQ_2 =\begin{bmat}{cc|ccc} 1_{\BBF } & * & * & \cdots & *
\cr  0_{\BBF } &  1_{\BBF } & * & \cdots & *   \cr\hline 0_{\BBF } &
0_{\BBF } & c_{33} & \cdots & c_{3n}   \cr 0_{\BBF } &  \vdots &
\vdots & \cdots & \vdots \cr 0_{\BBF } & 0_{\BBF } & c_{m3} & \cdots
& c_{mn} \cr
\end{bmat}.   \end{equation}
\begin{enumerate}
\item[{\bf GJ-6}] Add the appropriate multiple of column 1 to
column 2, that is, apply a transvection, in order to make the entry
in the $(1, 2)$ position $0_{\BBF }$.
\end{enumerate}
This now gives a matrix of the form \begin{equation}P_3AQ_3
=\begin{bmat}{cc|ccc} 1_{\BBF } & 0_{\BBF } & * & \cdots & * \cr
0_{\BBF } & 1_{\BBF } &
* & \cdots & *   \cr\hline 0_{\BBF } &  0_{\BBF } & c_{33} & \cdots & c_{3n}   \cr
0_{\BBF } & \vdots & \vdots & \cdots & \vdots \cr 0_{\BBF } &
0_{\BBF } & c_{m3} & \cdots & c_{mn}   \cr
\end{bmat}.   \end{equation}
The matrix of the $c$'s has size $(m - 2)\times (n - 2)$.
\begin{enumerate}

\item[{\bf GJ-7}] Apply  {\bf GJ-1} through {\bf GJ-6} to the
matrix of the $c$'s, etc.
\end{enumerate}
Observe that this process eventually stops, and in fact, it is
clear that $\rank{A} \leq \min (m, n)$.

\bigskip

Suppose now that $A$ were equivalent to a matrix $D_{m,n,s}$ with
$s>r$. Since matrix equivalence is an equivalence relation,
$D_{m,n,s}$ and $D_{m,n,r}$ would be equivalent, and so there would
be $R\in\gl{m}{ \BBF }$, $S\in\gl{n}{ \BBF }$, such that
$RD_{m,n,r}S = D_{m,n,s}$, that is, $RD_{m,n,r} = D_{m,n,s}S^{-1}$.
Partition $R$ and $S^{-1}$ as follows
$$  R = \begin{bmat}{c|c} R_{11} & R_{12} \cr\hline
R_{21} & R_{22} \end{bmat}, \ \ \ S^{-1} = \begin{bmat}{c|cc}
S_{11} & S_{12} & S_{13} \cr \hline S_{21} & S_{22} & S_{23} \cr
S_{31} & S_{32} & S_{33} \cr
\end{bmat},
$$with $(R_{11},S_{11})\in(\mat{r\times r}{ \BBF })^2, S_{22}\in\mat{(s - r)\times (s - r)}{ \BBF }$.
We have $$RD_{m,n,r} =  \begin{bmat}{c|c} R_{11} & R_{12}
\cr\hline R_{21} & R_{22} \end{bmat}\begin{bmat}{c|c} {\bf I}_r &
{\bf 0}_{r\times (n - r)} \cr\hline {\bf 0}_{(m - r)\times r} &
{\bf 0}_{(m - r)\times (n - r)} \cr\end{bmat}= \begin{bmat}{c|c} R_{11}
& {\bf 0}_{r\times (n - r)} \cr\hline R_{21} & {\bf 0}_{(m - r)\times (n
- r)}
\end{bmat}, $$
and $$\begin{array}{lll}D_{m,n,s}S^{-1} &  = &  \begin{bmat}{c|cc}
{\bf I}_r & {\bf 0}_{ r\times (s - r) } & {\bf 0}_{ r \times (n -
s)} \cr\hline {\bf 0}_{(s - r) \times r} & {\bf I}_{s - r} & {\bf
0}_{(s - r) \times (n - s)} \cr {\bf 0}_{(m - s) \times r} & {\bf
0}_{(m - s) \times (s - r)} & {\bf 0}_{(m - s) \times (n - s)}
\cr\end{bmat}\begin{bmat}{c|cc} S_{11} & S_{12} & S_{13} \cr
\hline S_{21} & S_{22} & S_{23} \cr S_{31} & S_{32} & S_{33} \cr
\end{bmat}\vspace{2mm} \\
& = &  \begin{bmat}{c|cc} S_{11} & S_{12} & S_{13} \cr\hline
S_{21} & S_{22} & S_{23} \cr {\bf 0}_{(m - s) \times r} & {\bf
0}_{(m - s) \times (s - r)} & {\bf 0}_{(m - s) \times (n - s)}
\cr\end{bmat} .\end{array}$$ Since we are assuming
$$    \begin{bmat}{c|c} R_{11}
& {\bf 0}_{r\times (n - r)} \cr\hline R_{21} & {\bf 0}_{(m - r)\times (n
- r)}
\end{bmat} =      \begin{bmat}{c|cc} S_{11} & S_{12} & S_{13} \cr\hline
S_{21} & S_{22} & S_{23} \cr {\bf 0}_{(m - s) \times r} & {\bf
0}_{(m - s) \times (s - r)} & {\bf 0}_{(m - s) \times (n - s)}
\cr\end{bmat}, $$we must have $S_{12} = {\bf 0}_{r \times (s -
r)}$, $S_{13} = {\bf 0}_{r \times (n - s)}$, $S_{22} = {\bf 0}_{(s
- r) \times (s - r)}$, $S_{23} = {\bf 0}_{(s - r)\times (n - s)}$.
Hence
$$S^{-1}  = \begin{bmat}{c|cc} S_{11} & {\bf 0}_{r \times (s -
r)}& {\bf 0}_{r \times (n - s)} \cr\hline S_{21} & {\bf 0}_{(s -
r) \times (s - r)} & {\bf 0}_{(s - r)\times (n - s)} \cr S_{31} &
S_{32} & S_{33} \cr\end{bmat}.
$$The matrix $$ \begin{bmat}{c|c} {\bf 0}_{(s -
r) \times (s - r)} & {\bf 0}_{(s - r)\times (n - s)} \cr\hline
S_{32} & S_{33} \cr\end{bmat} $$ is non-invertible, by virtue of
Lemma \ref{lem:inverse_and_zero_row}. This entails that $S^{-1}$
is non-invertible by virtue of Lemma
\ref{lem:inverse_block_matrices}. This is a contradiction, since
$S$ is assumed invertible, and hence $S^{-1}$ must also be
invertible.
\end{pf}
\begin{rem}
Albeit the rank of a matrix is unique, the matrices $P$ and $Q$
appearing in Theorem \ref{thm:normal_form} are not necessarily
unique. For example, the matrix $$\begin{bmatrix} 1 & 0 \cr 0 & 1
\cr 0 & 0 \end{bmatrix}  $$ has rank $2$, the matrix
$$\begin{bmatrix} 1 & 0 & x \cr 0 & 1 & y \cr 0 & 0 & 1
\cr\end{bmatrix}$$is invertible, and an easy computation shews
that
$$\begin{bmatrix}
1 & 0 & x \cr 0 & 1 & y \cr 0 & 0 & 1
\cr\end{bmatrix}\begin{bmatrix} 1 & 0 \cr 0 & 1 \cr 0 & 0
\end{bmatrix} \begin{bmatrix} 1 & 0 \cr 0 & 1 \cr
\end{bmatrix} = \begin{bmatrix} 1 & 0 \cr 0 & 1 \cr 0 & 0
\cr\end{bmatrix},
$$regardless of the values of $x$ and $y$.
\end{rem}
\begin{cor}\label{cor:row_rank_is_column_rank}
Let $A\in\mat{m\times n}{ \BBF }$. Then $\rank{A} = \rank{A^T}$.
\end{cor}
\begin{pf}
Let $P,Q,D_{m,n,r}$ as in Theorem \ref{thm:normal_form}. Observe
that $P^T, Q^T$ are invertible. Then
$$PAQ = D_{m,n,r}  \implies Q^TA^TP^T = D_{m,n,r}^T = D_{n,m,r},
$$and since this last matrix has the same number of $1_{\BBF }$'s as
$D_{m,n,r}$, the corollary is proven.
\end{pf}

\begin{exa}
Shew that
$$A = \begin{bmatrix} 0 & 2 & 3 \cr 0 & 1 & 0 \cr   \end{bmatrix}   $$
has $\rank{A} = 2$ and find invertible matrices $P\in\gl{2}{\BBR}$
and $Q\in\gl{3}{\BBR}$ such that
$$PAQ = \begin{bmatrix} 1 & 0 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix}.   $$


\end{exa}
\begin{solu}We first exchange the first and third columns by
effecting
$$\begin{bmatrix} 0 & 2 & 3 \cr 0 & 1 & 0 \cr
\end{bmatrix}\begin{bmatrix}0 & 0 & 1 \cr  0 & 1 & 0 \cr 1 & 0 & 0 \cr  \end{bmatrix}
= \begin{bmatrix} 3 & 2 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix}.
$$We now subtract twice the second row from the first, by
effecting
$$  \begin{bmatrix} 1 & -2 \cr 0 & 1 \cr   \end{bmatrix}\begin{bmatrix} 3 & 2 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix} =
\begin{bmatrix} 3 & 0 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix}.   $$
Finally, we divide the first row by $3$,
$$ \begin{bmatrix} 1/3 & 0 \cr 0 & 1 \cr   \end{bmatrix}\begin{bmatrix} 3 & 0 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix}
= \begin{bmatrix} 1 & 0 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix}.$$
We conclude that
$$  \begin{bmatrix} 1/3 & 0 \cr 0 & 1 \cr   \end{bmatrix}\begin{bmatrix} 1 & -2 \cr 0 & 1 \cr   \end{bmatrix}\begin{bmatrix} 0 & 2 & 3 \cr 0 & 1 & 0 \cr
\end{bmatrix}\begin{bmatrix}0 & 0 & 1 \cr  0 & 1 & 0 \cr 1 & 0 & 0 \cr  \end{bmatrix} = \begin{bmatrix} 1 & 0 & 0 \cr 0 & 1 & 0 \cr   \end{bmatrix}, $$
from where we may take $$P =  \begin{bmatrix} 1/3 & 0 \cr 0 & 1
\cr   \end{bmatrix}\begin{bmatrix} 1 & -2 \cr 0 & 1 \cr
\end{bmatrix} = \begin{bmatrix} 1/3 & -2/3 \cr 0 & 1 \cr   \end{bmatrix}  $$
and $$Q = \begin{bmatrix}0 & 0 & 1 \cr  0 & 1 & 0 \cr 1 & 0 & 0
\cr  \end{bmatrix}.  $$
\end{solu}

In practice it is easier to do away with the multiplication by
elimination matrices and perform row and column operations on the
{\em augmented $(m + n)\times (m + n)$  matrix}
$$\begin{bmat}{c|c} {\bf I}_n & {\bf 0}_{n\times m} \cr\hline A & {\bf I}_m \cr      \end{bmat}.   $$

\begin{df}
Denote the rows of
 a matrix $A\in
\mat{m\times n}{ \BBF }$ by $R_1, R_2, \ldots , R_m$, and its
columns by $C_1, C_2, \ldots , C_n$. The elimination operations will
be denoted as follows.
\begin{itemize}
\item Exchanging the $i$-th row with the $j$-th row, which we
denote by $R_i \leftrightarrow R_j$, and the $s$-th column with the
$t$-th column by $C_s \leftrightarrow C_t$. \item A dilatation of
the $i$-th row by a non-zero scalar $\alpha\in \BBF \setminus
\{0_{\BBF }\}$, we will denote by $\alpha R_i \rightarrow R_i $.
Similarly, $\beta C_j \rightarrow C_j $ denotes the dilatation of
the $j$-th column by the non-zero scalar $\beta$.
 \item A transvection on the rows will be denoted by $R_i + \alpha R_j
 \rightarrow R_i$, and one on the columns by $C_s + \beta C_t
 \rightarrow C_s$.
\end{itemize}
\end{df}
\begin{exa}
Find the Hermite normal form of $$A = \begin{bmatrix}-1 & 0 \cr 0
& 0 \cr 1 & 1 \cr 1 & 2 \cr
\end{bmatrix}.
$$

\end{exa}
\begin{solu}First observe that $\rank{A} \leq \min (4,2) = 2$, so
the rank can be either $1$ or $2$ (why not $0$?). Form the
augmented matrix
$$\begin{bmat}{cc|cccc}1 & 0 & 0 & 0 & 0 & 0  \cr
0 & 1 & 0 & 0 & 0 & 0  \cr \hline
 -1 & 0 & 1 & 0 & 0 & 0 \cr 0 & 0  & 0 & 1 & 0 & 0 \cr
1 & 1 & 0 & 0 & 1 & 0 \cr 1 & 2  & 0 & 0 & 0 & 1\cr
\end{bmat}.$$
Perform $R_5 + R_3\rightarrow R_5$ and $R_6 + R_3\rightarrow R_6$
successively, obtaining
$$\begin{bmat}{cc|cccc}1 & 0 & 0 & 0 & 0 & 0  \cr
0 & 1 & 0 & 0 & 0 & 0  \cr \hline  -1 & 0 & 1 & 0 & 0 & 0 \cr
 0 & 0  & 0 & 1 & 0 & 0 \cr
0 & 1 & 1 & 0 & 1 & 0 \cr 0 & 2  & 1 & 0 & 0 & 1\cr
\end{bmat}.$$ Perform $R_6 - 2R_5 \rightarrow R_6$
$$\begin{bmat}{cc|cccc}1 & 0 & 0 & 0 & 0 & 0  \cr
0 & 1 & 0 & 0 & 0 & 0  \cr \hline  -1 & 0 & 1 & 0 & 0 & 0 \cr
 0 & 0  & 0 & 1 & 0 & 0 \cr
0 & 1 & 1 & 0 & 1 & 0 \cr 0 & 0  & -1 & 0 & -2 & 1\cr
\end{bmat}.$$Perform $R_4 \leftrightarrow R_5$
$$\begin{bmat}{cc|cccc}1 & 0 & 0 & 0 & 0 & 0  \cr
0 & 1 & 0 & 0 & 0 & 0  \cr \hline  -1 & 0 & 1 & 0 & 0 & 0 \cr 0 &
1 & 1 & 0 & 1 & 0 \cr 0 & 0  & 0 & 1 & 0 & 0 \cr
 0 & 0  & -1 & 0 & -2 & 1\cr
\end{bmat}.$$Finally, perform $-R_3\rightarrow R_3$
$$\begin{bmat}{cc|cccc}1 & 0 & 0 & 0 & 0 & 0  \cr
0 & 1 & 0 & 0 & 0 & 0  \cr \hline  1 & 0 & -1 & 0 & 0 & 0 \cr 0 &
1 & 1 & 0 & 1 & 0 \cr 0 & 0  & 0 & 1 & 0 & 0 \cr
 0 & 0  & -1 & 0 & -2 & 1\cr
\end{bmat}.$$ We conclude that
$$ \begin{bmatrix}-1 & 0  & 0 & 0 \cr
1 & 0&  1& 0 \cr 0 & 1 & 0 & 0 \cr -1 & 0 &  -2 & 1 \cr
\end{bmatrix} \begin{bmatrix}-1 & 0 \cr 0 & 0 \cr
1 & 1 \cr 1 & 2 \cr
\end{bmatrix}\begin{bmatrix}1 & 0 \cr 0 & 1\cr\end{bmatrix} = \begin{bmatrix}1 & 0 \cr 0 & 1 \cr 0 & 0 \cr 0 &
0 \cr
\end{bmatrix}. $$
\end{solu}
\begin{thm}\label{thm:rank_of_product}
Let $A\in\mat{m\times n}{ \BBF }$, $B\in\mat{n\times p}{ \BBF }$.
Then
$$\rank{AB} \leq \min (\rank{A}, \rank{B}).
$$
\end{thm}
\begin{pf}
We prove that $\rank{A} \geq \rank{AB}$. The proof that $\rank{B}
\geq \rank{AB}$ is similar and left to the reader.  Put $r =
\rank{A}, s = \rank{AB}$. There exist matrices $P\in\gl{m}{ \BBF }$,
$Q\in\gl{ n}{ \BBF }$,
 $S\in\gl{m}{ \BBF }$, $T\in\gl{p}{ \BBF }$
such that $$PAQ = D_{m,n,r}, \ \ \ SABT = D_{m,p,s}.
$$
\bigskip
Now $$D_{m,p,s} = SABT = SP^{-1}D_{m,n,r}Q^{-1}BT,
$$from where it follows that

$$ PS^{-1}D_{m,p,s} = D_{m,n,r}Q^{-1}BT.$$Now the proof is
analogous to the uniqueness proof of Theorem \ref{thm:normal_form}.
Put $U = PS^{-1}\in\gl{m}{\BBR}$ and $V = Q^{-1}BT \in \mat{n\times
p}{ \BBF }$, and partition $U$ and $V$ as follows:
$$  U = \begin{bmat}{c|c} U_{11} & U_{12} \cr\hline
U_{21} & U_{22} \end{bmat}, \ \ \ V = \begin{bmat}{c|c} V_{11} &
V_{12} \cr\hline V_{21} & V_{22} \end{bmat},
$$with $U_{11}\in\mat{s\times s}{ \BBF }, V_{11}\in\mat{r\times r}{ \BBF }$.
Then
$$UD_{m,p,s} =  \begin{bmat}{c|c} U_{11} & U_{12} \cr\hline
U_{21} & U_{22} \end{bmat}\begin{bmat}{c|c} {\bf I}_s & {\bf
0}_{s\times (p - s)}\cr\hline {\bf 0}_{(m - s)\times s} & {\bf
0}_{(m - s)\times (p - s)}
\end{bmat}\in\mat{m\times p}{ \BBF },     $$
and
$$D_{m,n,r}V =  \begin{bmat}{c|c} {\bf I}_r & {\bf
0}_{r\times (n - r)}\cr\hline {\bf 0}_{(m - r)\times r} & {\bf
0}_{(m - r)\times (n - r)}
\end{bmat}\begin{bmat}{c|c} V_{11} & V_{12} \cr\hline
V_{21} & V_{22} \end{bmat} \in\mat{m\times p}{ \BBF }.     $$ From
the equality of these two $m\times p$ matrices,  it follows that
$$\begin{bmat}{c|c} U_{11} & {\bf
0}_{s\times (p - s)}\cr\hline U_{21}& {\bf 0}_{(m - s)\times (p -
s)}
\end{bmat}  =   \begin{bmat}{c|c} V_{11} &  V_{12}\cr\hline {\bf 0}_{(m - r)\times r} & {\bf 0}_{(m - r)\times (p -
r)}
\end{bmat}.$$If $s > r$ then (i) $U_{11}$ would have at least one
row of $0_{\BBF }$'s meaning that $U_{11}$ is non-invertible by
Lemma \ref{lem:inverse_and_zero_row}. (ii) $U_{21} = {\bf 0}_{(m -
s)\times s}$. Thus from (i) and (ii) and from Lemma
\ref{lem:inverse_block_matrices}, $U$ is not invertible, which is a
contradiction.
\end{pf}
\begin{cor}
Let $A\in\mat{m\times n}{ \BBF }$, $B\in\mat{n\times p}{ \BBF }$. If
$A$ is invertible then $\rank{AB} = \rank{B}$. If $B$ is invertible
then $\rank{AB} = \rank{A}$.
\end{cor}
\begin{pf}
Using Theorem \ref{thm:rank_of_product}, if $A$  is invertible
$$\rank{AB } \leq \rank{B} = \rank{A^{-1}AB} \leq \rank{AB},$$ and
so $\rank{B} = \rank{AB}$. A similar argument works when $B$ is
invertible.

\end{pf}

\begin{exa}
Study the various possibilities for  the rank of the matrix
$$A = \begin{bmatrix} 1 & 1 & 1 \cr
b + c & c + a & a + b \cr bc & ca & ab\end{bmatrix} .$$ \end{exa}
\begin{solu}Performing $R_2 - (b + c)R_1 \rightarrow R_2$, $R_3 -
bcR_1 \rightarrow R_3$, and then $R_3 - cR_2 \rightarrow R_3$, we find
$$\begin{bmatrix} 1 & 1 & 1 \cr
0 & a - b & a - c \cr 0 & 0 & (b - c)(a - c)\end{bmatrix} .$$
Performing $C_2 - C_1 \rightarrow C_2$ and $C_3 - C_1 \rightarrow
C_3$, we find
$$\begin{bmatrix} 1 & 0 & 0 \cr
0 & a - b & a - c \cr 0 & 0 & (b - c)(a - c)\end{bmatrix} .$$ We
now examine the various ways of getting rows consisting only of
$0$'s. If $a = b = c,$ the last two rows are $0$-rows and so
$\rank{A} = 1$. If exactly two of $a, b, c$ are equal, the last
row is a $0$-row, but the middle one is not, and so $\rank{A} = 2$
in this case. If none of $a, b, c$ are equal, then the rank is
clearly $3$.
\end{solu}



\section*{\psframebox{Homework}}
\begin{pro}
On a symmetric matrix  $A\in\mat{n\times n}{\BBR}$ with $n \geq 3$,
$$R_3 - 3R_1 \rightarrow R_3$$ successively followed by $$C_3 - 3C_1
\rightarrow C_3$$ are performed. Is the resulting matrix still
symmetric?
\end{pro}
\begin{pro}
Find the rank of $$\begin{bmatrix}a+1 & a+2 & a+3 & a+4 & a+5 \cr
a+2 & a+3 & a+4 & a+5 & a+6 \cr a+3 & a+4 & a+5 & a+6 & a+7 \cr a+4
& a+5 & a+6 & a+7 & a+8 \cr
\end{bmatrix} \in\mat{4\times 5}{\BBR}.$$
\begin{answer} The rank is $2$. \end{answer}
\end{pro}

\begin{pro} Let $A, B$ be arbitrary
$n \times n$ matrices over $\BBR$. Prove or disprove! $\rank{AB} =
\rank{BA}.$\begin{answer} If $B$ is invertible, then $\rank{AB} =
\rank{A} = \rank{BA}$. Similarly, if $A$ is invertible $\rank{AB} =
\rank{B} = \rank{BA}$. Now, take $\dis{A = \left[\begin{array}{ll}1
& 0 \\ 0 & 0
\end{array}\right]}$ and $\dis{B = \left[\begin{array}{ll}0 & 1 \\ 0 & 0
\end{array}\right]}$. Then $AB = B,$ and so $\rank{AB} = 1.$ But
$BA = \left[\begin{array}{ll}0 & 0 \\ 0 & 0
\end{array}\right]$, and so $\rank{BA} = 0.$
\end{answer}
\end{pro}
\begin{pro}
Determine the rank of the matrix $ \begin{bmatrix}1 & 1  & 0 & 0 \cr
0 & 0 & 1& 1  \cr  2 & 2 & 2 & 2 \cr 2 & 0 & 0 & 2 \cr
\end{bmatrix}$ .
\begin{answer}
Observe that $$ \begin{bmatrix}1 & 1  & 0 & 0 \cr 0 & 0 & 1& 1  \cr
2 & 2 & 2 & 2 \cr 2 & 0 & 0 & 2 \cr
\end{bmatrix} \grstep[R_4-2R_1\rightarrow R_4]{R_3-2(R_1+R_2)  \rightarrow   R_3} \begin{bmatrix}1 & 1  & 0 & 0 \cr 0 & 0 & 1&
1  \cr  0 & 0 & 0 & 0 \cr 0 & -2 & 0 & 2 \cr
\end{bmatrix},$$whence the matrix has three pivots and so rank $3$.
\end{answer}
\end{pro}

\begin{pro}
Suppose that the matrix  $\begin{bmatrix} 4 & 2 \cr x^2 & x
\end{bmatrix}\in \mat{2\times 2}{\BBR}$ has rank $1$.  How many
possible values can $x$ assume?
\begin{answer}
The maximum rank of this matrix could be $2$. Hence, for the rank to
be $1$, the rows must be proportional, which entails
$$ \dfrac{x^2}{4}=\dfrac{x}{2} \implies x^2-2x=0 \implies x\in \{0,2\}. $$
\end{answer}
\end{pro}
\begin{pro}
Demonstrate that a non-zero $n\times n$ matrix $A$ over a field
$\BBF$ has rank $1$ if and only if $A$ can be factored as $A=XY$,
where $X\in \mat{n\times 1}{\BBF}$ and $Y\in \mat{1\times n}{\BBF}$.

\begin{answer}
Assume first that the   non-zero $n\times n$ matrix $A$ over a field
$\BBF$ has rank $1$. By permuting the rows of the matrix we may
assume that every other row is a scalar multiple of the first row,
which is non-zero since the rank is $1$. Hence $A$ must be of the
form
$$A = \begin{bmatrix} a_1 & a_2 & \cdots & a_n \cr \lambda _1 a_1 & \lambda _1 a_2 & \cdots & \lambda _1 a_n \cr
\vdots & \vdots & \cdots & \vdots \cr
 \lambda _{n-1} a_1 & \lambda _{n-1} a_2 & \cdots & \lambda _{n-1} a_n \cr
\end{bmatrix}  = \colvec{1 \\ \lambda _1 \\ \vdots \\ \lambda _{n-1}}\begin{bmatrix}a_1  &  a_2  &  \cdots & a_n \cr\end{bmatrix}:=XY,  $$
which means that the claimed factorisation indeed exists.

\bigskip

Conversely, assume that $A$ can be factored as $A=XY$, where $X\in
\mat{n\times 1}{\BBF}$ and $Y\in \mat{1\times n}{\BBF}$. Since $A$
is non-zero, we must have $\rank{A}\geq 1$. Similarly, neither $X$
nor $Y$ could be all zeroes, because otherwise $A$ would be zero.
This means that $\rank{X}=1=\rank{Y}$. Now, since
$$ \rank{A}\leq \min (\rank{X}, \rank{Y}) = 1, $$we deduce that $\rank{A}\leq 1$, proving that
$\rank{A}=1$.

\end{answer}
\end{pro}

\begin{pro}
Study the various possibilities for the rank of the matrix
$$  \begin{bmatrix}1 & a & 1 & b \cr a & 1 & b & 1 \cr 1 & b & 1 & a \cr b & 1 & a & 1 \cr   \end{bmatrix}   $$
when $(a,b)\in\BBR^2$. \begin{answer} Effecting $R_3 - R_1
\rightarrow R_3$; $aR_4 - bR_2 \rightarrow R_4$ successively, we
obtain
$$  \begin{bmatrix}1 & a & 1 & b \cr a & 1 & b & 1 \cr 1 & b & 1 & a \cr b & 1 & a & 1 \cr   \end{bmatrix}
\rightsquigarrow
\begin{bmatrix}1 & a & 1 & b \cr a & 1 & b & 1 \cr 0 & b - a & 0 & a - b \cr 0 & a - b & a^2 - b^2 & a - b \cr \end{bmatrix} .
$$
Performing $R_2 - aR_1 \rightarrow R_2$; $R_4 + R_3 \rightarrow
R_4$ we have
$$\rightsquigarrow
\begin{bmatrix}1 & a & 1 & b \cr 0 & 1 - a^2 & b - a & 1 - ab \cr 0 & b - a & 0 & a - b \cr 0 & 0 & a^2 - b^2 & 2(a - b) \cr   \end{bmatrix}.
$$
Performing $(1 - a^2)R_3 - (b - a)R_2\rightarrow R_3$ we have
$$\rightsquigarrow
\begin{bmatrix}1 & a & 1 & b \cr 0 & 1 - a^2 & b - a & 1 - ab \cr 0 & 0 &  -a^2 + 2ab - b^2 & 2a-2b-a^3+ab^2 \cr 0 & 0 & a^2 - b^2 & 2(a - b) \cr   \end{bmatrix}.
$$
Performing $R_3 - R_4\rightarrow R_3$ we have
$$\rightsquigarrow
\begin{bmatrix}1 & a & 1 & b \cr 0 & 1 - a^2 & b - a & 1 - ab \cr 0 & 0 &  -2a(a - b) & -a(a^2 - b^2) \cr 0 & 0 & a^2 - b^2 & 2(a - b) \cr   \end{bmatrix}.
$$
Performing $2aR_4 + (a + b)R_3\rightarrow R_4$ we have
$$\begin{bmatrix}1 & a & 1 & b \cr 0 & 1 - a^2 & b - a & 1 - ab \cr 0 & 0 &  -2a(a - b) & -a(a^2 - b^2) \cr 0 & 0 & 0 & 4a^2-4ab-a^4+a^2b^2-ba^3+ab^3 \cr   \end{bmatrix}.
$$
Factorising, this is
$$=
\begin{bmatrix}1 & a & 1 & b \cr 0 & 1 - a^2 & b - a & 1 - ab
\cr 0 & 0 &  -2a(a - b) & -a(a - b)(a + b) \cr 0 & 0 & 0 &
-a(a+2+b)(a-b)(a-2+b) \cr
\end{bmatrix}.$$ Thus the rank is $4$ if $(a+2+b)(a-b)(a-2+b) \neq
0$. The rank is $3$ if $a + b = 2$ and $(a,b) \neq (1,1)$ or if $a
+ b = -2$ and $(a,b) \neq (-1,-1)$. The rank is $2$ if $a = b \neq
1$ and $a \neq -1$. The rank is $1$ if $a = b = \pm 1$.

\end{answer}
\end{pro}
\begin{pro} Find the rank of
$\begin{bmatrix}1 &-1  &  0 &  1  \cr
              m &  1 &-1  &-1   \cr
              1 &-m  &  1 &  0  \cr
              1 &-1  &   m&  2  \cr\end{bmatrix}$ as a function of  $m\in\BBC$.
\begin{answer}$\rank{A}=4$ if $m^3 + m^2 + 2 \neq 0$, and
$\rank{A}=3$ otherwise.
\end{answer}
\end{pro}
\begin{pro}
Determine the rank of the matrix $\begin{bmatrix}a^2 & ab & ab &
b^2 \cr ab & a^2 & b^2 & ab \cr ab & b^2 & a^2 & ab \cr b^2 & ab &
ab & a^2 \cr
\end{bmatrix}.$
\begin{answer} The rank is $4$ if $a\neq \pm b$. The rank is $1$ if $a = \pm b \neq 0$. The rank is $0$ if $a=b=0$. \end{answer}
\end{pro}
\begin{pro}
Determine the rank of the matrix $\begin{bmatrix}1 & 1 & 1 & 1 \cr
a & b & a & b \cr c & c & d & d \cr ac & bc & ad & bd \cr
\end{bmatrix}.$
\begin{answer} The rank is $4$ if $(a-b)(c-d)\neq 0$. The rank is $2$ if $a = b, c \neq d$ or if $a \neq b, c = d$. The rank is $1$ if $a=b$ and $c=d$. \end{answer}
\end{pro}
\begin{pro}
Let $A\in\mat{3\times 2}{\BBR}$, $B\in \mat{2\times 2}{\BBR}$, and
$C\in\mat{2\times 3}{\BBR}$ be such that $ABC = \begin{bmatrix}
1&1&2 \cr -2&x&1 \cr 1&-2&1 \cr\end{bmatrix}$. Find $x$.
\begin{answer}Observe that $\rank{ABC}\leq \rank{B}\leq 2$. Now,
$$\begin{bmatrix}
1&1&2 \cr -2&x&1 \cr 1&-2&1 \cr\end{bmatrix}
\grstep[R_3-R_1\rightarrow R_3]{R_2+2R_1\to R_2} \begin{bmatrix}
1&1&2 \cr 0&x+2&5 \cr 0&-3&-1 \cr\end{bmatrix},
$$has rank at least $2$, since the first and third rows are not
proportional. This means that it must have rank exactly two, and the
last two rows must be proportional. Hence
$$ \dfrac{x+2}{-3}= \dfrac{5}{-1} \implies x = 13. $$

\end{answer}
\end{pro}



\begin{pro}
Let $B$ be the matrix obtained by adjoining a row (or column) to a
matrix $A$. Prove that either $\rank{B} = \rank{A}$ or $\rank{B} =
\rank{A}+1$.
\end{pro}
\begin{pro}
Let $A\in\mat{n\times n}{\BBR}$. Prove that $\rank{A} =
\rank{AA^T}$. Find a counterexample in the case $A\in\mat{n\times
n}{\BBC}$.
\begin{answer} For the counterexample consider $A = \begin{bmatrix}1& i \cr i & -1 \cr \end{bmatrix}$. \end{answer}
\end{pro}

\begin{pro}
Prove that the rank of a skew-symmetric matrix with real number
entries is an even number.
\end{pro}


\section{Rank and Invertibility}

\begin{thm}\label{thm:rank_and_invertibility} A matrix $A\in\mat{m\times n}{ \BBF }$ is left-invertible
if and only if $\rank{A} = n$. A matrix $A\in\mat{m\times n}{ \BBF
}$ is right-invertible if and only if $\rank{A} = m$.
\end{thm}
\begin{pf}
Observe that we always have $\rank{A} \leq n$.  If $A$ is left
invertible, then $\exists L\in\mat{n\times m}{ \BBF }$ such that $LA
= {\bf I}_n$. By Theorem \ref{thm:rank_of_product}, $$ n =
\rank{{\bf I}_n} = \rank{LA} \leq \rank{A},
$$whence the two inequalities give $\rank{A} = n$.

\bigskip

Conversely, assume that $\rank{A} = n$. Then  $\rank{A^T} = n$ by
Corollary \ref{cor:row_rank_is_column_rank}, and so by Theorem
\ref{thm:normal_form} there exist $P\in\gl{m}{ \BBF }$,
 $Q\in\gl{n}{ \BBF }$,  such that
$$PAQ = \begin{bmatrix}{\bf I}_n  \cr
{\bf 0}_{(m - n)\times n} \cr
\end{bmatrix}, \ \ \ Q^TA^TP^T = \begin{bmatrix}{\bf I}_n & {\bf 0}_{n\times (m - n)} \cr
\end{bmatrix}.           $$
This gives
$$\begin{array}{lll}Q^TA^TP^TPAQ   =  {\bf I}_n &  \implies & A^TP^TPA = (Q^T)^{-1}Q^{-1} \\   & \implies
& ((Q^T)^{-1}Q^{-1})^{-1}A^TP^TPA  = {\bf I}_n,\end{array}$$and so
$((Q^T)^{-1}Q^{-1})^{-1}A^TP^TP$ is a left inverse for $A$.

\bigskip

The right-invertibility case is argued similarly.  \end{pf}

By combining Theorem \ref{thm:rank_and_invertibility} and Theorem
\ref{thm:inverse_square_matrices}, the following corollary is thus
immediate.
\begin{cor}\label{cor:left_inverse_is_right}
If $A\in\mat{m\times n}{ \BBF }$ possesses a left inverse $L$ and a
right inverse $R$ then $m = n$ and $L=R$.\end{cor}



We use Gau\ss-Jordan Reduction to find the inverse of $A\in\gl{n}{
\BBF }$. We form the {\em augmented matrix} $ T = [A|{\bf I}_n]$
which is obtained by putting  $A$ side by side  with the identity
matrix ${\bf I}_n$. We perform permissible row operations on $T$
until instead of $A$ we obtain ${\bf I}_n$, which will appear if the
matrix is invertible. The matrix on the right will be $A^{-1}$. We
finish with $[{\bf I}_n|A^{-1}]$.
\begin{rem} If $A \in\mat{n\times n}{\BBR}$ is non-invertible, then the
left hand side in the procedure above will not reduce to ${\bf
I}_n$.
\end{rem}
\begin{exa}
Find the inverse of the matrix $B\in\mat{3\times 3}{\BBZ_7}$,

$$B = \begin{bmatrix}
\overline{6} & \overline{0}& \overline{1} \cr \overline{3} &
\overline{2} & \overline{0} \cr \overline{1} & \overline{0} &
\overline{1}\cr
\end{bmatrix}.$$
\end{exa}
\begin{solu}We have
\begin{eqnarray*}
\begin{bmat}{ccc|ccc}
\overline{6} & \overline{0}& \overline{1} & \overline{1} &
\overline{0} & \overline{0} \cr \overline{3} & \overline{2} &
\overline{0} & \overline{0} & \overline{1} & \overline{0} \cr
\overline{1} & \overline{0} & \overline{1} & \overline{0} &
\overline{0} & \overline{1}\cr
\end{bmat} & \grstep{R_1 \leftrightarrow R_3} &
\begin{bmat}{ccc|ccc}
\overline{1} & \overline{0} & \overline{1} & \overline{0} &
\overline{0} & \overline{1}\cr \overline{3} & \overline{2} &
\overline{0} & \overline{0} & \overline{1} & \overline{0} \cr
\overline{6} & \overline{0}& \overline{1} & \overline{1} &
\overline{0} & \overline{0} \cr
\end{bmat} \\
& \grstep[R_2 - \overline{3}R_1 \rightarrow R_2]{R_3 -
\overline{6}R_1 \rightarrow R_3} &
\begin{bmat}{ccc|ccc}
\overline{1} & \overline{0} & \overline{1} & \overline{0} &
\overline{0} & \overline{1}\cr \overline{0} & \overline{2} &
\overline{4} & \overline{0} & \overline{1} & \overline{4} \cr
\overline{0} & \overline{0}& \overline{2} & \overline{1} &
\overline{0} & \overline{1} \cr
\end{bmat} \\
& \grstep[\overline{5}R_1 + R_3 \rightarrow R_1]{R_2 - \overline{2}R_3
\rightarrow R_2} &
\begin{bmat}{ccc|ccc}
\overline{5} & \overline{0} & \overline{0} & \overline{1} &
\overline{0} & \overline{6}\cr \overline{0} & \overline{2} &
\overline{0} & \overline{5} & \overline{1} & \overline{2} \cr
\overline{0} & \overline{0}& \overline{2} & \overline{1} &
\overline{0} & \overline{1} \cr
\end{bmat} \\
& \grstep[\overline{4}R_2 \rightarrow R_2]{\overline{3}R_1
\rightarrow R_1;\ \overline{4}R_3 \rightarrow R_3} &
\begin{bmat}{ccc|ccc}
\overline{1} & \overline{0} & \overline{0} & \overline{3} &
\overline{0} & \overline{4}\cr \overline{0} & \overline{1} &
\overline{0} & \overline{6} & \overline{4} & \overline{1} \cr
\overline{0} & \overline{0}& \overline{1} & \overline{4} &
\overline{0} & \overline{4} \cr
\end{bmat}. \\
\end{eqnarray*}
We conclude that
$$\begin{bmatrix}
\overline{6} & \overline{0}& \overline{1} \cr \overline{3} &
\overline{2} & \overline{0} \cr \overline{1} & \overline{0} &
\overline{1}\cr
\end{bmatrix}^{-1} = \begin{bmatrix}
\overline{3} & \overline{0}& \overline{4} \cr \overline{6} &
\overline{4} & \overline{1} \cr \overline{4} & \overline{0} &
\overline{4}\cr
\end{bmatrix}.$$
\end{solu}






%%%%%I use Jim Hefferon's macros for Gauss-Jordan Reduction
\begin{exa}
Use Gau\ss-Jordan reduction to find the inverse of the matrix $A =
\left[\begin{array}{lll} 0  & 1 & -1 \\ \vspace{1mm} 4  & -3 & 4
\\ \vspace{1mm}
 3 & -3 & 4 \\
\end{array}\right] .$ Also, find $A^{2001}$.
\end{exa}
\begin{solu}Operating on the augmented matrix
\begin{eqnarray*}
\begin{bmat}{ccc|ccc} 0  & 1 & -1 & 1 & 0 & 0 \\ 4  & -3 & 4 & 0 & 1 & 0 \\ 3 & -3
& 4 & 0 & 0 & 1\\ \end{bmat} &\grstep{R_2 - R_3  \rightarrow R_2}
&\begin{bmat}{ccc|ccc} 0 & 1 & -1 & 1 & 0 & 0\\ 1  & 0 & 0 & 0 & 1
& -1\\ 3 & -3 & 4 & 0 & 0 & 1\\ \end{bmat}\\
&\grstep{R_3 - 3R_2\rightarrow R_3} &\begin{bmat}{ccc|ccc} 0 & 1 &
-1 & 1 & 0 & 0\\ 1 & 0 & 0 & 0 & 1 &
-1\\
0 & -3 & 4 & 0 & -3 & 4\\ \end{bmat} \\
&\grstep{R_3 + 3R_1\rightarrow R_3}
&\begin{bmat}{ccc|ccc} 0  & 1 & -1 & 1 & 0 & 0\\
1 & 0 & 0 & 0 & 1 & -1\\ 0 & 0 & 1 & 3 & -3 & 4\\ \end{bmat} \\
&\grstep{R_1 + R_3 \rightarrow R_1} &\begin{bmat}{ccc|ccc} 0 & 1 &
0 & 4 & -3
& 4\\ 1  & 0 & 0 & 0 & 1 & -1\\ 0 & 0 & 1 & 3 & -3 & 4\\
\end{bmat} \\
 &\grstep{R_1 \leftrightarrow R_2} &\begin{bmat}{ccc|ccc} 1  & 0 &
0 & 0 & 1 & -1\\ 0  & 1 & 0 & 4 & -3 & 4\\ 0 & 0 & 1 & 3 & -3 &
4\\ \end{bmat}.
\end{eqnarray*} Thus we deduce that
$$A^{-1} =
\begin{bmatrix}
 0 & 1 & -1\cr
4 & -3 & 4\cr 3 & -3 & 4\cr \end{bmatrix} = A.
$$From $A^{-1} = A$ we deduce $A^2 = {\bf I}_n$. Hence $A^{2000} = (A^2)^{1000} = {\bf I}_n ^{1000} = {\bf I}_n$ and
$A^{2001} = A(A^{2000}) = A{\bf I}_n = A$.
\end{solu}
\begin{exa}
Find the inverse of the triangular matrix $A\in\mat{n\times
n}{\BBR}$,
$$ A = \begin{bmatrix}1 & 1 &  1 & \cdots & 1 \cr 0 & 1 & 1 & \cdots & 1 \cr
 0 & 0 & 1 & \cdots & 1 \cr
\vdots & \vdots & \vdots & \cdots & \vdots \cr 0 & 0 & 0  & \cdots
& 1 \cr \end{bmatrix}.      $$
\end{exa}
\begin{solu}Form the augmented matrix
$$\begin{bmat}{ccccc|ccccc}1 & 1 &  1 & \cdots & 1 & 1 & 0 &  0 & \cdots & 0 \cr
0 & 1 & 1 & \cdots & 1 & 0 & 1 &  0 & \cdots & 0\cr
 0 & 0 & 1 & \cdots & 1 & 0 & 0 &  1 & \cdots & 0\cr
\vdots & \vdots & \vdots & \cdots & \vdots & \vdots & \vdots &
\vdots & \cdots & \vdots \cr 0 & 0 & 0  & \cdots & 1 & 0 & 0 &  0
& \cdots & 1\cr \end{bmat},$$ and perform $R_k - R_{k + 1}
\rightarrow R_k$ successively for $k = 1, 2, \ldots, n - 1$,
obtaining
$$\begin{bmat}{ccccc|ccccc}1 & 0 &  0 & \cdots & 0 & 1 & -1 &  0 & \cdots & 0 \cr
0 & 1 & 0 & \cdots & 0 & 0 & 1 &  -1 & \cdots & 0\cr
 0 & 0 & 1 & \cdots & 0 & 0 & 0 &  1 & \cdots & 0\cr
\vdots & \vdots & \vdots & \cdots & \vdots & \vdots & \vdots &
\vdots & \cdots & \vdots \cr 0 & 0 & 0  & \cdots & 1 & 0 & 0 &  0
& \cdots & 1\cr \end{bmat},$$ whence
$$A^{-1} = \begin{bmatrix} 1 & -1 &  0 & \cdots & 0 \cr 0 & 1 &  -1 & \cdots & 0\cr
0 & 0 &  1 & \cdots & 0\cr \vdots & \vdots & \vdots & \cdots &
\vdots \cr0 & 0 &  0 & \cdots & 1\cr \end{bmatrix},    $$ that is,
the inverse of $A$ has $1$'s on the diagonal and $-1$'s on the
superdiagonal.
\end{solu}
\begin{thm}\label{thm:inverse_triangular_matrices}
Let $A\in \mat{n\times n}{ \BBF }$ be a triangular matrix such that
$a_{11}a_{22} \cdots a_{nn} \neq 0_{\BBF }$. Then $A$ is invertible.
\end{thm}
\begin{pf}
Since the entry $a_{kk} \neq 0_{\BBF }$ we multiply the $k$-th row
by $a_{kk} ^{-1}$ and then proceed to subtract the appropriate
multiples of the preceding $k - 1$ rows at each stage.
\end{pf}
\begin{exa}[Putnam Exam, 1969] Let $A$ and $B$ be matrices of size $3\times 2$ and $2\times
3$ respectively. Suppose that their product $AB$ is given by
$$AB = \begin{bmatrix}8 & 2 & -2 \cr 2 & 5 & 4 \cr -2 & 4 & 5 \cr\end{bmatrix}.$$Demonstrate that the product $BA$ is given by
$$BA = \begin{bmatrix}9 & 0 \cr 0 & 9 \cr\end{bmatrix}.$$ \end{exa}
\begin{solu}Observe that
$$ (AB)^2 =   \begin{bmatrix}8 & 2 & -2 \cr 2 & 5 & 4 \cr -2 & 4 & 5 \cr\end{bmatrix}\begin{bmatrix}8 & 2 & -2 \cr 2 & 5 & 4 \cr -2 & 4 & 5 \cr\end{bmatrix}
 = \begin{bmatrix}72 &  18&  -18\cr 18 & 45 &  36 \cr -18 &  36 &  45 \cr \end{bmatrix}= 9AB. $$
Performing  $R_3 + R_2 \rightarrow R_3$, $R_1 - 4R_2 \rightarrow
R_1$, and $2R_3 + R_1 \rightarrow R_3$ in succession we see that
$$ \begin{bmatrix}8 & 2 & -2 \cr 2 & 5 & 4 \cr -2 & 4 & 5 \cr\end{bmatrix}\rightsquigarrow
\begin{bmatrix}0 & -18 & -18 \cr 2 & 5 & 4 \cr 0 & 0 & 0 \cr\end{bmatrix}
\rightsquigarrow
\begin{bmatrix}0 & -18 & 0 \cr 2 & 5 & -1 \cr 0 & 0 & 0 \cr\end{bmatrix}
\rightsquigarrow
\begin{bmatrix}0 & -18 & 0 \cr 0 & 5 & -1 \cr 0 & 0 & 0 \cr\end{bmatrix},      $$
and so $\rank{AB} = 2$. This entails that $\rank{(AB)^2} = 2$.
Now, since $BA$ is a $2\times 2$ matrix, $\rank{BA} \leq 2$. Also
$$ 2 = \rank{(AB)^2} = \rank{ABAB} \leq \rank{ABA} \leq \rank{BA},
$$and we must conclude that $\rank{BA} =2$. This means that $BA$
is invertible and so $$\begin{array}{lll} (AB)^2 = 9AB & \implies
&
A(BA - 9{\bf I}_{2})B = {\bf 0}_3 \\
& \implies & BA(BA - 9{\bf I}_{2})BA = B{\bf 0}_3A \\
& \implies & BA(BA - 9{\bf I}_{2})BA = {\bf 0}_2 \\
& \implies & (BA)^{-1}BA(BA - 9{\bf I}_{2})BA(BA)^{-1} = (BA)^{-1}{\bf 0}_2(BA)^{-1} \\
& \implies & BA - 9{\bf I}_{2} = {\bf 0}_2 \\
\end{array}
$$
\end{solu}

\section*{\psframebox{Homework}}
\begin{pro}\label{pro:inverse_matrix_mod7} Find the inverse of the matrix
$$\begin{bmatrix} \overline{1} & \overline{2} & \overline{3} \cr
\overline{2} & \overline{3} & \overline{1} \cr \overline{3} &
\overline{1} & \overline{2} \cr
\end{bmatrix} \in \mat{3\times 3}{\BBZ_7}.$$ \begin{answer} We form the augmented matrix
$$ \begin{bmat}{ccc|ccc} \overline{1} & \overline{2} & \overline{3} & \overline{1} & \overline{0} & \overline{0} \cr
\overline{2} & \overline{3} & \overline{1} & \overline{0} &
\overline{1} & \overline{0}\cr \overline{3} & \overline{1} &
\overline{2} & \overline{0} & \overline{0} & \overline{1}\cr
\end{bmat}       $$


From $R_2 -\overline{2}R_1 \rightarrow R_2 $ and $R_3
-\overline{3}R_1 \rightarrow R_3 $ we obtain
$$\rightsquigarrow \begin{bmat}{ccc|ccc}
\overline{1} & \overline{2} & \overline{3}& \overline{1} &
\overline{0} & \overline{0}  \cr \overline{0} & \overline{6} &
\overline{2} & \overline{5} & \overline{1} & \overline{0} \cr
\overline{0} & \overline{2} & \overline{0} & \overline{4} &
\overline{0} & \overline{1} \cr
\end{bmat} .    $$
From $R_2 \leftrightarrow R_3$ we obtain
$$\rightsquigarrow \begin{bmat}{ccc|ccc} \overline{1} & \overline{2} & \overline{3} & \overline{1} & \overline{0} & \overline{0} \cr
 \overline{0} &
\overline{2} & \overline{0} & \overline{4} & \overline{0} &
\overline{1} \cr \overline{0} & \overline{6} & \overline{2} &
\overline{5} & \overline{1} & \overline{0} \cr
\end{bmat}.    $$
Now, from $R_1 - R_2 \rightarrow R_1$ and $R_3 - \overline{3}R_2
\rightarrow R_3$, we obtain
$$\rightsquigarrow \begin{bmat}{ccc|ccc} \overline{1} & \overline{0} & \overline{3} & \overline{4} & \overline{0} & \overline{6} \cr
 \overline{0} &
\overline{2} & \overline{0} & \overline{4} & \overline{0} &
\overline{1} \cr \overline{0} & \overline{0} & \overline{2} &
\overline{0} & \overline{1} & \overline{4} \cr
\end{bmat}.    $$
From $4R_2 \rightarrow R_2$ and $4R_3 \rightarrow R_3$, we obtain
$$\rightsquigarrow \begin{bmat}{ccc|ccc} \overline{1} & \overline{0} & \overline{3} & \overline{4} & \overline{0} & \overline{6} \cr
 \overline{0} &
\overline{1} & \overline{0} & \overline{2} & \overline{0} &
\overline{4} \cr \overline{0} & \overline{0} & \overline{1} &
\overline{0} & \overline{4} & \overline{2} \cr
\end{bmat}.    $$
Finally, from $R_1 - \overline{3}R_3 \rightarrow R_1$ we obtain
$$\rightsquigarrow \begin{bmat}{ccc|ccc} \overline{1} & \overline{0} & \overline{0} & \overline{4} & \overline{2} & \overline{0} \cr
 \overline{0} &
\overline{1} & \overline{0} & \overline{2} & \overline{0} &
\overline{4} \cr \overline{0} & \overline{0} & \overline{1} &
\overline{0} & \overline{4} & \overline{2} \cr
\end{bmat}.    $$

 We deduce that
$$
\begin{bmatrix} \overline{1} & \overline{2} & \overline{3} \cr
\overline{2} & \overline{3} & \overline{1} \cr \overline{3} &
\overline{1} & \overline{2} \cr
\end{bmatrix}^{-1} = \begin{bmatrix} \overline{4} & \overline{2} & \overline{0} \cr
\overline{2} & \overline{0} & \overline{4} \cr \overline{0} &
\overline{4} & \overline{2} \cr
\end{bmatrix}.    $$
\end{answer}
\end{pro}
\begin{pro}
Let $(A, B)\in\mat{3\times 3}{\BBR}$ be given by
$$A =
\begin{bmatrix} a & b & c \cr 1 & 0 & 0 \cr 0 & 1 & 0 \cr \end{bmatrix}, \ \ \
B = \begin{bmatrix} 0 & 0 & -1 \cr 0 & -1 & a \cr -1 & a & b
\cr\end{bmatrix}.$$Find $B^{-1}$ and prove that $A^T = BAB^{-1}$.
\begin{answer} To find the inverse of $B$ we consider the
augmented matrix
$$\begin{bmat}{ccc|ccc}0 & 0 & -1  & 1 & 0 & 0 \cr
0 & -1 & a  & 0 & 1 & 0\cr -1 & a & b & 0 & 0 & 1\cr\end{bmat}.
$$
Performing $R_1 \leftrightarrow R_3$, $-R_3\rightarrow R_3$,  in
succession,
$$\begin{bmat}{ccc|ccc}
 -1 & a & b & 0 & 0 & 1\cr0 & -1 & a  & 0 & 1 & 0\cr 0 & 0 & 1  &
-1 & 0 & 0 \cr\end{bmat}.
$$Performing $R_1 + aR_2 \rightarrow R_1$ and $R_2 - aR_3 \rightarrow
R_2$ in succession,
$$\begin{bmat}{ccc|ccc}
 -1 & 0 & b + a^2 & 0 & a & 1\cr0 & -1 & 0  & a & 1 & 0\cr 0 & 0 & 1  &
-1 & 0 & 0 \cr\end{bmat}.
$$ Performing $R_1 - (b + a^2)R_3 \rightarrow R_1$, $-R_1 \rightarrow
R_1$ and  $-R_2 \rightarrow R_2$ in succession, we find
$$\begin{bmat}{ccc|ccc}
 1 & 0 & 0 & -b - a^2 & -a & -1\cr0 & 1 & 0  & -a & -1 & 0\cr 0 & 0 & 1  &
-1 & 0 & 0 \cr\end{bmat},
$$whence
$$B^{-1} = \begin{bmatrix} -b - a^2 & -a & -1\cr  -a & -1 & 0\cr -1 & 0 & 0 \cr\end{bmatrix}.     $$
Now,
$$\begin{array}{lll}
BAB^{-1} & = &      \begin{bmatrix} 0 & 0 & -1 \cr 0 & -1 & a \cr
-1 & a & b \cr\end{bmatrix}\begin{bmatrix} a & b & c \cr 1 & 0 & 0
\cr 0 & 1 & 0 \cr \end{bmatrix}\begin{bmatrix} -b - a^2 & -a &
-1\cr  -a & -1 & 0\cr -1 & 0 & 0 \cr\end{bmatrix} \\
& = & \begin{bmatrix} 0 &  -1 &  0 \cr -1 &  a&  0\cr 0 & 0& -c
\cr\end{bmatrix}\begin{bmatrix} -b - a^2 & -a & -1\cr -a
& -1 & 0\cr -1 & 0 & 0 \cr\end{bmatrix} \\
& = & \begin{bmatrix} a & 1&  0\cr b & 0& 1\cr c &  0 & 0\cr
\end{bmatrix} \\ & = & A^T,
\end{array}     $$which is what we wanted to prove.
\end{answer}
\end{pro}
\begin{pro}
Let $A=\begin{bmatrix} 1 & 0 & 0 \cr 1 & 1 & 0 \cr 1 & 1 & x \cr
\end{bmatrix}$ where $x\neq 0$ is a real number. Find $A^{-1}$.
\begin{answer}
First, form the augmented matrix:
$$\begin{bmat}{ccc|ccc} 1 & 0 & 0 & 1 & 0 & 0 \cr 1 & 1 & 0 & 0 & 1 & 0 \cr 1 & 1 & x & 0 & 0 & 1  \cr\end{bmat}.$$
Perform $R_2-R_1 \rightarrow R_2$ and  $R_3-R_1 \rightarrow R_3$:
$$\begin{bmat}{ccc|ccc} 1 & 0 & 0 & 1 & 0 & 0 \cr 0 & 1 & 0 & -1 & 1 & 0 \cr 0 & 1 & x & -1 & 0 & 1  \cr\end{bmat}.$$
Performing  $R_3-R_2 \rightarrow R_3$:
$$\begin{bmat}{ccc|ccc} 1 & 0 & 0 & 1 & 0 & 0 \cr 0 & 1 & 0 & -1 & 1 & 0 \cr 0 & 0 & x & 0 & -1 & 1  \cr\end{bmat}.$$
Finally, performing $\dfrac{1}{x}R_3\rightarrow R_3$:
$$\begin{bmat}{ccc|ccc} 1 & 0 & 0 & 1 & 0 & 0 \cr 0 & 1 & 0 & -1 & 1 & 0 \cr 0 & 0 & 1 & 0 & -\dfrac{1}{x} & \dfrac{1}{x}  \cr\end{bmat}.$$


\end{answer}
\end{pro}
\begin{pro}
If the inverse of the matrix $M = \begin{bmatrix} 1 & 0 & 1 \cr -1 &
0 & 0 \cr 0 & 1 & 1 \cr\end{bmatrix}$ is the matrix $M^{-1} =
\begin{bmatrix} 0 & -1 & 0 \cr -1 & -1 & a \cr 1 & 1 & b
\cr\end{bmatrix}$, find $(a, b)$.

\begin{answer}
 Since $MM^{-1}={\bf I}_3$, multiplying the first row of $M$ times the third column of $M^{-1}$, and again, the third row of
$M$ times the third column of $M^{-1}$, we gather that
$$ 1\cdot 0 + 0\cdot a + 1\cdot b = 0, \qquad 0 \cdot 0 + 1\cdot a + 1\cdot b =1 \implies b=0, a=1. $$
\end{answer}
\end{pro}
\begin{pro}
Let $A=\begin{bmatrix} 1 & 0 & 0 \cr 1 & 1 & 0 \cr 1 & 1 & 1 \cr
\end{bmatrix}$ and let $n>0$ be an integer. Find $(A^n)^{-1}$.
\begin{answer}
It is easy to prove by induction that $A^n = \begin{bmatrix} 1 & 0 &
0 \cr n & 1 & 0 \cr \dfrac{n(n+1)}{2} & n & 1 \cr\end{bmatrix}$.
Row-reducing, $(A^n)^{-1} =
\begin{bmatrix} 1 & 0 & 0 \cr -n & 1 & 0 \cr \dfrac{(n-1)n}{2} & -n
& 1 \cr\end{bmatrix}$.
\end{answer}
\end{pro}
\begin{pro}
Give an example of a $2\times 2$ invertible matrix $A$ over $\BBR$
such that $A+A^{-1}$ is the zero matrix.
\begin{answer}
Take, for example, $A = \begin{bmatrix}  0 & -1 \cr 1 & 0
\end{bmatrix}=-A^{-1}$.
\end{answer}
\end{pro}

\begin{pro}
Find all the values of the parameter $a$ for which the matrix $B$
given below is not invertible.

$$B = \left[ \begin{array}{lll}
-1 & a + 2 & 2 \\
0 & a & 1 \\
2 & 1 & a
\end{array} \right]$$
\begin{answer} Operating formally, and using elementary row operations, we
find
$$
B^{-1} = \left[\begin{array}{lll} -\frac{a^2 - 1}{a^2 - 5+ 2a} &
\frac{a^2 + 2a - 2}{a^2 - 5 + 2a} &  \frac{a-2}{a^2- 5 + 2a} \\
 -\frac{2}{a^2 - 5 + 2a} &
\frac{a + 4}{a^2- 5 + 2a} &  -\frac{1}{a^2 - 5 + 2a} \\
\frac{2a}{a^2 - 5 + 2a} & -\frac{2a + 5}{a^2 - 5 + 2a} &
\frac{a}{a^2- 5 + 2a}
\end{array} \right] .
$$
Thus $B$ is invertible whenever $a \neq -1 \pm \sqrt{6}.$


\end{answer}
\end{pro}

\begin{pro}
Find the inverse of the triangular matrix
$$\begin{bmatrix} a & 2a & 3a \cr 0 & b & 2b \cr  0 & 0 & c \cr
\end{bmatrix}\in\mat{3\times 3}{\BBR}
$$assuming that $abc \neq 0$.
\begin{answer} Form the augmented matrix
$$\begin{bmat}{ccc|ccc} a & 2a & 3a & 1 & 0 & 0   \cr 0 & b & 2b & 0 & 1 & 0  \cr  0 & 0 & c & 0 & 0 & 1 \cr
\end{bmat}.
$$Perform $\dfrac{1}{a}R_1 \rightarrow R_1$,  $\dfrac{1}{b}R_2 \rightarrow
R_2$, $\dfrac{1}{c}R_3 \rightarrow R_3$, in succession, obtaining
$$\begin{bmat}{ccc|ccc} 1 & 2 & 3 & 1/a & 0 & 0   \cr 0 & 1 & 2 & 0 & 1/b & 0  \cr  0 & 0 & 1 & 0 & 0 & 1/c \cr
\end{bmat}.
$$
Now perform $R_1 - 2R_2\rightarrow R_1$ and $R_2 - 2R_3\rightarrow
R_2$ in succession, to obtain
$$\begin{bmat}{ccc|ccc} 1 & 0 & -1 & 1/a & -2/b & 0   \cr 0 & 1 & 0 & 0 & 1/b & -2/c  \cr  0 & 0 & 1 & 0 & 0 & 1/c \cr
\end{bmat}.
$$
Finally, perform $R_1  + R_3\rightarrow R_1$ to obtain
$$\begin{bmat}{ccc|ccc} 1 & 0 & 0 & 1/a & -2/b & 1/c \cr 0 & 1 & 0 & 0
& 1/b & -2/c  \cr  0 & 0 & 1 & 0 & 0 & 1/c \cr
\end{bmat}.
$$Whence
$$\begin{bmatrix} a & 2a & 3a \cr 0 & b & 2b \cr  0 & 0 & c \cr
\end{bmatrix}^{-1} = \begin{bmatrix} 1/a & -2/b & 1/c \cr 0 & 1/b & -2/c \cr  0 & 0 & 1/c \cr
\end{bmatrix}.  $$
\end{answer}
\end{pro}

\begin{pro}\label{pro:inverse_with_abc}
Under what conditions is the matrix
$$\begin{bmatrix} b & a & 0 \cr c & 0 & a \cr 0 & c & b \cr  \end{bmatrix}   $$
invertible? Find the inverse under these conditions.
\begin{answer}To compute the inverse matrix we proceed formally as
follows. The augmented matrix is
$$ \begin{bmat}{ccc|ccc} b & a & 0 & 1 & 0 & 0 \cr c & 0 & a & 0 & 1 & 0 \cr 0 & c & b & 0 & 0 & 1\cr\end{bmat}. $$
Performing $bR_2 - cR_1 \rightarrow R_2$ we find
$$ \begin{bmat}{ccc|ccc} b & a & 0 & 1 & 0 & 0 \cr 0 & -ca & ab  & -c & b & 0 \cr 0 & c & b & 0 & 0 & 1\cr\end{bmat}. $$
Performing $aR_3 + R_2 \rightarrow R_3$ we obtain
$$ \begin{bmat}{ccc|ccc} b & a & 0 & 1 & 0 & 0 \cr 0 & -ca & ab  & -c & b & 0 \cr 0 & 0 & 2ab & -c & b & a\cr\end{bmat}. $$
Performing $2R_2 - R_3 \rightarrow R_2$ we obtain
$$ \begin{bmat}{ccc|ccc} b & a & 0 & 1 & 0 & 0 \cr 0 & -2ca & 0  & -c & b & -a\cr 0 & 0 & 2ab & -c & b & a\cr\end{bmat}. $$
Performing $2cR_1  + R_2 \rightarrow R_1$ we obtain
$$ \begin{bmat}{ccc|ccc} 2bc & 0 & 0 & c & b & -a \cr 0 & -2ca & 0  & -c & b & -a\cr 0 & 0 & 2ab & -c & b & a\cr\end{bmat}. $$
From here we easily conclude that
$$ \begin{bmatrix} b & a & 0 \cr c & 0 & a \cr 0 & c & b \cr  \end{bmatrix}^{-1} =   \begin{bmatrix} \frac{1}{2b} &  \frac{1}{2c} &  -\frac{a}{2bc} \cr \frac{1}{2a} &  -\frac{b}{2ac} &  \frac{1}{2c} \cr
 -\frac{c}{2ba} &  \frac{1}{2a}  &  \frac{1}{2b}\cr  \end{bmatrix}          $$
as long as $abc \neq 0$.
\end{answer}
\end{pro}

\begin{pro}
Let $A$ and  $B$ be $n\times n$ matrices over a field $\BBF$ such
that $AB$ is invertible. Prove that both $A$ and $B$ must be
invertible.
\begin{answer}
Since $AB$ is invertible, $\rank{AB}=n$. Thus
$$ n = \rank{AB}\leq \rank{A}\leq n\implies \rank{A}=n, $$
$$ n = \rank{AB}\leq \rank{B}\leq n\implies \rank{B}=n, $$
whence $A$ and $B$ are invertible.
\end{answer}
\end{pro}
\begin{pro}
Find the inverse of the matrix $$
\begin{bmatrix}1+a & 1 & 1 \cr 1 & 1 +b & 1 \cr 1 & 1 & 1 + c \cr
\end{bmatrix}
$$
\begin{answer}Form the augmented matrix
$$
\begin{bmat}{ccc|ccc} 1 + a  & 1 & 1 & 1 & 0 & 0 \\ 1  & 1 + b & 1 & 0 & 1 & 0 \\ 1 &
1 & 1 + c & 0 & 0 & 1\\ \end{bmat} .$$ Perform $bcR_1\rightarrow
R_1$,  $abR_3  \rightarrow R_3$,  $caR_2 \rightarrow R_2$. The
matrix turns into
$$
\begin{bmat}{ccc|ccc}  bc +abc& bc &bc & bc & 0 & 0 \\ ca  &  ca+abc & ca & 0 & ca & 0 \\ ab &
ab & ab+abc & 0 & 0 & ab\\ \end{bmat}. $$ Perform $R_1 + R_2 + R_3
\rightarrow R_1$ the matrix turns into $$
\begin{bmat}{ccc|ccc} ab +  bc+ca +abc& ab +  bc+ca +abc & ab +  bc+ca +abc & bc & ca & ab \\ ca  &  ca+abc & ca & 0 & ca & 0 \\ ab &
ab & ab+abc & 0 & 0 & ab\\ \end{bmat}.
$$ Perform $\frac{1}{ab +  bc+ca +abc}R_1 \rightarrow R_1$. The
matrix turns into
$$
\begin{bmat}{ccc|ccc} 1& 1 & 1 & \frac{bc}{ab +  bc+ca +abc} & \frac{ca}{ab +  bc+ca +abc} & \frac{ab}{ab +  bc+ca +abc} \\ ca  &  ca+abc & ca & 0 & ca & 0 \\ ab &
ab & ab+abc & 0 & 0 & ab\\ \end{bmat}.
$$
Perform $R_2-caR_1 \rightarrow R_2$ and $R_3 - abR_1 \rightarrow
R_3$. We get
$$
\begin{bmat}{ccc|ccc} 1& 1 & 1 & \frac{bc}{ab +  bc+ca +abc} & \frac{ca}{ab +  bc+ca +abc} & \frac{ab}{ab +  bc+ca +abc} \\ 0  &  abc & 0 & -\frac{abc^2}{ab +  bc+ca +abc} & ca - \frac{c^2a^2}{ab +  bc+ca +abc}
& -\frac{a^2bc}{ab +  bc+ca +abc} \\ 0 & 0 & abc & -\frac{ab^2c}{ab +  bc+ca +abc} & -\frac{a^2bc}{ab +  bc+ca +abc} & ab - \frac{a^2b^2}{ab +  bc+ca +abc}\\
\end{bmat}.
$$Perform $\frac{1}{abc}R_2\rightarrow R_2$ and $\frac{1}{abc}R_3\rightarrow
R_3$. We obtain
$$
\begin{bmat}{ccc|ccc} 1& 1 & 1 & \frac{bc}{ab +  bc+ca +abc}  & \frac{ca}{ab +  bc+ca +abc} & \frac{ab}{ab +  bc+ca +abc} \\
0  &  1 & 0 & -\frac{c}{ab +  bc+ca +abc}  & \frac{1}{b} -
\frac{ca}{b(ab + bc+ca +abc)}
& -\frac{a}{ab +  bc+ca +abc} \\ 0 & 0 & 1 & -\frac{b}{ab +  bc+ca +abc}  & -\frac{a}{ab +  bc+ca +abc} & \frac{1}{c} - \frac{ab}{c(ab +  bc+ca +abc)}\\
\end{bmat}.
$$Finally we perform $R_1 - R_2 - R_3 \rightarrow R_1$, getting
$$
\begin{bmat}{ccc|ccc} 1& 0 & 0 & \frac{b +  c + bc}{ab +  bc+ca +abc}  & -\frac{c}{ab +  bc+ca +abc} & -\frac{b}{ab +  bc+ca +abc} \\
0  &  1 & 0 & -\frac{c}{ab +  bc+ca +abc}  & \frac{1}{b} -
\frac{ca}{b(ab + bc+ca +abc)}
& -\frac{a}{ab +  bc+ca +abc} \\ 0 & 0 & 1 & -\frac{b}{ab +  bc+ca +abc}  & -\frac{a}{ab +  bc+ca +abc} & \frac{1}{c} - \frac{ab}{c(ab +  bc+ca +abc)}\\
\end{bmat}.
$$

We conclude that the inverse is
$$\begin{bmatrix} \frac{b + c + bc}{ab+bc+ca + abc} & -\frac{c}{ab+bc+ca +
abc}& -\frac{b}{ab+bc+ca + abc} \cr -\frac{c}{ab+bc+ca + abc} &
\frac{c + a + ca}{ab+bc+ca + abc}& -\frac{a}{ab+bc+ca + abc} \cr
-\frac{b}{ab+bc+ca + abc} & -\frac{a}{ab+bc+ca + abc}& \frac{a + b
+ ab}{ab+bc+ca + abc} \cr
\end{bmatrix}$$
\end{answer}
\end{pro}

\begin{pro}
Prove that for the  $n\times n$ ($n > 1$) matrix $$
\begin{bmatrix}0 & 1 & 1 & \ldots & 1 \cr 1 & 0 & 1 & \ldots & 1
\cr 1 & 1 & 0 & \ldots & 1 \cr \vdots & \vdots & \vdots & \ldots &
\vdots \cr 1 & 1& 1& \ldots & 0
\end{bmatrix}^{-1} = \dfrac{1}{n-1}\begin{bmatrix}2-n & 1 & 1 & \ldots & 1 \cr 1 & 2-n & 1 & \ldots & 1
\cr 1 & 1 & 2-n & \ldots & 1 \cr \vdots & \vdots & \vdots & \ldots
& \vdots \cr 1 & 1& 1& \ldots & 2-n \cr
\end{bmatrix}
$$
\end{pro}

\begin{pro}
Prove that  the  $n\times n$ ($n > 1$) matrix $$
\begin{bmatrix}1+a & 1 & 1 & \ldots & 1 \cr 1 & 1+a & 1 & \ldots & 1
\cr 1 & 1 & 1+a & \ldots & 1 \cr \vdots & \vdots & \vdots & \ldots
& \vdots \cr 1 & 1& 1& \ldots & 1+a
\end{bmatrix}$$ has inverse $$-\dfrac{1}{a(n+a)}\begin{bmatrix}1-n-a & 1 & 1 & \ldots & 1 \cr 1 & 1-n-a & 1 & \ldots & 1
\cr 1 & 1 & 1-n-a & \ldots & 1 \cr \vdots & \vdots & \vdots &
\ldots & \vdots \cr 1 & 1& 1& \ldots & 1-n-a \cr
\end{bmatrix}
$$
\end{pro}
\begin{pro}
Prove that $$\begin{bmatrix}1 & 3 & 5 & 7 & \cdots & (2n - 1) \cr
(2n -1) & 1 & 3 & 5 & \cdots & (2n - 3) \cr (2n - 3) & (2n - 1) &
1 & 3 & \cdots & (2n - 5) \cr \vdots & \vdots & \vdots & \vdots &
\vdots & \vdots \cr 3 & 5 & 7 & 9 & \cdots & 1 \cr
\end{bmatrix}$$has inverse $$ \frac{1}{2n^3} \begin{bmatrix}  2 - n^2 & 2 + n^2 & 2 & 2 & \cdots & 2 \cr
2 & 2 - n^2 & 2 + n^2 & 2 & \cdots & 2 \cr 2 & 2 & 2 - n^2 & 2 +
n^2 & \cdots & 2 \cr \vdots & \vdots & \vdots & \vdots & \vdots &
\vdots \cr 2 + n^2 & 2 & 2 & 2 & \cdots & 2 - n^2\cr
\end{bmatrix}.$$
\end{pro}


\begin{pro}
Prove that  the  $n\times n$ ($n > 1$) matrix $$
\begin{bmatrix}1+a_1 & 1 & 1 & \ldots & 1 \cr 1 & 1+a_2 & 1 & \ldots & 1
\cr 1 & 1 & 1+a_3 & \ldots & 1 \cr \vdots & \vdots & \vdots &
\ldots & \vdots \cr 1 & 1& 1& \ldots & 1+a_n
\end{bmatrix}$$ has inverse  $$ -\dfrac{1}{s}\begin{bmatrix}\dfrac{1-a_1s}{a_1 ^2} & \dfrac{1}{a_1a_2} & \dfrac{1}{a_1a_3} & \ldots & \dfrac{1}{a_1a_n} \cr
\dfrac{1}{a_2a_1}&\dfrac{1-a_2s}{a_2 ^2} & \dfrac{1}{a_2a_3} &
\ldots & \dfrac{1}{a_2a_n} \cr \dfrac{1}{a_3a_1} & \dfrac{1}{a_3a_2}
& \dfrac{1-a_3s}{a_3 ^2} & \ldots & \dfrac{1}{a_3a_n} \cr \vdots &
\vdots & \vdots & \ldots & \vdots \cr \dfrac{1}{a_na_1} &
\dfrac{1}{a_na_2}& \dfrac{1}{a_na_3}& \ldots & \dfrac{1-a_ns}{a_n
^2} \cr
\end{bmatrix},
$$where $s = 1 + \frac{1}{a_1} + \frac{1}{a_2}+\cdots  + \frac{1}{a_n}$.
\end{pro}
\begin{pro}Let $A\in\mat{5\times 5}{\BBR}$.
Shew that if $\rank{A^2} <5$, then $\rank{A} <5$.
\begin{answer}
Since $\rank{A^2} < 5$, $A^2$ is not invertible. But then $A$ is
not invertible and hence $\rank{A}<5$.
\end{answer}
 \end{pro}
\begin{pro}
Let $p$ be an odd prime. How many invertible $2\times 2$ matrices
are there with entries all in $\BBZ_p$?
\begin{answer}
Each entry can be chosen in $p$ ways, which means that there are
$p^2$ ways of choosing the two entries of an arbitrary row. The
first row cannot be the zero row, hence there are $p^2-1$ ways of
choosing it. The second row cannot be one of the $p$ multiples of
the first row, hence there are $p^2-p$ ways of choosing it. In
total, this gives $(p^2-1)(p^2-p)$ invertible matrices in $\BBZ _p$.
\end{answer}
\end{pro}
\begin{pro}
Let $A, B$ be matrices of the same size. Prove that $\rank{A+B} \leq
\rank{A} + \rank{B}$.
\begin{answer}
Assume that both $A$ and $B$ are $m\times n$ matrices. Let
$C=[A\quad B]$ be the $m\times (2n)$ matrix obtained by juxtaposing $A$ to
$B$. $\rank{C}$ is the number of linearly independent columns of
$C$, which is composed of the columns of $A$ and $B$. By
column-reducing the first $n$ columns, we find $\rank{A}$ linearly
independent columns. By column-reducing columns $n+1$ to $2n$, we
find $\rank{B}$ linearly independent columns. These
$\rank{A}+\rank{B}$ columns are distinct, and are a subset of the
columns of $C$. Since $C$ has at most $\rank{C}$ linearly
independent columns, it follows that $\rank{C}\leq
\rank{A}+\rank{B}$. Furthermore,   by adding the $(n+k)$-th column
($1\leq k \leq n$) of $C$ to the $k$-th column, we see that $C$ is
column-equivalent to $[A+B\ B]$. But clearly $$\rank{A+B}\leq
\rank{[A+B\ B]}= \rank{C},
$$since $[A+B\ B]$ is obtained by adding columns to $A+B$. We deduce
$$\rank{A+B}\leq
\rank{[A+B\ B]}= \rank{C} \leq \rank{A}+\rank{B},  $$ as was to be
shewn.
\end{answer}
\end{pro}

\begin{pro}
Let $A\in\mat{3,2}{\BBR}$ and $B\in\mat{2,3}{\BBR}$ be matrices such
that  $AB =
\begin{bmatrix} 0 & -1 & -1 \cr -1 & 0 & -1 \cr 1 & 1 &
2\end{bmatrix}$. Prove that $BA = {\bf I}_2$.
\begin{answer}

Since the first two columns of $AB$ are not proportional, and since
the last column is the sum of the first two, $\rank{AB}=2$. Now,
$$(AB)^2 =
\begin{bmatrix} 0 & -1 & -1 \cr -1 & 0 & -1 \cr 1 & 1 &
2\end{bmatrix}^2 = \begin{bmatrix} 0 & -1 & -1 \cr -1 & 0 & -1 \cr 1
& 1 & 2\end{bmatrix}=AB.$$ Since $BA$ is a $2\times 2$ matrix,
$\rank{BA}\leq 2$. Also,
$$2 = \rank{AB}=\rank{(AB)^2} = \rank{A(BA)B}\leq \rank{BA},   $$
whence $\rank{BA}=2$, which means $BA$ is invertible. Finally,
$$(AB)^2-AB= {\bf 0}_3 \implies A(BA-{\bf I}_2)B= {\bf 0}_3\implies   BA(BA-{\bf I}_2)BA= B{\bf 0}_3A \implies  BA-{\bf I}_2= {\bf 0}_2,  $$
since $BA$ is invertible and we may cancel it.

\end{answer}
\end{pro}


\chapter{Linear Equations}
\section{Definitions} We can write a system of $m$ linear
equations in $n$ variables over a field $\BBF$
$$a_{11}x_1 + a_{12}x_2 + a_{13}x_3 + \cdots + a_{1n}x_n = y_1,$$
$$a_{21}x_1 + a_{22}x_2 + a_{23}x_3 + \cdots + a_{2n}x_n = y_2,$$
$$\vdots$$
$$a_{m1}x_1 + a_{m2}x_2 + a_{m3}x_3 + \cdots + a_{mn}x_n =
y_m,$$in matrix form as
\begin{equation}\label{eq:matrix_equation}\begin{bmatrix} a_{11} &
a_{12} & \cdots & a_{1n} \cr a_{21} & a_{22} & \cdots & a_{2n} \cr
 \vdots & \vdots & \vdots &
\vdots \cr a_{m1} & a_{m2} & \cdots & a_{mn} \cr
\end{bmatrix}\colvec{x_1 \\ x_2 \\ \vdots \\ x_n} = \colvec{y_1 \\ y_2 \\ \vdots \\
y_m}.\end{equation} We write the above matrix relation in the
abbreviated form
\begin{equation}
AX = Y,
\end{equation}
where $A$ is the matrix of coefficients, $X$ is the matrix of
variables and $Y$ is the matrix of constants.  Most often we will
dispense with the matrix of variables $X$ and will simply write
the {\em augmented matrix} of the system as
\begin{equation}[A |Y] = \begin{bmat}{cccc|c} a_{11} &
a_{12} & \cdots & a_{1n}  & y_1\cr a_{21} & a_{22} & \cdots &
a_{2n} & y_2 \cr
 \vdots & \vdots & \vdots &
\vdots  & \vdots \cr a_{m1} & a_{m2} & \cdots & a_{mn} & y_m \cr
\end{bmat}.\end{equation}
\begin{df}
Let $AX = Y$ be as in \ref{eq:matrix_equation}. If $Y = {\bf
0}_{m\times 1}$, then the system is called {\em homogeneous},
otherwise it is called {\em inhomogeneous}. The set
$$\{X\in\mat{n\times 1}{ \BBF }: AX = {\bf
0}_{m\times 1}\}
$$is called the {\em kernel} or {\em nullspace} of $A$ and it is
denoted by $\ker{A}$.
\end{df}
\begin{rem}
Observe that for any $A\in\mat{m\times n}{ \BBF }$ we always have
${\bf 0}_{n\times 1}\in\ker{A}$.
\end{rem}
\begin{df}
 A system of linear equations is {\em consistent} if it has a solution.
If the system does not have a solution then we say that it is {\em
inconsistent}.
\end{df}
\begin{df}
If a row of a matrix is non-zero, we call the  first non-zero
entry of this row a {\em pivot} for this row. \index{pivot}
\end{df}
\begin{df}
 A matrix $M\in\mat{m\times n}{ \BBF }$ is a {\em row-echelon}
matrix if
\begin{itemize}
\item All the zero rows of $M$, if any, are at the bottom of $M$.
\item For any two consecutive rows $R_i$ and $R_{i + 1}$, either
$R_{i + 1}$ is all $0_{\BBF }$'s or the pivot of $R_{i + 1}$ is
immediately to the right of the pivot of $R_i$.
\end{itemize}
The variables accompanying these pivots are called the {\em
leading variables}. Those variables which are not leading
variables are the {\em free parameters.}
\end{df}
\begin{exa}
The matrices
$$\begin{bmatrix}\pscirclebox[doubleline=true, linecolor=red]{1} & 0 & 1 & 1 \cr 0 & 0 & \pscirclebox[doubleline=true, linecolor=red]{2} & 2 \cr 0 & 0 & 0 & \pscirclebox[doubleline=true, linecolor=red]{3} \cr 0 & 0 & 0 & 0\end{bmatrix}, \ \ \
\begin{bmatrix}\pscirclebox[doubleline=true, linecolor=red]{1} & 0 & 1 & 1 \cr 0 & 0 & 0 & \pscirclebox[doubleline=true, linecolor=red]{1} \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0\end{bmatrix}, \ \ \
$$ are in row-echelon form, with the pivots circled,  but the matrices
$$\begin{bmatrix}1 & 0 & 1 & 1 \cr 0 & 0 & 1 & 2 \cr 0 & 0 & 1 & 1 \cr 0 & 0 & 0 & 0\end{bmatrix}, \ \ \
\begin{bmatrix}1 & 0 & 1 & 1 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 1 \cr 0 & 0 & 0 & 0 \cr \end{bmatrix}, \ \ \
$$are not in row-echelon form.
\end{exa}
\begin{rem}
Observe that given a matrix $A\in\mat{m\times n}{ \BBF }$, by
following Gau\ss-Jordan reduction \`{a} la Theorem
\ref{thm:normal_form}, we can find a matrix $P\in\gl{m}{ \BBF }$
such that $PA = B$ is in row-echelon form.
\end{rem}
\begin{exa}
Solve the system of linear equations
$$\begin{bmatrix}1 & 1 & 1 & 1 \cr 0 & 2 & 1 & 0 \cr 0 & 0 & 1 & -1 \cr 0 & 0 & 0 & 2 \cr     \end{bmatrix} \colvec{x\\ y \\ z \\ w} = \colvec{-3 \\ -1 \\ 4 \\ - 6}.        $$
\end{exa}
\begin{solu}Observe that the matrix of coefficients is already in
row-echelon form. Clearly every variable is a leading variable,
and by back substitution
$$2w = -6 \implies w = -\frac{6}{2} = -3,$$
$$z - w = 4 \implies z = 4 + w = 4 - 3 = 1,$$
$$2y + z = -1 \implies y = -\frac{1}{2} - \frac{1}{2}z = -1,$$
$$x + y + z + w = -3 \implies x = -3 - y - z - w = 0.$$
The (unique) solution is thus
$$\colvec{x \\ y \\ z \\ w} = \colvec{0 \\ -1 \\ 1 \\ -3}.$$
\end{solu}
\begin{exa} Solve the system of linear
equations
$$\begin{bmatrix}1 & 1 & 1 & 1 \cr 0 & 2 & 1 & 0 \cr 0 & 0 & 1 & -1 \cr     \end{bmatrix} \colvec{x\\ y \\ z \\ w} = \colvec{-3 \\ -1 \\ 4 }.        $$

\end{exa}
\begin{solu}The system is already in row-echelon form, and we see
that $x, y, z$ are leading variables while  $w$ is a free
parameter. We put $w = t$. Using back substitution, and operating
from the bottom up, we find
$$z - w = 4 \implies z = 4 + w = 4 + t,$$
$$2y + z = -1 \implies y = -\frac{1}{2} - \frac{1}{2}z = -\frac{1}{2} - 2 - \frac{1}{2}t = -\frac{5}{2} - \frac{1}{2}t,$$
$$x + y + z + w = -3 \implies x = -3 - y - z - w = -3 + \frac{5}{2} + \frac{1}{2}t - 4 - t - t = -\frac{9}{2} - \frac{3}{2}t.$$
The solution is thus
$$\colvec{x \\ y \\ z \\ w} =    \colvec{-\frac{9}{2} - \frac{3}{2}t \\-\frac{5}{2} - \frac{1}{2}t \\
4 + t \\ t }, \ t \in \BBR. $$
\end{solu}
\begin{exa} Solve the system of linear
equations
$$\begin{bmatrix}1 & 1 & 1 & 1 \cr 0 & 2 & 1 & 0 \cr      \end{bmatrix} \colvec{x\\ y \\ z \\ w} = \colvec{-3 \\ -1 }.        $$
\end{exa}
\begin{solu}We see that $x, y$ are leading variables, while $z, w$
are free parameters. We put $z = s, w = t$. Operating from the
bottom up, we find
$$2y + z = -1 \implies y = -\frac{1}{2} - \frac{1}{2}z = -\frac{1}{2} - \frac{1}{2}s,$$
$$x + y + z + w = -3 \implies x = -3 - y - z - w =  -\frac{5}{2} - \frac{1}{2}s - t.$$
The solution is thus
$$\colvec{x \\ y \\ z \\ w} =    \colvec{-\frac{5}{2} - \frac{1}{2}s  - t \\-\frac{1}{2} - \frac{1}{2}s \\
s \\ t }, \ (s, t) \in \BBR^2. $$
\end{solu}
\begin{exa}
Find all the solutions of the system $$ x + \overline{2}y +
\overline{2}z = \overline{0},
$$ $$  y + \overline{2}z =
\overline{1},
$$working in $\BBZ_{3}$.
\end{exa}
\begin{solu}The augmented matrix of the system is
$$\begin{bmat}{ccc|c} \overline{1} & \overline{2} & \overline{2} & \overline{0} \cr
\overline{0} & \overline{1} & \overline{2} & \overline{1} \cr
\end{bmat}. $$
The system is already in row-echelon form and $x, y$ are leading
variables while $z$ is a free parameter. We find
$$ y = \overline{1} - \overline{2}z = \overline{1} +
\overline{1}z,
$$ and $$ x = -\overline{2}y - \overline{2}z = \overline{1} +
\overline{2}z.
$$Thus $$\colvec{x\\ y \\ z} = \colvec{\overline{1} + \overline{2}z\\ \overline{1} +
\overline{1}z \\ z}, \ \ z\in\BBZ_3.$$ Letting $z = \overline{0},
\overline{1}, \overline{2}$ successively, we find the three
solutions $$\colvec{x\\ y \\ z} = \colvec{\overline{1} \\
\overline{1} \\ \overline{0}},$$
$$\colvec{x\\ y \\ z} = \colvec{\overline{0} \\
\overline{2} \\ \overline{1}},$$and
$$\colvec{x\\ y \\ z} = \colvec{\overline{2} \\
\overline{0} \\ \overline{2}}.$$
\end{solu}
\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Find all the solutions in $\BBZ_3$ of the system
$$\begin{array}{l}x + y + z + w = \overline{0}, \\
 \overline{2}y + w = \overline{2}. \end{array} $$
\begin{answer}
The free variables are $z$ and $w$. We have $$ \overline{2}y + w =
\overline{2} \implies \overline{2}y = \overline{2} - w \implies y
= \overline{1} + w,
$$and $$x + y + z + w = \overline{0} \implies x = -y-z-w = \overline{2}y + \overline{2}z + \overline{2}w.
$$Hence $$\colvec{x\\ y \\ z \\ w} = \colvec{\overline{0} \\ \overline{1} \\ \overline{0}\\ \overline{0}} + z\colvec{\overline{0} \\ \overline{0} \\ \overline{1}\\ \overline{0}} + w\colvec{\overline{0} \\ \overline{0} \\ \overline{0}\\ \overline{1}}.   $$
This gives the $9$ solutions.
\end{answer}
\end{pro}
\begin{pro}
In $\BBZ_7$, given that $$ \begin{bmatrix} \overline{1} &
\overline{2} & \overline{3} \cr \overline{2} & \overline{3} &
\overline{1} \cr \overline{3} & \overline{1} & \overline{2} \cr
\end{bmatrix}^{-1} = \begin{bmatrix} \overline{4} & \overline{2} & \overline{0} \cr
\overline{2} & \overline{0} & \overline{4} \cr \overline{0} &
\overline{4} & \overline{2} \cr
\end{bmatrix},$$find all solutions of the system
$$\overline{1}x + \overline{2}y + \overline{3}z = \overline{5};$$
$$\overline{2}x + \overline{3}y + \overline{1}z = \overline{6};$$
$$\overline{3}x + \overline{1}y + \overline{2}z = \overline{0}.$$
\begin{answer}
We have
$$ \begin{bmatrix} \overline{1} & \overline{2} & \overline{3} \cr
\overline{2} & \overline{3} & \overline{1} \cr \overline{3} &
\overline{1} & \overline{2} \cr
\end{bmatrix}\begin{bmatrix}x\cr y \cr z \end{bmatrix}  = \begin{bmatrix}\overline{5}\cr \overline{6}\cr \overline{0}\end{bmatrix},$$
Hence
$$ \begin{bmatrix}x\cr y \cr z \end{bmatrix} =  \begin{bmatrix} \overline{1} & \overline{2} & \overline{3} \cr
\overline{2} & \overline{3} & \overline{1} \cr \overline{3} &
\overline{1} & \overline{2} \cr
\end{bmatrix}^{-1}\begin{bmatrix}\overline{5}\cr \overline{6}\cr \overline{0}\end{bmatrix} =
\begin{bmatrix} \overline{4} & \overline{2} & \overline{0} \cr
\overline{2} & \overline{0} & \overline{4} \cr \overline{0} &
\overline{4} & \overline{2} \cr
\end{bmatrix}\begin{bmatrix}\overline{5}\cr \overline{6}\cr \overline{0}\end{bmatrix}= \begin{bmatrix}\overline{4}\cr \overline{3}  \cr\overline{3}
\end{bmatrix} .            $$


\end{answer}
\end{pro}
\begin{pro}
Solve in $\BBZ_{13}$:
 $$ x-\overline{2}y+z=\overline{5}, \qquad \overline{2}x+\overline{2}y=\overline{7},  \qquad \overline{5}x-\overline{3}y +\overline{4}z=\overline{1}.$$
\begin{answer}
The augmented matrix of the system is
\begin{eqnarray*}
\begin{bmat}{lll|l}
\overline{1} & -\overline{2} & \overline{1} & \overline{5}\\
\overline{2} & \overline{2} & \overline{0} & \overline{7}\\
\overline{5} & -\overline{3} & \overline{4} & \overline{1}\\
\end{bmat}
&  \grstep[R_2-\overline{2}R_1\to R_2 ]{R_3-\overline{5}R_1\to R_3}
&
\begin{bmat}{lll|l}
\overline{1} & -\overline{2} & \overline{1} & \overline{5}\\
\overline{0} & \overline{6} & -\overline{2} & -\overline{3}\\
\overline{0} & \overline{7} & -\overline{1} & \overline{2}\\
\end{bmat} \cr
&  \grstep{\overline{6}R_3-\overline{7}R_2\to R_3} &
\begin{bmat}{lll|l}
\overline{1} & -\overline{2} & \overline{1} & \overline{5}\\
\overline{0} & \overline{6} & -\overline{2} & -\overline{3}\\
\overline{0} & \overline{0} & \overline{8} & \overline{7}\\
\end{bmat} \cr
\end{eqnarray*}
Backward substitution yields
$$\overline{8}z = \overline{7} \implies \overline{5}\cdot \overline{8}z = \overline{5}\cdot \overline{7} \implies z = \overline{35}=\overline{9},  $$
$$  \overline{6}y=\overline{2}z-\overline{3} =\overline{2}\cdot \overline{9}-\overline{3}= \overline{15}=\overline{2} \implies \overline{11}\cdot \overline{6}y = \overline{11}\cdot \overline{2}\implies y = \overline{22}=\overline{9},  $$
$$  x=\overline{2}y-\overline{1}z+\overline{5} =\overline{2}\cdot \overline{9}-\overline{1}\cdot \overline{9}+\overline{5}=\overline{14}=\overline{1}.  $$
Conclusion :
$$\psframebox{ x= \overline{1}, \qquad y = \overline{9}, \qquad z = \overline{9}.}  $$
Check:
$$\overline{1} - \overline{2}\cdot \overline{9} + \overline{9} = -\overline{8}\stackrel{\checkmark}{=} \overline{5},  $$
$$\overline{2}\cdot \overline{1} + \overline{2}\cdot \overline{9}  = \overline{20}\stackrel{\checkmark}{=} \overline{7},  $$
$$\overline{5}\cdot \overline{1} - \overline{3}\cdot \overline{9} + \overline{4}\cdot \overline{9}  = \overline{14}\stackrel{\checkmark}{=} \overline{1}.  $$
\end{answer}
\end{pro}
\begin{pro}
Find, with proof, a polynomial $p(x)$ with real number coefficients
and   degree $3$ such that
$$p(-1) = -10, \quad p(0) = -1, \quad p(1) = 2, \quad p(2)= 23.$$
\begin{answer}
We need to solve the system
$$ a-b+c-d=p(-1)=-10, $$
$$ a=p(0)=-1, $$
$$ a+b+c+d=p(1)=2, $$
$$a+2b+4c+8d=p(2)=23.  $$Using row reduction or otherwise, we find
$a=-1$, $b=2$, $c=-3$, $d=4$, and so the polynomial is $$p(x)
=4x^3-3x^2+2x-1.$$
\end{answer}

\end{pro}

\begin{pro}
This problem introduces Hill block ciphers, which are a way of
encoding information with an {\em encoding matrix} $A\in\mat{n\times
n}{\BBZ _{26}}$, where $n$ is a strictly positive integer. Split a
plaintext into blocks of $n$ letters, creating a series of $n\times
1$ matrices $P_k$, and consider the numerical equivalent ($A=0$,
$B=1$, $C=2$, \ldots , $Z=25$) of each letter. The encoded message
is the translation to letters of the $n\times 1$ matrices $C_k =AP_k
\mod 26$.

\bigskip

For example, suppose you want to encode the message {\bf COMMUNISTS
EAT OFFAL} with the encoding matrix $$A = \begin{bmatrix} 0  & 1 & 0
\cr 3 & 0 & 0 \cr 0 & 0 & 2 \cr
\end{bmatrix},
$$a $3\times 3$ matrix. First, split the plaintext into groups of three letters:
$$ {\bf COM} \  {\bf MUN} \ {\bf IST} \ {\bf SEA} \ {\bf TOF} \ {\bf FAL}.
$$ Form $3\times 1$ matrices with each set of letters and find their
numerical equivalent, for example,
$$P_1=\colvec{{\bf C}\\ {\bf O}\\ {\bf M}} = \colvec{2\\ 14 \\ 12}.   $$
Find the product $AP_1$ modulo $26$, and translate into letters:
$$ AP_1= \begin{bmatrix} 0  & 1 & 0
\cr 3 & 0 & 0 \cr 0 & 0 & 2 \cr
\end{bmatrix} \colvec{2\\ 14 \\ 12}= \colvec{14\\ 6 \\ 24} =  \colvec{{\bf O}\\ {\bf G}\\ {\bf Y}},$$
hence {\bf COM} is encoded into {\bf OGY}. Your task is to complete
the encoding of the message.

\begin{answer}
Using the encoding chart
$$\begin{array}{|c||c||c||c||c||c||c||c||c||c||c||c|c|}
\hline 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 \\
\hline A & B & C & D & E &F & G & H & I & J & K & L & M \\
\hline 13 & 14 & 15 & 16 & 17 & 18 & 19 & 20 & 21 & 22 & 23 & 24 &
25 \\
\hline
N & O & P & Q & R & S & T & U & V & W & X & Y & Z \\
\hline
\end{array}$$
we find  $$P_2=\colvec{{\bf M}\\ {\bf U}\\ {\bf N}} = \colvec{12\\
20
\\ 13}, \quad P_3=\colvec{{\bf I}\\ {\bf S}\\ {\bf T}} = \colvec{8\\
18
\\ 19}, \quad P_4=\colvec{{\bf S}\\ {\bf E}\\ {\bf A}} = \colvec{18\\
4
\\ 0}, \quad P_5=\colvec{{\bf T}\\ {\bf O}\\ {\bf F}} = \colvec{19\\
14
\\ 5}, \quad P_6=\colvec{{\bf F}\\ {\bf A}\\ {\bf L}} = \colvec{5\\
0
\\ 11}.        $$
Thus
$$AP_2 = \colvec{20\\ 10 \\ 0} = \colvec{{\bf U}\\ {\bf K}\\ {\bf A}},\
AP_3 = \colvec{18\\ 24 \\ 12} = \colvec{{\bf S}\\ {\bf Y}\\ {\bf
M}},\ AP_4 = \colvec{4\\ 2 \\ 0} = \colvec{{\bf E}\\ {\bf C}\\
{\bf A}},\ AP_5 = \colvec{14\\ 5 \\ 10} = \colvec{{\bf O}\\
{\bf F}\\ {\bf K}},\ AP_6 = \colvec{0\\ 15 \\ 22} = \colvec{{\bf
A}\\ {\bf P}\\ {\bf W}}.
 $$
 Finally, the message is encoded into
 $$ {\bf OGY} \ {\bf UKA}  \ {\bf SYM}  \ {\bf ECA}  \ {\bf OFK}  \ {\bf APW}. $$

\end{answer}


\end{pro}
\begin{pro}
Find all solutions in $\BBZ _{103}$, if any,  to the system
$$ \begin{array}{rll}  x_0+x_1 & = & 0, \\
 x_0+x_2 & = & 1, \\
  x_0+x_3 & = & 2, \\
   \vdots & \vdots & \vdots \\
    x_{0}+x_{100} & = & 99, \\
    x_0+x_1 +x_2+\cdots + x_{100}& = & 4949. \\ \end{array} $$
Hints: $0+1+2+\cdots + 99= 4950$, \quad $99\cdot 77 -103\cdot 74=
1$.
\begin{answer}Observe that since $103$ is prime, $\BBZ_{103}$ is a field. Adding the first hundred equations,
$$100x_0 + x_1+x_2+\cdots + x_{100}=4950 \implies 99x_0 = 4950- 4949=1 \implies x_0 = 77 \mod 103. $$
Now, for $1\leq k \leq 100$,
$$x_k = k-1 - x_0 = k-78=k+25.  $$
This gives
$$x_1=26, x_2=27, \ldots , x_{77}=102, x_{78}=0, x_{79}=1, x_{80}=2,\ldots , x_{100}=22.  $$
\end{answer}

\end{pro}

\end{multicols}
\section{Existence of Solutions} We now answer
the question of deciding when a system of linear equations is
solvable.

\begin{lem}\label{lem:row_echelon_homo_system}
Let $A\in\mat{m\times n}{ \BBF }$ be in row-echelon form, and let
$X\in\mat{n\times 1}{ \BBF }$ be a matrix of variables. The
homogeneous system $AX = {\bf 0}_{m\times 1}$ of $m$ linear
equations in $n$ variables has (i) a unique solution if $m = n$,
(ii) multiple solutions if $m < n$.
\end{lem}
\begin{pf}
If $m = n$ then $A$ is a square triangular matrix whose diagonal
elements are different from $0_{ \BBF }$. As such, it is invertible
by virtue of Theorem \ref{thm:inverse_triangular_matrices}. Thus
$$AX = {\bf 0}_{n\times 1} \implies X = A^{-1}{\bf 0}_{n\times 1}
= {\bf 0}_{n\times 1}$$so there is only the unique solution $X =
{\bf 0}_{n\times 1}$, called the {\em trivial solution}.

\bigskip

If $m < n$ then there are $n - m$ free variables. Letting these
variables run through the elements of the field, we obtain
multiple solutions. Thus if the field has infinitely many
elements, we obtain infinitely many solutions, and if the field
has $k$ elements, we obtain $k^{n - m}$ solutions. Observe that in
this case there is always a non-trivial solution.

\end{pf}
\begin{thm}
Let $A\in\mat{m\times n}{ \BBF }$, and let  $X\in\mat{n\times 1}{
\BBF }$ be a matrix of variables. The homogeneous system $AX = {\bf
0}_{m\times 1}$ of $m$ linear equations in $n$ variables always has
a non-trivial solution if $m < n$.
\end{thm}
\begin{pf}
We can find a matrix $P\in\gl{m}{ \BBF }$ such that $B = PA$ is in
row-echelon form. Now
$$AX = {\bf
0}_{m\times 1} \iff PAX  = {\bf 0}_{m\times 1} \iff BX = {\bf
0}_{m\times 1}.   $$ That is, the systems $AX = {\bf 0}_{m\times
1}$ and $BX = {\bf 0}_{m\times 1}$ have the same set of solutions.
But by Lemma \ref{lem:row_echelon_homo_system} there is a
non-trivial solution.
\end{pf}


\begin{thm}[Kronecker-Capelli] \index{theorem!Kronecker-Capelli}
Let $A\in\mat{m\times n}{ \BBF }, Y\in \mat{m\times 1}{ \BBF }$ be
constant matrices and $X\in\mat{n\times 1}{ \BBF }$ be a matrix of
variables. The matrix equation $AX = Y$ is solvable if and only if
$$\rank{A} = \rank{[A|Y]}.$$
\end{thm}
\begin{pf}
Assume first that $AX = Y$, $$ X = \begin{bmatrix}x_1 \\ x_2 \\
\vdots
\\ x_n
\end{bmatrix}.
$$Let the columns of $[A|Y]$ be denoted by $C_i, 1 \leq i \leq n + 1$. Observe that $[A|Y]\in \mat{m\times (n+1)}{ \BBF }$ and that the $(n+1)$-th column of $[A|Y]$ is
$$C_{n + 1} = AX =  \begin{bmatrix} x_1a_{11} + x_2a_{12} + \cdots + x_na_{1n}\\
x_1a_{21} + x_2a_{22} + \cdots + x_na_{2n}\\ \vdots
\\
x_1a_{m1} + x_2a_{m2} + \cdots + x_na_{mn}\end{bmatrix} = \sum
_{i=1} ^n x_iC_i.
$$
By performing $C_{n+1} - \sum_{j = 1} ^nx_jC_j \rightarrow C_{n +
1} $ on $[A|Y] = [A|AX]$ we obtain $[A|0_{m\times 1}]$. Thus
$\rank{[A|Y]} = \rank{[A|0_{m\times 1}]} = \rank{A}$.


\bigskip

Now assume that $r = \rank{A} = \rank{[A|Y]}$. This means that
adding an extra column to $A$ does not change the rank, and hence,
by a sequence of column operations $[A|Y]$ is equivalent to
$[A|0_{m\times 1}]$. Observe that none of these operations is a
permutation of the columns, since the first $n$ columns of $[A|Y]$
and $[A|0_{m\times 1}]$ are the same. This means that $Y$ can be
obtained from the columns $C_i, 1 \leq i \leq n$ of $A$ by means
of transvections and dilatations. But then $$Y= \sum _{i = 1}
^nx_iC_i.
$$The solution sought is thus $$ X = \begin{bmatrix}x_1 \\ x_2 \\
\vdots
\\ x_n
\end{bmatrix}.
$$        \end{pf}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Let $A\in\mat{n\times p}{ \BBF }$, $B\in\mat{n\times q}{ \BBF }$ and
put $C = [A\ \ B]\in\mat{n\times (p+q)}{ \BBF }$. Prove that
$\rank{A} = \rank{C} \iff \exists P\in\mat{p\times q}{ \BBF }$ such that $B =
AP$.
\end{pro}

\end{multicols}

\section{Examples of Linear Systems}

\begin{exa}
Use row reduction to solve the system
$$\begin{array}{lllllllll}
x & + & 2y & + & 3z & + & 4w & = & 8 \\
x & + & 2y & + & 4z & + & 7w & = & 12 \\
2x & + & 4y & + & 6z & + & 8w & = & 16 \\
\end{array}$$
\end{exa}
\begin{solu} Form the expanded matrix of coefficients and apply row
operations to obtain
\begin{eqnarray*}
\begin{bmat}{llll|l}
1   & 2  & 3  & 4  & 8 \\
1   & 2  & 4  & 7  & 12 \\
2   & 4  & 6  & 8  & 16 \\
\end{bmat}
&  \grstep[R_2 - R_1  \rightarrow   R_2]{R_3 - 2R_1 \rightarrow
R_3} &
\begin{bmat}{llll|l}
1   & 2  & 3  & 4  & 8 \\
0   & 0  & 1  & 3  & 4 \\
0   & 0  & 0  & 0  & 0 \\
\end{bmat}
. \end{eqnarray*} The matrix is now in row-echelon form. The
variables $x$ and $z$ are the leading variables, so $w$ and $y$ are free.
Setting $w = s, y = t$ we have
$$z = 4 - 3s,$$$$ x = 8 - 4w - 3z - 2y = 8 - 4s - 3(4 - 3s) - 2t =
-4 + 5s - 2t.$$ Hence the solution is given by
$$\colvec{x\\ y\\ z\\ w} = \colvec{-4 + 5s - 2t\\ t\\ 4 - 3s\\ s}.$$
\end{solu}
\begin{exa}
Find $\alpha \in \BBR$ such that the system
$$x + y - z = 1,$$
$$2x + 3y + \alpha z = 3,$$
$$x + \alpha y + 3z = 2,$$ possesses (i) no solution, (ii) infinitely
many solutions, (iii) a unique solution.
\end{exa}
\begin{solu}The augmented matrix of the system is
$$\begin{augmatrix}{3} 1 & 1 & -1 & 1 \\ 2 & 3 & \alpha & 3 \\ 1 & \alpha & 3 & 2 \\ \end{augmatrix}.$$
By performing $R_2 - 2R_1 \rightarrow R_2$ and $R_3 - R_1
\rightarrow R_3$ we obtain
$$\rightsquigarrow\begin{augmatrix}{3} 1 & 1 & -1 & 1 \\ 0 & 1 & \alpha  + 2 & 1 \\ 0 & \alpha  - 1 & 4 & 1  \end{augmatrix}.$$
By performing $R_3 - (\alpha  - 1)R_2 \rightarrow R_3$ on this
last matrix we obtain
$$\rightsquigarrow\begin{augmatrix}{3} 1 & 1 & - 1 & 1 \\ 0 & 1 & \alpha  + 2 & 1 \\ 0 & 0 & (\alpha  - 2)(\alpha  + 3) & \alpha  - 2\end{augmatrix}.$$
If $\alpha  = -3,$ we obtain no solution. If $\alpha = 2$, there
is an infinity of solutions $$\colvec{x \\ y \\ z} = \colvec{5t
\\ 1 - 4t \\ t}, \ \ \ t\in\BBR.$$
If $\alpha \neq 2$ and $\alpha \neq -3$, there is a unique solution
$$\colvec{x \\ y \\ z} = \colvec{1
\\ \dfrac{1}{\alpha + 3} \\ \dfrac{1}{\alpha + 3}}.$$
\end{solu}



\begin{exa}
Solve the system
$$\begin{bmatrix}
\overline{6} & \overline{0}& \overline{1} \cr \overline{3} &
\overline{2} & \overline{0} \cr \overline{1} & \overline{0} &
\overline{1}\cr
\end{bmatrix}\colvec{x\\ y\\ z} = \colvec{\overline{1} \\ \overline{0} \\ \overline{2}},$$ for $(x, y, z)\in
(\BBZ_7)^3$.
\end{exa}\begin{solu}Performing operations on the augmented matrix
we have
\begin{eqnarray*}
\begin{bmat}{ccc|c}
\overline{6} & \overline{0}& \overline{1} & \overline{1} \cr
\overline{3} & \overline{2} & \overline{0} & \overline{0} \cr
\overline{1} & \overline{0} & \overline{1}& \overline{2}\cr
\end{bmat} &
\grstep{R_1 \leftrightarrow R_3}&
\begin{bmat}{ccc|c}
\overline{1} & \overline{0} & \overline{1}& \overline{2}\cr
\overline{3} & \overline{2} & \overline{0} & \overline{0} \cr
\overline{6} & \overline{0}& \overline{1} & \overline{1} \cr
\end{bmat} \\  &  \grstep[R_2 - \overline{3}R_1 \rightarrow R_2]{R_3 -
\overline{6}R_1 \rightarrow R_3} &
\begin{bmat}{ccc|c}
\overline{1} & \overline{0} & \overline{1}& \overline{2}\cr
\overline{0} & \overline{2} & \overline{4} & \overline{1} \cr
\overline{0} & \overline{0}& \overline{2} & \overline{3} \cr
\end{bmat}
\end{eqnarray*} This gives $$\overline{2}z  = \overline{3}  \implies   z =
\overline{5},$$
$$\overline{2}y = \overline{1} - \overline{4}z = \overline{2} \implies   y = \overline{1},$$
$$x = \overline{2} - z = \overline{4}.$$ The solution is thus
$$(x, y, z) = (\overline{4}, \overline{1}, \overline{5}).$$
\end{solu}
\section*{\psframebox{Homework}}

\begin{pro}
Find the general solution to the system
$$ \begin{bmatrix}1 & 1 & 1 & 1   & 1 \cr   1 & 0 & 1 & 0 & 1 \cr 2  & 1 & 2 & 1 & 2 \cr 4  & 2 & 4 & 2 & 4 \cr 1 & 0 & 0 & 0 & 1 \cr    \end{bmatrix}\colvec{a \\ b \\ c \\ d \\ f} =
\colvec{1 \\ -1 \\ 0 \\ 0 \\ 0}    $$ or shew that there is no
solution. \begin{answer} Observe that the third row is the sum of
the first two rows and the fourth row is twice the third. So we have
\begin{eqnarray*}
 \begin{bmat}{ccccc|c}1 & 1 & 1 & 1   & 1 & 1\cr   1 & 0 & 1 & 0 & 1 & -1\cr 2  & 1 & 2 & 1 & 2 & 0\cr 4  & 2 & 4 & 2 & 4 & 0\cr 1 & 0 & 0 & 0 & 1 & 0\cr    \end{bmat}
 & \grstep[R_4 - 2R_1 - 2R_2 \rightarrow R_4]{R_3-R_1-R_2\rightarrow
 R_3} &  \begin{bmat}{ccccc|c}1 & 1 & 1 & 1   & 1 & 1\cr   1 & 0 & 1 & 0 & 1& -1 \cr 0  & 0 & 0 & 0 & 0 & 0\cr 0  & 0 & 0 & 0 & 0 & 0\cr 1 & 0 & 0 & 0 & 1 & 0\cr
 \end{bmat}\\
 & \grstep[R_1 - R_5 \rightarrow R_1]{R_2-R_5 \rightarrow R_2} &
 \begin{bmat}{ccccc|c}0 & 1 & 1 & 1   & 0 & 1\cr   0 & 0 & 1 & 0 & 0 & -1\cr 0  & 0 & 0 & 0 & 0 & 0 \cr 0  & 0 & 0 & 0 & 0  & 0\cr 1 & 0 & 0 & 0 & 1 & 0\cr
 \end{bmat}\\
\end{eqnarray*}
Rearranging the rows we obtain $$ \begin{bmat}{ccccc|c} 1 & 0 & 0
& 0 & 1 & 0\cr 0 & 1 & 1 & 1   & 0 & 1\cr   0 & 0 & 1 & 0 & 0 &
-1\cr 0  & 0 & 0 & 0 & 0 & 0 \cr 0  & 0 & 0 & 0 & 0  & 0\cr
 \end{bmat} . $$Hence $d$ and $f$ are free variables. We obtain
 $$c = -1,$$
$$ b = 1 -c -d = 2-d,  $$
$$a = -f.  $$
The solution is $$\colvec{a\\ b\\ c\\ d\\ f} = \colvec{0\\
2\\ -1 \\ 0 \\ 0} + d\colvec{0 \\ -1 \\ 0 \\ 1 \\ 0} +
f\colvec{-1\\ 0 \\ 0 \\ 0 \\ 1}.
$$



\end{answer}
\end{pro}
\begin{pro}
Find all solutions of the system
$$ \begin{bmatrix}1 & 1 & 1 & 1   & 1 \cr   1 & 1 & 1 & 1 & 2 \cr 1  & 1 & 1 & 3 & 3 \cr 1  & 1 & 4 & 4 & 4 \cr 1 & 2 & 3 & 4 & 5 \cr    \end{bmatrix}\colvec{a \\ b \\ c \\ d \\ f} =
\colvec{3 \\ 4 \\ 7 \\ 6 \\ 9},    $$ if any.
\begin{answer}
The unique solution is $\colvec{1\\ 1\\ -1\\ 1\\ 1}$.
\end{answer}

\end{pro}
\begin{pro}
Study the system
$$x + 2my + z = 4m;$$
$$2mx + y + z = 2;$$$$x + y + 2mz =
2m^2,$$with real parameter $m$. You must determine, with proof, for
which $m$ this system has (i) no solution, (ii) exactly one
solution, and (iii) infinitely many solutions.  \begin{answer} The
augmented matrix of the system is
$$\begin{bmat}{ccc|c}2m & 1 & 1 & 2 \cr 1 & 2m & 1 & 4m \cr 1 & 1 & 2m & 2m^2 \cr
\end{bmat}.
$$ Performing $R_1 \leftrightarrow R_2$.
$$\begin{bmat}{ccc|c} 1 & 2m & 1 & 4m \cr 2m & 1 & 1 & 2 \cr 1 & 1 & 2m & 2m^2 \cr
\end{bmat}.
$$Performing $R_2 \leftrightarrow R_3$.
$$\begin{bmat}{ccc|c} 1 & 2m & 1 & 4m \cr 1 & 1 & 2m & 2m^2 \cr 2m & 1 & 1 & 2 \cr
\end{bmat}.
$$
Performing $R_2 - R_1 \rightarrow R_2$ and $R_3 - 2mR_1
\rightarrow R_3$ we obtain
$$\begin{bmat}{ccc|c} 1 & 2m & 1 & 4m \cr 0 & 1 - 2m & 2m - 1 & 2m^2 - 4m \cr  0 & 1 - 4m^2 & 1 - 2m & 2 - 8m^2 \cr
\end{bmat}.
$$ If $m = \frac{1}{2}$ the matrix becomes $$  \begin{bmat}{ccc|c} 1 & 1 & 1 & 2 \cr 0 & 0 & 0 & -\frac{3}{2} \cr
0 & 0 & 0 & 0 \cr
\end{bmat}     $$ and hence it does not have a solution. If $m \neq
\frac{1}{2}$, by performing $\frac{1}{1 - 2m}R_2 \rightarrow R_2$
and $\frac{1}{1 - 2m}R_3 \rightarrow R_3$, the matrix becomes
$$\begin{bmat}{ccc|c} 1 & 2m & 1 & 4m \cr 0 & 1 & - 1 & \frac{2m(m - 2)}{1 - 2m} \cr  0 & 1 + 2m & 1 & 2(1 + 2m) \cr
\end{bmat}.
$$ Performing $R_3 - (1 + 2m)R_2 \rightarrow R_3$ we obtain
$$\begin{bmat}{ccc|c} 1 & 2m & 1 & 4m \cr 0 & 1 & - 1 & \frac{2m(m - 2)}{1 - 2m} \cr  0 &  0 & 2 + 2m & \frac{2(1 + 2m)(1 - m^2)}{1 -2m} \cr
\end{bmat}.
$$ If $m = -1$ then the matrix reduces to
$$\begin{bmat}{ccc|c} 1 & -2 & 1 & -4 \cr 0 & 1 & - 1 & 2 \cr  0 &  0 & 0 & 0 \cr
\end{bmat}.
$$The solution in this case is $$\begin{bmatrix} x \cr y \cr z \cr   \end{bmatrix}  =
\begin{bmatrix} z \cr 2 + z \cr z    \end{bmatrix}.     $$ If $m \neq
-1, m \neq \frac{1}{2}$ we have the solutions
$$\begin{bmatrix} x \cr y \cr z \cr   \end{bmatrix}  =
\begin{bmatrix} \frac{m - 1}{1 - 2m} \cr \frac{1 - 3m}{1 - 2m}  \cr \frac{(1 + 2m)(1 - m)}{1 - 2m}    \end{bmatrix}.     $$



\end{answer}
\end{pro}

\begin{pro}
Study the following system of linear equations with parameter $a$.
$$(2a - 1)x + ay - (a + 1)z = 1,$$ $$ ax + y - 2z = 1,
$$

$$2x + (3 - a)y + (2a - 6)z = 1.$$
You must determine for which $a$ there is: (i) no solution, (ii) a
unique solution, (iii) infinitely many solutions. \begin{answer} By
performing the elementary row operations, we obtain the following
triangular form:
$$
ax  + y - 2z  =  1,$$ $$ (a - 1)^2y  + (1 - a)(a - 2)z = 1 - a,$$
$$(a - 2)z =  0. $$ If $a = 2,$ there is an infinity of solutions:
$$\colvec{x\\ y\\ z} = \colvec{1 + t\\ -1\\ t} \ t\in\BBR.$$

Assume $a \neq 2$. Then $z = 0$ and the system becomes
$$ax + y = 1,$$
$$ (a - 1)^2y = 1 - a,$$
$$2x + (3 - a)y = 1.$$ We see that if $a = 1$,  the system becomes
$$x + y = 1,$$
$$2x + 2y = 1,$$and so there is no solution. If $(a - 1)(a - 2) \neq 0$, we obtain the
unique solution
$$\colvec{x\\ y \\ z} = \colvec{\frac{1}{a - 1}\\ -\frac{1}{a - 1}\\ 0}.$$


\end{answer}
\end{pro}
\begin{pro}
Determine the values of the parameter $m$ for which  the system
$$\begin{array}{lllllll} x      &+&  y &+& (1-m)z  &=& m+2  \cr
           (1+m)x  &-&  y &+& 2z      &=& 0    \cr
           2x      &-& my &+& 3z      &=& m+2 \cr \end{array} $$
is solvable.
\begin{answer}
The system is solvable if and only if $m \neq 2$: for $m = 0$ and $m = -2$ there are
infinitely many solutions, and for $m \neq 0, m\neq \pm 2$ there is the unique solution $\colvec{x\\ y \\
z} = \colvec{\frac{1}{m-2} \\ \frac{m+3}{2-m} \\ \frac{m+2}{2-m}
}.$
\end{answer}
\end{pro}
\begin{pro}
Shew that, regardless of the values of the parameters $a, b, c, d$,  the system
$$\begin{array}{lllllllll}
x &+& y &+& z &+& t &=& 4a \cr
             x &-& y &-& z &+& t &=& 4b \cr
            -x &-& y &+& z &+& t &=& 4c \cr
           x &-& y &+&z &-&t &=& 4d \cr \end{array} $$ is solvable.
\begin{answer}
There is the unique solution $\colvec{x \\ y \\ z\\ t} = \colvec{a
+ d + b - c\\ -c - d - b + a\\ d + c - b + a\\ c - d + b+ a}.
  $

\end{answer}
\end{pro}
\begin{pro}
It is known that the system
$$ay + bx = c;$$ $$cx  + az = b;$$ $$bz + cy = a$$possesses a unique
solution. What conditions must $(a, b, c)\in\BBR^3$  fulfill in this
case?  Find this unique solution. \begin{answer} The system can be
written as
$$ \begin{bmatrix} b & a & 0 \cr c & 0 & a \cr 0 & c & b \cr  \end{bmatrix}\begin{bmatrix} x \cr y \cr z  \end{bmatrix} =
\begin{bmatrix} c \cr b \cr a  \end{bmatrix}.      $$  The system will have the unique
solution
 $$\begin{array}{lll} \begin{bmatrix} x \cr y \cr z  \end{bmatrix} & =  & \begin{bmatrix} b & a & 0 \cr c & 0 & a \cr 0 & c & b \cr  \end{bmatrix}^{-1}
 \begin{bmatrix} c \cr b \cr a \cr \end{bmatrix}\vspace{3mm}\\
 & = &\begin{bmatrix} \frac{1}{2b} &  \frac{1}{2c} &  -\frac{a}{2bc} \cr \frac{1}{2a} &  -\frac{b}{2ac} &  \frac{1}{2c} \cr
 -\frac{c}{2ba} &  \frac{1}{2a}  &  \frac{1}{2b}\cr  \end{bmatrix} \begin{bmatrix} c \cr b \cr a \cr \end{bmatrix}\vspace{3mm}\\
& = & \begin{bmatrix} \dfrac{b^2 + c^2 - a^2}{2bc} \cr \dfrac{a^2
+ c^2 - b^2}{2ac}\cr \dfrac{a^2 + b^2 -
c^2}{2ab}\cr\end{bmatrix}\end{array},
$$as long as the inverse matrix exists, which is as long as $abc\neq
0$.
\end{answer}
\end{pro}
\begin{pro}
For which values of the real parameter $a$ does the following system
have (i) no solutions, (ii) exactly one solution, (iii) infinitely
many solutions?
$$ \begin{array}{rrrrrrr}  (1-a)x & + & (2a+1)y & + & (2a+2)z  & = & a, \\
ax & + & ay &  &  & = & 2a+2, \\
2x & + & (a+1)y & + & (a-1)z  & = & a^2-2a+9. \\
 \end{array} $$
\begin{answer}
We first form the augmented matrix,
\begin{eqnarray*}
\begin{bmat}{lll|l}
1-a   & 2a+1  & 2a+2  & a \\
a   & a  & 0  & 2a+2   \\
2   & a+1  & a-1  & a^2-2a+9   \\
\end{bmat}
&  \grstep{R_1 + R_2  \rightarrow   R_1} &
\begin{bmat}{lll|l}
1   & 3a+1  & 2a+2  & 3a+2 \\
a   & a  & 0  & 2a+2   \\
2   & a+1  & a-1  & a^2-2a+9   \\
\end{bmat} \\
&  \grstep[R_3-2R_1\rightarrow R_3]{R_2 - aR_1  \rightarrow   R_2} &
\begin{bmat}{lll|l}
1   & 3a+1  & 2a+2  & 3a+2 \\
0   & -3a^2  & -2a^2-2a  & -3a^2+2   \\
0   & -5a-1  & -3a-5  &  a^2-8a+5   \\
\end{bmat}\\
. \end{eqnarray*} After   $(-5a-1)R_2 +3a^2R_3  \rightarrow R_2$,
this last matrix becomes
$$
\begin{bmat}{lll|l}
1   & 3a+1  & 2a+2  & 3a+2 \\
0   & 0  & a^3-3a^2+2a  & 3a^4-9a^3+18a^2-10a-2   \\
0   & -5a-1  & -3a-5  &  a^2-8a+5   \\
\end{bmat}. $$Exchanging the last two rows and factoring,  $$
 \begin{bmat}{lll|l}
1   & 3a+1  & 2a+2  & 3a+2 \\
0   & -5a-1  & -3a-5  &  a^2-8a+5   \\
0   & 0  & a(a-1)(a-2)  & (a-1)(3a^3-6a^2+12a+2)   \\
\end{bmat}.
$$
Thus we must examine the cases $a\in \{0,1,2\}$ and $a\not\in \{0,1,2\}$.

\bigskip
Clearly, if $a(a-1)(a-2)\neq 0$, then there is the unique solution
$$ \left\{ z=\frac {2+12a-6a^{2}+3a^{3}}{a \left( a-2 \right) },\qquad y=-\frac {2a^{3}-3\,a^{2}+6a+10}{a \left( a-2
 \right) },\qquad x={\frac {2\,a^{3}-a^{2}+4a+6}{a \left( a-2 \right)
}} \right\}.
 $$
If $a=0$, the system becomes
$$x+y+2z=0, \quad 0 = 2, \quad 2x+y-z=9,   $$
which is inconsistent (no solutions).

\bigskip

If $a=1$, the system becomes
$$ 3y+4z=1, \qquad x+y=4, \qquad 2x+2y=8, $$
which has infinitely many solutions, $$\left\{
y=\dfrac{1}{3}-\dfrac{4}{3}z,\qquad
x=\dfrac{11}{3}+\dfrac{4}{3}z,\qquad z=z \right\}.
$$


\bigskip

If $a=2$, the system becomes
$$ -x+5y+6z=2, \quad 2x+2y=6, \qquad 2x+3y + z = 9, $$
which is also inconsistent, as can be seen by observing that
$$(-x+5y+6z)-6(2x+3y + z)=2-6\cdot 9 \implies -13x-13y=-52, $$which
contradicts the equation $2x+2y=6$.
\end{answer}
\end{pro}


\begin{pro}
Find strictly positive real numbers $x, y, z$   such that
$$\begin{array}{lcl}  x^3y^2z^6    &=& 1 \\
                              x^4y^5z^{12} &=& 2 \\
                              x^2y^2z^5    &=& 3.\\
\end{array}
$$
\begin{answer} $$\begin{array}{lcl}x &=& 2^{-2} 3^6    \\
                  y &=& 2^{-3}3^{12}  \\
                  z &=& 2^2 3^{-7}.   \\ \end{array}$$  \end{answer}
\end{pro}

\begin{pro}[Leningrad Mathematical Olympiad, 1987, Grade 5] The numbers
$1$, $2$, $\ldots$ , $16$ are arranged in a $4\times 4$ matrix $A$
as shewn below. We may add $1$ to all the numbers of any row or
subtract $1$ from all numbers of any column. Using only the
allowed operations, how can we obtain $A^T$?

$$
A = \begin{bmatrix}
  1 & 2 & 3 & 4 \cr
  5 & 6 & 7 & 8 \cr
  9 & 10 & 11 & 12 \cr
  13 & 14 & 15 & 16 \cr
\end{bmatrix}
$$

\begin{answer} Denote the addition operations applied to the rows by $a_1$,
$a_2$, $a_3$, $a_4$ and the subtraction operations to the columns
by $b_1$, $b_2$, $b_3$, $b_4.$ Comparing $A$ and $A^T$  we obtain
$7$ equations in $8$ unknowns.  By inspecting the diagonal
entries, and the entries of the first row of $A$ and $A^T$, we
deduce the following equations
$$a_1 = b_1,$$
$$a_2 = b_2,$$$$a_3 = b_3,$$$$a_4 = b_4,$$$$a_1 - b_2 = 3,$$
$$a_1 - b_3 = 6,$$
$$a_1 - b_4 = 9.$$This is a system of $7$ equations in $8$
unknowns. We may let $a_4 = 0$ and thus obtain $a_1 = b_1 = 9,$
$a_2 = b_2 = 6,$ $a_3 = b_3 = 3,$ $a_4 = b_4 = 0.$
\end{answer}
\end{pro}
\begin{pro}[International Mathematics Olympiad, 1963] Find all solutions
$x_1, x_2, x_3, x_4, x_5$ of the system
$$x_5 + x_2 = yx_1;$$$$x_1 + x_3 = yx_2;$$$$x_2 + x_4 = yx_3;$$$$x_3 + x_5 = yx_4;$$$$x_4 + x_1 =
yx_5,$$where $y$ is a parameter.\begin{answer} The augmented matrix
of this system is
$$ \begin{bmat}{ccccc|c} -y & 1 & 0 & 0 & 1 & 0\cr
1 & -y & 1 & 0 & 0 & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr 0 & 0 & 1 &
-y & 1 & 0 \cr 1 & 0 & 0 & 1 & -y & 0 \cr
\end{bmat}.
$$ Permute the rows to obtain

$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 1 & -y & 1 & 0 & 0 & 0 \cr -y & 1 & 0 &
0 & 1 & 0\cr
\end{bmat}.$$
Performing $R_5 + yR_1 \rightarrow R_5$ and $R_4 - R_1 \rightarrow
R_4$ we get
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 0 & -y & 1 & -1 & y & 0 \cr 0 & 1 & 0 &
y & 1 - y^2& 0\cr
\end{bmat}.
$$
Performing $R_5 - R_2 \rightarrow R_5$ and $R_4 + yR_2 \rightarrow
R_4$ we get
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 0 & 0 & 1 - y^2 & y-1 & y & 0 \cr 0 & 0
& y & y - 1 &  1 - y^2& 0\cr
\end{bmat}.
$$
Performing $R_5 - yR_3 \rightarrow R_5$ and $R_4 + (y^2 -1)R_3
\rightarrow R_4$ we get
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 0 & 0 & 0 & -y^3 + 2y-1 & y^2 + y - 1 &
0 \cr 0 & 0 & 0 & y^2 + y - 1 &  1 - y - y^2& 0\cr
\end{bmat}.
$$
Performing $R_5 +  R_4\rightarrow R_5$ we get
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 0 & 0 & 0 & -y^3 + 2y-1 & y^2  +y - 1 &
0 \cr 0 & 0 & 0 & -y^3 +y^2 + 3y - 2 & 0& 0\cr
\end{bmat}.
$$Upon factoring, the matrix is equivalent to
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 0 & 0 & 0 & -(y-1)(y^2+y-1) & y^2  +y -
1 & 0 \cr 0 & 0 & 0 & -(y-2)(y^2+y-1) & 0& 0\cr
\end{bmat}.
$$ Thus $(y-2)(y^2+y-1)x_4 = 0$. If $y = 2$ then the system
reduces to
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -2 & 0 \cr 0 & 1 & -2 & 1 & 0 & 0 \cr
0 & 0 & 1 & -2 & 1 & 0 \cr 0 & 0 & 0 & -5 & 5 & 0 \cr 0 & 0 & 0 &
0 & 0& 0\cr
\end{bmat}.
$$
In this case $x_5$ is free and by backwards substitution we obtain
$$\begin{bmatrix} x_1 \cr x_2 \cr x_3 \cr x_4 \cr x_5\end{bmatrix} =
\begin{bmatrix} t \cr t \cr t \cr t \cr t\end{bmatrix}, \ \ \ t\in\BBR.$$
If $y^2 + y - 1 = 0$ then the system reduces to
$$ \begin{bmat}{ccccc|c}1 & 0 & 0 & 1 & -y & 0 \cr 0 & 1 & -y & 1 & 0 & 0 \cr
0 & 0 & 1 & -y & 1 & 0 \cr 0 & 0 & 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0
& 0& 0\cr
\end{bmat}.
$$ In this case $x_4, x_5$ are free, and
$$\begin{bmatrix} x_1 \cr x_2 \cr x_3 \cr x_4 \cr x_5\end{bmatrix} =
\begin{bmatrix} yt - s \cr y^2s - yt - s \cr ys - t \cr s \cr t\end{bmatrix}, \ \ \ (s,t)\in\BBR^2.$$
Since $y^2s - s = (y^2 + y - 1)s - ys$, this last solution can be
also written as
$$\begin{bmatrix} x_1 \cr x_2 \cr x_3 \cr x_4 \cr x_5\end{bmatrix} =
\begin{bmatrix} yt - s \cr -ys - yt \cr ys - t \cr s \cr t\end{bmatrix}, \ \ \ (s,t)\in\BBR^2.$$
Finally, if $(y - 2)(y^2 + y - 1) \neq 0$, then $x_4 = 0$, and we
obtain
$$\begin{bmatrix} x_1 \cr x_2 \cr x_3 \cr x_4 \cr x_5\end{bmatrix} =
\begin{bmatrix} 0 \cr 0 \cr 0 \cr 0 \cr 0\end{bmatrix}.$$
\end{answer}
\end{pro}

\chapter{Vector Spaces}
\section{Vector Spaces}
\begin{df}
A {\em vector space} $\vecspace{V}{+}{\cdot}{ \BBF }$ over a field
$\field{ \BBF }{+}{}$ is a non-empty set $V$ whose elements are
called {\em vectors}, possessing  two operations $+$ (vector
addition), and $\cdot$ (scalar multiplication) which satisfy the
following axioms. \index{vector space}\index{vectors!in a vector
space}
$$\forall
(\v{a}, \v{b}, \v{c}) \in V^3,\  \forall (\alpha, \beta)\in \BBF
^2,$$

\begin{enumerate}
\item[VS1] {\bf Closure\ under \ vector\ addition} :
\begin{equation}\v{a} + \v{b}\in
V   ,\label{vs:closure_under_addtion}\end{equation} \item[VS2]
{\bf Closure\ under \ scalar\ multiplication}
\begin{equation}\alpha\v{a} \in V  ,
\label{vs:closure_under_scalar_multiplication}
\end{equation}
\item[VS3] {\bf Commutativity} \begin{equation}  \v{a} + \v{b} =
\v{b} + \v{a} \label{vs:commutativity}\end{equation} \item[VS4]
{\bf Associativity}\begin{equation} {(\v{a} + \v{b}) + \v{c} =
\v{a} + (\v{b} + \v{c})} \label{vs:associativity}\end{equation}
\item[VS5] {\bf Existence\ of\ an\ additive\ identity}
\begin{equation}\exists \ \v{0}\in V: \v{a} + \v{0} =
\v{a} + \v{0}= \v{a} \label{vs:additive_identity}\end{equation}
\item[VS6]  {\bf Existence\ of\ additive\ inverses}
\begin{equation}   \exists \  -\v{a}\in V: \v{a} +
(-\v{a})=(-\v{a}) + \v{a} = \v{0}
\label{vs:additive_inverses}\end{equation} \item[VS7] {\bf
Distributive\ Law} \begin{equation} \alpha(\v{a} + \v{b}) =
\alpha\v{a} + \alpha\v{b}
\label{vs:distributive_law_1}\end{equation} \item[VS8]{\bf
Distributive\ Law}
\begin{equation}(\alpha + \beta)\v{a} = \alpha \v{a} + \beta\v{a}
\label{vs:distributive_law_2}
\end{equation}
\item[VS9]
     \begin{equation}1_{\BBF }\v{a} = \v{a}   \label{vs:1v_is_v}\end{equation}
\item[VS10] \begin{equation} (\alpha \beta) \v{a} = \alpha (\beta
\v{a}) \label{vs:associative_scalar_product}
\end{equation}
\end{enumerate}


\end{df}
\begin{exa}
If $n$ is a positive integer, then $\vecspace{\BBF ^n}{+}{\cdot}{
\BBF }$ is a vector space by defining
$$(a_1, a_2, \ldots ,a_n) + (b_1, b_2, \ldots ,b_n) = (a_1 + b_1, a_2 + b_2, \ldots ,a_n + b_n),$$
$$\lambda(a_1, a_2, \ldots ,a_n) = (\lambda  a_1, \lambda  a_2, \ldots , \lambda  a_n).$$
In particular, $\vecspace{\BBZ^2 _2}{+}{\cdot}{\BBZ_2}$ is a vector
space with only four elements and we have seen the two-dimensional
and tridimensional spaces $\vecspace{\BBR^2}{+}{\cdot}{\BBR}$ and
$\vecspace{\BBR^3}{+}{\cdot}{\BBR}$.
\end{exa}
\begin{exa}
$\vecspace{\mat{m\times n}{ \BBF }}{+}{\cdot}{ \BBF }$ is a vector
space under matrix addition and scalar multiplication of matrices.
\end{exa}
\begin{exa}
If $$\BBF[x] = \{a_0 + a_1x + a_2x^2 + \cdots + a_nx^n: a_i\in \BBF, \
\ n\in \BBN\}$$ denotes the set of polynomials with coefficients in
a field $\field{ \BBF }{+}{}$ then $\vecspace{\BBF[x]}{+}{\cdot}{
\BBF }$ is a vector space, under polynomial addition and scalar
multiplication of a polynomial.
\end{exa}
\begin{exa}
If $$\BBF_n[x] = \{a_0 + a_1x + a_2x^2 + \cdots + a_kx^k: a_i\in \BBF,
\ \ n\in \BBN, k \leq n\}$$ denotes the set of polynomials with
coefficients in a field $\field{ \BBF }{+}{}$ and degree at most
$n$, then $\vecspace{\BBF_n[x]}{+}{\cdot}{ \BBF }$ is a vector
space, under polynomial addition and scalar multiplication of a
polynomial.
\end{exa}
\begin{exa}
Let $k\in\BBN$ and let $C^k(\BBR^{[a; b]})$ denote the set of
$k$-fold continuously differentiable real-valued functions defined
on the interval $[a; b]$. Then $C^k(\BBR^{[a; b]})$ is a vector
space under addition of functions and multiplication of a function
by a scalar.
\end{exa}
\begin{exa}
Let $p \in ]1;+\infty[$. Consider the set of sequences $\{a_n\}_{n =
0} ^\infty, \ \ a_n\in\BBC$,
$$ l^p =\left\{\{a_n\}_{n = 0}
^\infty: \sum _{n = 0} ^\infty |a_n|^p < + \infty\right\}.$$ Then
$l^p$ is a vector space  by defining addition as termwise addition
of sequences and scalar multiplication as termwise multiplication:
$$    \{a_n\}_{n = 0} ^\infty +   \{b_n \}_{n = 0} ^\infty=  \{(a_n + b_n)\}_{n = 0} ^\infty ,          $$
$$  \lambda \{a_n \}_{n = 0} ^\infty   = \{\lambda a_n\}_{n = 0} ^\infty , \ \ \lambda\in\BBC.  $$
All the axioms of a vector space follow trivially from the fact that
we are adding complex numbers, except that we must prove that in
$l^p$ there is closure under addition and scalar multiplication.
Since $ \sum _{n = 0} ^\infty |a_n|^p < + \infty \implies   \sum _{n
= 0} ^\infty |\lambda a_n|^p < + \infty$ closure under scalar
multiplication follows easily. To prove closure under addition,
observe that if $z\in \BBC$ then $|z| \in \BBR_+$ and so by the
Minkowski Inequality Theorem \ref{thm:minkowski_inequality} we have
\begin{equation}\begin{array}{lll}\left( \sum _{n = 0} ^N |a_n + b_n|^p
\right)^{1/p}     &  \leq & \left(\sum _{n = 0} ^N |a_n|^p
\right)^{1/p}+  \left(\sum _{n = 0} ^N |b_n|^p \right)^{1/p} \\ &
\leq &  \left(\sum _{n = 0} ^\infty |a_n|^p\right)^{1/p} +
\left(\sum _{n = 0} ^\infty |b_n|^p\right)^{1/p}.\end{array}
\label{eq:lp_1}
\end{equation} This in turn implies that the series on the left in
(\ref{eq:lp_1}) converges, and so we may take the limit as
$N\rightarrow +\infty$ obtaining
\begin{equation} \left(\sum _{n = 0} ^\infty |a_n + b_n|^p \right)^{1/p}    \leq     \left(\sum _{n = 0}
^\infty |a_n|^p\right)^{1/p} +  \left(\sum _{n = 0} ^\infty
|b_n|^p\right)^{1/p}.\label{eq:lp_2}\end{equation} Now
(\ref{eq:lp_2}) implies that the sum of two sequences in $l^p$ is
also in $l^p$, which demonstrates closure under addition.
\end{exa}
\begin{exa}
The set $$V = \{a + b\sqrt{2} + c\sqrt{3}: (a, b, c) \in \BBQ^3\}$$
with addition defined as $$(a + b\sqrt{2} + c\sqrt{3}) + (a' +
b'\sqrt{2} + c'\sqrt{3}) = (a + a') + (b + b')\sqrt{2} + (c +
c')\sqrt{3},$$and scalar multiplication defined as
$$\lambda (a + b\sqrt{2} + c\sqrt{3}) = (\lambda a) + (\lambda b)\sqrt{2} + (\lambda
c)\sqrt{3},$$constitutes a vector space over $\BBQ$.
\end{exa}
\begin{thm}
In any vector space $\vecspace{V}{+}{\cdot}{ \BBF }$, $$\forall \ \
\alpha \in \BBF, \ \ \ \alpha\v{0} = \v{0}. $$
\end{thm}
\begin{pf}
We have $$\alpha\v{0} = \alpha(\v{0} + \v{0}) = \alpha\v{0} +
\alpha\v{0}.$$Hence
$$\alpha\v{0} - \alpha\v{0} = \alpha\v{0},$$or
$$\v{0} = \alpha\v{0},$$proving the theorem.
\end{pf}
\begin{thm}
In any vector space $\vecspace{V}{+}{\cdot}{ \BBF }$, $$\forall \ \
\v{v} \in V, \ \ \ 0_{\BBF }\v{v} = \v{0}. $$
\label{thm:zeroscalar_times_vector}\end{thm}
\begin{pf}
We have
$$0_{\BBF }\v{v} = (0_{\BBF } + 0_{\BBF })\v{v} = 0_{\BBF }\v{v} +
0_{\BBF }\v{v}.$$Therefore
$$0_{\BBF }\v{v} - 0_{\BBF }\v{v} = 0_{\BBF }\v{v},$$or
$$\v{0} = 0_{\BBF }\v{v},$$proving the theorem.
\end{pf}
\begin{thm}
In any vector space $\vecspace{V}{+}{\cdot}{ \BBF }$, $\alpha\in
\BBF , \ \ \v{v} \in V,$ $$\alpha\v{v}= \v{0} \ \ \ \implies\ \ \
\alpha = 0_{\BBF } \ \ \ \vee\ \ \  \v{v} = \v{0}.$$
\end{thm}
\begin{pf}
Assume that $\alpha \neq 0_{\BBF }$. Then $\alpha$ possesses a
multiplicative inverse $\alpha^{-1}$ such that $\alpha^{-1}\alpha =
1_{\BBF }$. Thus
$$\alpha\v{v} = \v{0} \implies   \alpha^{-1}\alpha\v{v} =
\alpha^{-1}\v{0}.$$By Theorem \ref{thm:zeroscalar_times_vector},
$\alpha^{-1}\v{0} = \v{0}$. Hence
$$\alpha^{-1}\alpha\v{v} =
\v{0}.$$By Axiom \ref{vs:1v_is_v}, we have
$\alpha^{-1}\alpha\v{v} = 1_{\BBF }\v{v} = \v{v}$, and so we
conclude that $\v{v} = \v{0}$.
\end{pf}
\begin{thm}
In any vector space $\vecspace{V}{+}{\cdot}{ \BBF }$, $$\forall
\alpha \in \BBF, \ \ \ \forall \ \ \v{v} \in V, \ \ \ (-\alpha)\v{v}
= \alpha(-\v{v}) = -(\alpha\v{v}).
$$
\end{thm}
\begin{pf}
We have
$$0_{\BBF }\v{v} = (\alpha + (-\alpha))\v{v} = \alpha \v{v} +
(-\alpha) \v{v},$$whence
$$-(\alpha \v{v}) + 0_{\BBF } \v{v} = (-\alpha)
\v{v},$$that is$$-(\alpha \v{v}) = (-\alpha) \v{v}.$$Similarly,
$$\v{0} = \alpha (\v{v} - \v{v}) = \alpha\v{v}
+ \alpha(-\v{v}),$$whence
$$-(\alpha \v{v}) + \v{0} = \alpha (-\v{v}),$$that
is$$-(\alpha \v{v})= \alpha (-\v{v}),$$proving the theorem.
\end{pf}
\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Is $\BBR^2$
with vector addition and scalar multiplication defined as $$\colvec{x_1 \\
x_2 } + \colvec{ y_1 \\  y_2} =  \colvec{x_1 + y_1 \\ x_2 + y_2},
\ \ \ \lambda\colvec{x_1\\ x_2} = \colvec{\lambda x_1 \\ 0}
$$a vector space? \begin{answer} No, since $1_{\BBF }\v{v} = \v{v}$ is not fulfilled. For
example
$$1\cdot \colvec{1\\ 1} = \colvec{1\cdot 1 \\ 0 } \neq \colvec{1\\
1}.
$$
\end{answer}
\end{pro}
\begin{pro}
Demonstrate that the commutativity axiom \ref{vs:commutativity} is
redundant. \begin{answer} We expand $(1_{\BBF } + 1_{\BBF })(\v{a} +
\v{b})$ in two ways, first using \ref{vs:distributive_law_1}
and then \ref{vs:distributive_law_2}, obtaining
$$(1_{\BBF } + 1_{\BBF })(\v{a} + \v{b}) = (1_{\BBF } + 1_{\BBF })\v{a} + (1_{\BBF } + 1_{\BBF }) \v{b}
= \v{a} + \v{a} + \v{b} + \v{b},$$and then using
\ref{vs:distributive_law_2} first and then
\ref{vs:distributive_law_1}, obtaining
$$(1_{\BBF } + 1_{\BBF })(\v{a} + \v{b}) = 1_{\BBF }(\v{a} + \v{b}) + 1_{\BBF }(\v{a} + \v{b})=
\v{a} + \v{b} + \v{a} + \v{b}.$$ We thus have the equality
$$\v{a} + \v{a} + \v{b} +
\v{b} = \v{a} + \v{b} + \v{a} + \v{b}.$$Cancelling $\v{a}$ from
the left and $\v{b}$ from the right, we obtain
$$\v{a} + \v{b} = \v{b} + \v{a},$$which is
what we wanted to shew.
\end{answer}
\end{pro}
\begin{pro}
Let $V = \BBR^+ = ]0; +\infty[$, the positive real numbers and $\BBF =
\BBR$, the real numbers. Demonstrate that $V$ is a vector space over
$\BBF$ if vector addition is defined as $a \oplus b = ab$, $(a,
b)\in (\BBR^+)^2$ and scalar multiplication is defined as $\alpha
\otimes a = a^{\alpha}$, $(\alpha, a)\in (\BBR, \BBR^+)$.
\begin{answer}
We must prove that each of the axioms of a vector space are
satisfied. Clearly if $(x,y, \alpha)\in\BBR^+\times\BBR^+\times
\BBR$ then $x\oplus y = xy > 0$ and $\alpha\otimes x = x^\alpha >
0$, so $V$ is closed under vector addition and scalar
multiplication. Commutativity and associativity of vector addition
are obvious.
\bigskip

Let $A$ be the additive identity. Then we need $$x\oplus A = x
\implies xA = x \implies A = 1.$$Thus the additive identity is
$1$. Suppose $I$ is the additive inverse of $x$. Then $$x\oplus I
= 1 \implies xI = 1 \implies I = \dfrac{1}{x}.
$$Hence the additive inverse of $x$ is $\dfrac{1}{x}$.

\bigskip

Now $$\alpha \otimes (x\oplus y) = (xy)^{\alpha} =
x^{\alpha}y^{\alpha} = x^{\alpha} \oplus y^{\alpha} = (\alpha
\otimes x)\oplus (\alpha \otimes y),
$$ and $$(\alpha + \beta)\otimes x = x^{\alpha + \beta} = x^\alpha x^{\beta} = (x^\alpha)\oplus (x^{\beta})
= (\alpha \otimes x) \oplus (\beta \otimes x),    $$whence the
distributive laws hold.

\bigskip
Finally, $$1\otimes x = x^1 = x,   $$and $$\alpha \otimes (\beta
\otimes x) = (\beta \otimes x)^\alpha = (x^\beta)^\alpha =
x^{\alpha\beta} = (\alpha\beta)\otimes x,
$$and the last two axioms also hold.


\end{answer}
\end{pro}
\begin{pro}
Let $\BBC$ denote the complex numbers and $\BBR$ denote the real
numbers. Is $\BBC$ a vector space over $\BBR$ under ordinary
addition and multiplication? Is $\BBR$ a vector space over $\BBC$?
\begin{answer} $\BBC$ is a vector space over $\BBR$, the proof is trivial. But $\BBR$ is not
a vector space over $\BBC$, since, for example taking $i$ as a
scalar (from $\BBC$) and $1$ as a vector (from $\BBR$) the scalar
multiple $i\cdot 1 = i \not\in\BBR$ and so there is no closure under
scalar multiplication.
\end{answer}
\end{pro}
\begin{pro}
Construct a vector space with exactly $8$ elements.
\begin{answer} One example is  $$(\BBZ_2)^3 = \left\{\colvec{\overline{0}\\ \overline{0} \\ \overline{0}}, \colvec{\overline{0}\\ \overline{0} \\ \overline{1}},
\colvec{\overline{0}\\ \overline{1} \\ \overline{0}}, \colvec{\overline{0}\\
\overline{1} \\ \overline{1}},
\colvec{\overline{1}\\ \overline{0} \\ \overline{0}}, \colvec{\overline{1}\\
\overline{0} \\ \overline{1}}, \colvec{\overline{1}\\
\overline{1} \\ \overline{0}}, \colvec{\overline{1}\\
\overline{1} \\  \overline{1}}\right\}.   $$Addition is the
natural element-wise addition and scalar multiplication is
ordinary element-wise scalar multiplication.
\end{answer}
\end{pro}
\begin{pro}
Construct a vector space with exactly $9$ elements.
\begin{answer} One example is  $$(\BBZ_3)^2 = \left\{\colvec{\overline{0}\\ \overline{0}}, \colvec{ \overline{0} \\ \overline{1}},
\colvec{\overline{0}\\ \overline{2}}, \colvec{\overline{1}\\
\overline{0}},
\colvec{\overline{1}\\ \overline{1} }, \colvec{\overline{1}\\ \overline{2} }, \colvec{\overline{2}\\
\overline{0} }, \colvec{\overline{2}\\
\overline{1} }, \colvec{\overline{2}\\
\overline{2} }\right\}.   $$Addition is the natural element-wise
addition and scalar multiplication is ordinary element-wise scalar
multiplication.
\end{answer}
\end{pro}
\end{multicols}
\section{Vector Subspaces}
\begin{df}Let $\vecspace{V}{+}{\cdot}{ \BBF }$ be a vector space. A
non-empty subset $U \subseteq V$ which is also a vector space
under the inherited operations of $V$ is called a {\em vector
subspace of $V$.}
\end{df}
\begin{exa}
Trivially, $X_1 = \{\v{0}\}$ and $X_2 = V$ are vector subspaces of
$V$.
\end{exa}
\begin{thm}
Let $\vecspace{V}{+}{\cdot}{ \BBF }$ be a vector space. Then $U
\subseteq V, \ \ U \neq \varnothing$ is a subspace of $V$ if and
only if $\forall \alpha\in \BBF$ and $\forall (\v{a}, \v{b})\in U^2$
it is verified that
$$\v{a} + \alpha\v{b} \in U.$$
\label{thm:condition_for_subspace}\end{thm}
\begin{pf}
Observe that $U$ inherits commutativity, associativity and the
distributive laws from $V$. Thus a non-empty  $U \subseteq V$ is a
vector subspace of $V$ if (i) $U$ is closed under scalar
multiplication, that is, if $\alpha\in \BBF$ and $\v{v}\in U$, then
$\alpha \v{v}\in U$; (ii) $U$ is closed under vector addition, that
is, if $(\v{u}, \v{v})\in U^2$, then $\v{u} + \v{v}\in U$. Observe
that (i) gives the existence of inverses in $U$, for take $\alpha =
-1_{\BBF }$ and so $\v{v}\in U \implies -\v{v}\in U$. This coupled
with (ii) gives the existence of the zero-vector, for $\v{0} = \v{v}
- \v{v}\in U$. Thus we need to prove that if a non-empty subset of
$V$ satisfies the property stated in the Theorem then it is closed
under scalar multiplication and vector addition, and vice-versa, if
a non-empty subset of $V$ is closed under scalar multiplication and
vector addition, then it satisfies the property stated in the
Theorem. But this is trivial.
\end{pf}
\begin{exa}
Shew that $\dis{X =\left\{A \in\mat{n\times n}{ \BBF }: \tr{A} =
0_{\BBF }\right\}}$ is a subspace of $\mat{n\times n}{ \BBF }$.
\end{exa}
\begin{solu}Take $A, B \in X, \alpha \in \BBF$. Then $$\tr{A +
\alpha B} = \tr{A} + \alpha\tr{B} = 0_{\BBF } + \alpha (0_{\BBF }) =
0_{\BBF }.$$ Hence $A + \alpha B \in X$, meaning that $X$ is a
subspace of $\mat{n\times n}{ \BBF }$.
\end{solu}

\begin{exa} Let $U \in \mat{n\times n}{ \BBF }$ be an arbitrary but fixed matrix.
Shew that $$ \mathscr{C}_U = \left\{A \in \mat{n\times n}{ \BBF }:
AU = UA\right\}$$ is a subspace of $\mat{n\times n}{ \BBF }$.
\end{exa}
\begin{solu}Take $(A, B) \in (\mathscr{C}_U)^2$ and $\alpha\in\BBF$. Then $AU = UA$
and $BU = UB.$ Now
$$(A + \alpha B)U = AU + \alpha BU = UA + \alpha UB = U(A + \alpha B),$$ meaning that
$A + \alpha B \in \mathscr{C}_U$. Hence $\mathscr{C}_U$ is a
subspace of $\mat{n\times n}{ \BBF }$. $\mathscr{C}_U$ is called the
{\em commutator} of $U$.
\end{solu}

\begin{thm}
Let $X \subseteq V, \ \ Y \subseteq V$ be vector subspaces of a
vector space $\vecspace{V}{+}{\cdot}{ \BBF }$. Then their
intersection $X \cap Y$ is also a vector subspace of $V$.
\end{thm}
\begin{pf}
Let $\alpha\in \BBF$ and $(\v{a}, \v{b})\in (X\cap Y)^2$. Then
clearly $(\v{a}, \v{b})\in X$ and $(\v{a}, \v{b})\in Y$. Since $X$
is a vector subspace, $\v{a} + \alpha \v{b}\in X$ and since $Y$ is a
vector subspace, $\v{a} + \alpha \v{b}\in Y$. Thus
$$\v{a} + \alpha \v{b}\in X \cap Y$$ and so $X \cap Y$
is a vector subspace of $V$ by virtue of Theorem
\ref{thm:condition_for_subspace}.
\end{pf}
\begin{rem}
We will soon see that the only vector subspaces of
$\vecspace{\BBR^2}{+}{\cdot}{\BBR}$ are the set containing the
zero-vector, any line through the origin, and $\BBR^2$ itself. The
only vector subspaces of $\vecspace{\BBR^3}{+}{\cdot}{\BBR}$ are the
set containing the zero-vector, any line through the origin, any
plane containing the origin and $\BBR^3$ itself.
\end{rem}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900



\begin{pro}
Prove that $$ X = \left\{\colvec{a \\
b
\\ c \\ d}\in\BBR^4: a - b - 3d = 0\right\}
$$ is a vector subspace of $\BBR^4$. \begin{answer} Take $\alpha\in\BBR$ and
$$\v{x} = \colvec{a \\
b
\\ c \\ d}\in X, \ \  a - b - 3d = 0, \ \ \ \v{y} = \colvec{a' \\
b'
\\ c' \\ d'}\in X, \ \  a' - b' - 3d' = 0.   $$
Then $$\v{x} + \alpha \v{y} = \colvec{a \\
b
\\ c \\ d} + \alpha\colvec{a' \\
b'
\\ c' \\ d'} = \colvec{a + \alpha a' \\
b + \alpha b'
\\ c + \alpha c' \\ d + \alpha d'}.    $$
Observe that
$$ (a + \alpha a') - (b + \alpha b') - 3(d + \alpha d') = (a - b - 3d) + \alpha (a' - b' - 3d') = 0 + \alpha 0 = 0,$$
meaning that $\v{x} + \alpha\v{y}\in X$, and so $X$ is a vector
subspace of $\BBR^4$.
\end{answer}
\end{pro}
\begin{pro}
Prove that
  $$X = \left\{\begin{bmatrix}a\cr 2a - 3b\cr 5b\cr a + 2b\cr a\end{bmatrix}:a, b \in \BBR\right\}$$is a vector
  subspace of $\BBR^5$. \label{exa:subspace_in_r5}\begin{answer}  Take $$\v{u} = \begin{bmatrix}a_1\cr 2a_1 - 3b_1\cr
5b_1\cr a_1 + 2b_1\cr a_1\end{bmatrix},
  \v{v} = \begin{bmatrix}a_2\cr 2a_2 - 3b_2\cr 5b_2\cr a_2 + 2b_2\cr a_2\end{bmatrix}, \ \  \alpha
  \in \BBR .$$Put $s = a_1 + \alpha a_2, t = b_1 + \alpha b_2$. Then
  $$
  \v{u} + \alpha\v{v}   =
  \begin{bmatrix}a_1 + \alpha a_2\cr 2(a_1 + \alpha a_2) - 3(b_1 + \alpha b_2)\cr
5(b_1 + \alpha b_2)\cr (a_1 + \alpha a_2)  + 2(b_1 + \alpha
b_2)\cr
  a_1 + \alpha a_2\end{bmatrix}  = \begin{bmatrix} s\cr 2s - 3t\cr 5t\cr s + 2t\cr s\end{bmatrix} \in X,$$since this last matrix has
  the basic shape of matrices in $X$. This  shews that
  $X$ is a vector subspace of $\BBR^5$.
\end{answer}
\end{pro}
\begin{pro}
Let $A\in \mat{m\times n}{\BBF}$ be a fixed matrix. Demonstrate that
$$ S = \{X\in\mat{n\times 1}{\BBF}:  AX= {\bf 0}_{m\times 1}\}$$is a
subspace of $\mat{n\times 1}{\BBF} $.
\end{pro}
\begin{pro}
Prove that the set $X\subseteq \mat{n\times n}{ \BBF }$ of upper
triangular matrices is a subspace of $\mat{n\times n}{ \BBF }$.
\end{pro}
\begin{pro}
Prove that the set $X\subseteq \mat{n\times n}{ \BBF }$ of symmetric
matrices is a subspace of $\mat{n\times n}{ \BBF }$.
\end{pro}
\begin{pro}
Prove that the set $X\subseteq \mat{n\times n}{ \BBF }$ of
skew-symmetric matrices is a subspace of $\mat{n\times n}{ \BBF }$.
\end{pro}
\begin{pro}
Prove that the following subsets are {\bf not} subspaces of the
given vector space. Here you must say which of the axioms for a
vector space fail.\begin{dingautolist}{202} \item $\dis{\left\{\colvec{a\\ b\\
0}: a, b \in {\BBR}, a^2 + b^2 = 1\right\} \subseteq {\BBR}^3}$
\item $\dis{\left\{\colvec{a\\ b\\ 0}: a, b \in {\BBR}, ab =
0\right\} \subseteq {\BBR}^3}$ \item $\dis{\left\{
\begin{bmatrix} a & b \cr 0 & 0\end{bmatrix}: (a, b)\in {\BBR}^2, a  + b^2 =
0\right\} \ \subseteq \mat{2\times 2}{\BBR}}$
\end{dingautolist}
\begin{answer} We shew that some of the properties in the definition of vector
subspace fail to hold in these sets. \begin{dingautolist}{202}\item Take $\v{x} = \colvec{0\\ 1\\
0}, \ \alpha = 2.$ Then $\v{x} \in V$ but $2\v{x} = \colvec{0\\
2\\ 0} \not\in V$ as $0^2 + 2^2 = 4 \neq 1.$ So $V$ is not closed
under scalar multiplication. \item Take $\v{x} = \colvec{0\\
1\\ 0}, \v{y} = \colvec{1\\ 0\\ 0}$. Then $\v{x} \in W,
\v{y} \in W$ but $\v{x} + \v{y} = \colvec{1\\ 1\\
0} \not\in W$ as $1\cdot 1 = 1 \neq 0.$ Hence $W$ is not closed
under vector addition. \item Take $\v{x} = \dis{
\begin{bmatrix} -1 & 1 \cr 0 & 0 \end{bmatrix}.}$ Then $\v{x} \in
Z$ but $-\v{x} = -\dis{ \begin{bmatrix} -1 & 1 \cr 0 & 0
\end{bmatrix} = \begin{bmatrix} 1 & -1 \cr 0 & 0 \end{bmatrix} \not\in Z}$ as $1 +
(-1)^2 = 2 \neq 0.$ So $Z$ is not closed under scalar
multiplication.
\end{dingautolist}
\end{answer}
\end{pro}
\begin{pro}
Let $\vecspace{V}{+}{\cdot}{ \BBF }$ be a vector space, and let $U_1
\subseteq V$ and $U_2 \subseteq V$ be vector subspaces. Prove that
if $U_1 \cup U_2$ is a vector subspace of $V$, then either $U_1
\subseteq U_2$ or $U_2 \subseteq U_1$.
\begin{answer}
Assume $U_1\nsubseteq U_2$ and  $U_2\nsubseteq U_1$.  Take
$\v{v}\in U_2 \setminus U_1$ (which is possible because
$U_2\nsubseteq U_1$) and $\v{u}\in U_1 \setminus U_2$ (which is
possible because $U_1\nsubseteq U_2$). If $\v{u} + \v{v} \in U_1$,
then---as $-\v{u}$ is also in $U_1$---the sum of two vectors in
$U_1$ must also be in $U_1$ giving $$ \v{u} + \v{v} - \v{u} =
\v{v} \in U_1,
$$a contradiction. Similarly if $\v{u} + \v{v} \in U_2$, then---as
$-\v{v}$ is also in $U_2$---the sum of two vectors in $U_2$ must also
be in $U_2$ giving $$ \v{u} + \v{v} - \v{v} = \v{u} \in U_2,
$$another
 contradiction. Hence either $U_1\subseteq U_2$ or $U_2\subseteq U_1$ (or
 possibly both).

\end{answer}
\end{pro}

\begin{pro}
Let $V$ a vector space over a field $\BBF$. If $\BBF$ is infinite,
show that $V$ is {\bf not} the set-theoretic union of a finite
number of {\bf proper} subspaces. \begin{answer} Assume contrariwise
that $V = U_1 \bigcup U_2 \bigcup \cdots \bigcup U_k$ is the
shortest such list. Since the $U_j$ are proper subspaces, $k > 1.$
Choose $\v{x} \in U_1, \v{x} \not\in U_2 \bigcup \cdots \bigcup U_k$
and choose $\v{y} \not\in U_1.$ Put $L = \{\v{y} + \alpha\v{x}|
\alpha \in \BBF\}$. Claim: $L \bigcap U_1 = \varnothing$. For if
$\v{u} \in L \bigcap U_1$ then $\exists a_0 \in \BBF$ with $\v{u} =
\v{y} + a_0\v{x}$ and so $\v{y} = \v{u} - a_0\v{x} \in U_1$, a
contradiction. So $L$ and $U_1$ are disjoint.

\bigskip

We now shew that $L$ has at most one vector in common with $U_j, 2
\leq j \leq k.$ For, if there were two elements of $\BBF$, $a \neq
b$ with $\v{y} + a\v{x}, \v{y} + b\v{x} \in U_j, j \geq 2$ then
$$(a - b)\v{x} = (\v{y} + a\v{x}) - (\v{y} +
b\v{x}) \in U_j,$$ contrary to the choice of $\v{x}.$

\bigskip

Conclusion: since $\BBF$ is infinite, $L$ is infinite. But we have
shewn that $L$ can have at most one element in common with the
$U_j$. This means that there are not enough $U_j$ to go around to
cover the whole of $L$. So $V$ cannot be a finite union of proper
subspaces.
\end{answer}
\end{pro}
\begin{pro}
Give an example of a finite vector space $V$ over a finite field
$\BBF$ such that $$V = V_1 \cup V_2 \cup V_3,$$where the $V_k$ are
proper subspaces. \begin{answer}  Take $F = \BBZ _2, V = F\times F.$
Then $V$ has the four elements
$$\begin{bmatrix}\overline{0}\cr \overline{0}\end{bmatrix}, \begin{bmatrix}\overline{0}\cr \overline{1}\end{bmatrix},
\begin{bmatrix}\overline{1}\cr \overline{0}\end{bmatrix}, \begin{bmatrix}\overline{1}\cr
\overline{1}\end{bmatrix},$$with the following subspaces
$$V_1 = \left\{\begin{bmatrix}\overline{0}\cr \overline{0}\end{bmatrix},
 \begin{bmatrix}\overline{0}\cr \overline{1}\end{bmatrix}\right\}, \ \
 V_2 = \left\{\begin{bmatrix}\overline{0}\cr \overline{0}\end{bmatrix},
 \begin{bmatrix}\overline{1}\cr \overline{0}\end{bmatrix}\right\},\ \
 V_3 = \left\{\begin{bmatrix}\overline{0}\cr \overline{0}\end{bmatrix},
 \begin{bmatrix}\overline{1}\cr \overline{1}\end{bmatrix}\right\}.$$
It is easy to verify that these subspaces satisfy the conditions
of the problem.
\end{answer}
\end{pro}

\end{multicols}
\section{Linear Independence}
\begin{df}
Let $(\lambda_1, \lambda_2, \cdots , \lambda_n)\in \BBF ^n$. Then
the vectorial sum
$$\sum_{j = 1} ^n \lambda_j\v{ a}_j$$ is said to be a {\em linear
combination} of the vectors  $\v{ a}_i \in V,  1 \leq i \leq n.$
\end{df}
\begin{exa}
Any matrix $\dis{\begin{bmatrix}a & b \cr c & d
\cr\end{bmatrix}\in\mat{2\times 2}{\BBR}}$ can be written as a
linear combination of the matrices $$\dis{\begin{bmatrix}1 & 0 \cr 0
& 0 \cr\end{bmatrix}},\  \dis{\begin{bmatrix}0 & 1 \cr 0 & 0
\cr\end{bmatrix}},\ \dis{\begin{bmatrix}0 & 0 \cr 1 & 0
\cr\end{bmatrix}}, \ \dis{\begin{bmatrix}0 & 0 \cr 0 & 1
\cr\end{bmatrix}},$$ for
$$\begin{bmatrix}a & b \cr c & d
\cr\end{bmatrix} = a\begin{bmatrix}1 & 0 \cr 0 & 0
\cr\end{bmatrix} + b\begin{bmatrix}0 & 1 \cr 0 & 0
\cr\end{bmatrix} + c\begin{bmatrix}0 & 0 \cr 1 & 0
\cr\end{bmatrix} + d\begin{bmatrix}0 & 0 \cr 0 & 1
\cr\end{bmatrix}.$$
\end{exa}
\begin{exa} Any polynomial of degree at most $2$, say $a + bx + cx^2 \in\BBR_2[x]$ can be written as a linear
combination of $1$, $x - 1$, and $x^2 - x + 2$, for
$$
a + bx + cx^2 = (a + b - c)(1) + (b + c)(x - 1) + c(x^2 - x + 2).
$$

 \end{exa}
Generalising the notion of two parallel vectors, we have
\begin{df}
The vectors $\v{ a}_i \in V,  1 \leq i \leq n,$ are {\em linearly
dependent} or {\em tied} if
$$\exists (\lambda_1, \lambda_2, \cdots , \lambda_n) \in \BBF ^n \setminus \{{\bf 0}\} \ \ {\rm such \ that\ }
\sum_{j = 1} ^n \lambda_j\v{ a}_j = \v{0},$$that is, if there is a
non-trivial linear combination of them adding to the zero vector.
\end{df}\begin{df}
The vectors $\v{ a}_i \in V,  1 \leq i \leq n,$ are {\em linearly
independent} or {\em free} if they are not linearly dependent. That
is, if $(\lambda_1, \lambda_2, \cdots , \lambda_n) \in \BBF ^n $
then
$$\sum_{j = 1} ^n \lambda_j\v{ a}_j = \v{0} \implies   \lambda_1 = \lambda_2= \cdots = \lambda_n = 0_{\BBF }.$$
\end{df}
\begin{rem}
A family of vectors is linearly independent if and only if the
only linear combination of them giving the zero-vector is the
trivial linear combination.
\end{rem}


\begin{exa}

$$\left\{\colvec{1 \\ 2 \\ 3}, \colvec{4 \\ 5 \\ 6}, \colvec{7 \\ 8 \\
9}\right\}$$is a tied family of vectors in $\BBR^3$, since
$$(1)\colvec{1 \\ 2 \\ 3} + (-2) \colvec{4 \\ 5 \\ 6} + (1)\colvec{7 \\ 8 \\
9} = \colvec{0 \\ 0 \\ 0}.$$
\end{exa}
\begin{exa}
Let $\v{u}, \v{v}$ be linearly independent vectors in some vector
space over a field $\BBF$ with characteristic different from $2$.
Shew that the two new vectors $\v{x} = \v{u} - \v{v}$ and $\v{y} =
\v{u} + \v{v}$ are also linearly independent.

\end{exa}\begin{solu}Assume that $a(\v{u} - \v{v}) + b(\v{u} + \v{v})
= \v{0}.$ Then
$$(a + b)\v{u} + (a - b)\v{v} = \v{0}.$$Since $\v{u}, \v{v}$ are linearly independent, the
above coefficients must be 0, that is, $a + b = 0_{\BBF }$ and $a -
b = 0_{\BBF }$. But this gives $2a = 2b = 0_{\BBF },$ which implies
$a = b = 0_{\BBF },$ if the characteristic of the field is not $2$.
This proves the linear independence of $\v{u} - \v{v}$ and $\v{u} +
\v{v}$.
\end{solu}

\begin{thm}\label{thm:lin_ind_columns}
Let $A\in\mat{m\times n}{ \BBF }$. Then the columns of $A$ are
linearly independent if and only if the only solution to the system $AX
= {\bf 0}_m$ is the trivial solution.
\end{thm}
\begin{pf}
Let $A_1, \ldots , A_n$ be the columns of $A$. Since
$$x_1A_1 + x_2A_2 + \cdots + x_nA_n = AX,   $$the result follows.
\end{pf}
\begin{thm}
Any family $$\{\v{0}, \v{u}_1, \v{u}_2, \ldots , \v{u}_k\}$$
containing the zero-vector is linearly dependent.
\end{thm}
\begin{pf}
This follows at once by observing that
$$1_{\BBF }\v{0} +  0_{\BBF }\v{u}_1 +  0_{\BBF }\v{u}_2 + \cdots
+  0_{\BBF }\v{u}_k = \v{0}$$is a non-trivial linear combination of
these vectors equalling the zero-vector.
\end{pf}



\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900



\begin{pro}
Shew that
$$\left\{\colvec{1 \\ 0 \\ 0}, \colvec{1 \\ 1 \\ 0}, \colvec{1 \\ 1 \\
1}\right\}$$forms a free family of vectors in $\BBR^3$.
\begin{answer} If
$$a\colvec{1 \\ 0 \\ 0} + b\colvec{1 \\ 1 \\ 0} + c\colvec{1 \\ 1 \\
1} = \v{0},$$then
$$\colvec{a + b + c \\ b + c \\ c} = \colvec{0 \\ 0 \\ 0}.$$This
clearly entails that $c = b = a = 0,$ and so the family is free.
\end{answer}
\end{pro}
\begin{pro}
Prove that the set
$$\left\{\begin{bmatrix}1\cr 1\cr 1\cr 1\end{bmatrix}, \begin{bmatrix}1\cr 1\cr -1\cr -1\end{bmatrix},
\begin{bmatrix}1\cr -1\cr 1\cr -1\end{bmatrix}, \begin{bmatrix}1\cr 1\cr 0\cr 1\end{bmatrix}\right\}$$
is a linearly independent set of vectors in $\BBR^4$ and shew that
$X =
\begin{bmatrix}1\cr 2\cr 1\cr 1\end{bmatrix}$ can be written as a linear
combination of these vectors. \begin{answer}
Assume$$a\begin{bmatrix}1\cr 1\cr 1\cr 1\end{bmatrix} +
b\begin{bmatrix}1\cr 1\cr -1\cr -1\end{bmatrix} +
c\begin{bmatrix}1\cr -1\cr 1\cr -1\end{bmatrix} +
d\begin{bmatrix}1\cr 1\cr 0\cr 1\end{bmatrix}  =
\begin{bmatrix}0\cr 0\cr 0\cr 0\end{bmatrix}.
$$ Then
$$a + b + c + d = 0,$$$$a + b - c + d = 0,$$$$a - b + c = 0,$$
$$a - b - c + d = 0.$$Subtracting the second equation from the
first, we deduce $2c = 0$, that is, $c = 0.$ Subtracting the third
equation from the fourth, we deduce $-2c + d = 0$ or $d = 0.$ From
the first and third equations, we then deduce $a + b = 0$ and $a -
b = 0,$ which entails $a = b = 0.$ In conclusion, $a = b = c = d =
0$.

\bigskip


Now, put $$x\begin{bmatrix}1\cr 1\cr 1\cr 1\end{bmatrix} +
y\begin{bmatrix}1\cr 1\cr -1\cr -1\end{bmatrix} +
  z\begin{bmatrix}1\cr -1\cr 1\cr -1\end{bmatrix}
+ w\begin{bmatrix}1\cr 1\cr 0\cr 1\end{bmatrix}
  =   \begin{bmatrix}1\cr 2\cr 1\cr 1\end{bmatrix}.
$$ Then

$$x + y + z + w  = 1,$$$$x + y - z + w = 2,$$$$x - y + z = 1,$$
$$x - y - z + w = 1.$$Solving as before, we find
$$2\begin{bmatrix}1\cr 1\cr 1\cr 1\end{bmatrix} + \frac{1}{2}\begin{bmatrix}1\cr 1\cr -1\cr -1
\end{bmatrix}
 - \frac{1}{2}\begin{bmatrix}1\cr -1\cr 1\cr -1\end{bmatrix} -
\begin{bmatrix}1\cr 1\cr 0\cr 1\end{bmatrix}   = \begin{bmatrix}1\cr 2\cr 1\cr
1\end{bmatrix}. $$
\end{answer}
\end{pro}


\begin{pro}
Let $(\v{u}, \v{v})\in (\BBR^n)^2$. Prove that $|\dotprod{u}{v}| =
\norm{\v{u}}\norm{\v{v}}$ if and only if $\v{u}$ and $\v{v}$ are
linearly dependent.
\end{pro}
\begin{pro}
Prove that $$\left\{\begin{bmatrix}1 & 0 \cr 0 & 1 \cr
\end{bmatrix},
\begin{bmatrix}1 & 0 \cr 0 & -1 \cr \end{bmatrix}, \begin{bmatrix}0 & 1 \cr 1 & 0 \cr
\end{bmatrix}, \begin{bmatrix}0 & 1 \cr -1 & 0 \cr
\end{bmatrix}\right\}$$is a linearly independent family over $\BBR$.
Write $\begin{bmatrix}1 & 1 \cr 1 & 1 \cr
\end{bmatrix}$ as a linear combination of these matrices.
\end{pro}
\begin{pro}
Let $\{\v{v}_1, \v{v}_2, \v{v}_3, \v{v}_4 \}$ be a linearly
independent family of vectors. Prove that the family
$$\{\v{v}_1 + \v{v}_2, \v{v}_2 + \v{v}_3, \v{v}_3 + \v{v}_4, \v{v}_4 + \v{v}_1\}$$is
not linearly independent. \begin{answer} We have
$$(\v{v}_1 + \v{v}_2) - (\v{v}_2 + \v{v}_3) + (\v{v}_3 + \v{v}_4) - (\v{v}_4 + \v{v}_1) =
\v{0},$$a non-trivial linear combination of these vectors
equalling the zero-vector.
\end{answer}
\end{pro}
\begin{pro}
 Let $\{\v{v}_1, \v{v}_2,
\v{v}_3\}$ be linearly independent vectors  in $\BBR^5$. Are the
vectors $$ \v{b}_1 = 3\v{v}_1+ 2\v{v}_2 + 4\v{v}_3,
$$
$$ \v{b}_2 = \v{v}_1+ 4\v{v}_2 +
2\v{v}_3,  $$
$$ \v{b}_3 = 9\v{v}_1+ 4\v{v}_2 +
3\v{v}_3,  $$ $$ \v{b}_4 = \v{v}_1+ 2\v{v}_2 + 5\v{v}_3,
$$linearly independent? Prove or disprove!
\end{pro}
\begin{pro}
Is the family $\{1, \sqrt{2}\}$ linearly independent over $\BBQ$?
\begin{answer}Yes. Suppose that  $a + b\sqrt{2} = 0$ is a non-trivial linear combination of $1$ and $\sqrt{2}$ with rational numbers $a$ and $b$. If one of $a, b$ is different from $0$ then so is the other.
Hence $$a + b \sqrt{2} = 0 \implies \sqrt{2} = -\dfrac{a}{b}.$$The
sinistral side of the equality $\sqrt{2} = -\dfrac{a}{b}$ is
irrational whereas the dextral side is rational, a
contradiction.\end{answer}
\end{pro}
\begin{pro}
Is the family $\{1, \sqrt{2}\}$ linearly independent over $\BBR$?
\begin{answer}No. The representation $2\cdot 1 + (-\sqrt{2}) \sqrt{2} = 0$ is a non-trivial linear combination of $1$ and $\sqrt{2}$. \end{answer}
\end{pro}
\begin{pro}
Consider the vector space $$V = \{a + b\sqrt{2} + c\sqrt{3}: (a, b,
c) \in \BBQ^3\}.$$
 \begin{enumerate}
\item Shew that $\{1, \sqrt{2}, \sqrt{3}\}$ are linearly
independent
over $\BBQ$. \\
\item Express $$\frac{1}{1 - \sqrt{2}} + \frac{2}{\sqrt{12} - 2}$$
as a linear combination of $\{1, \sqrt{2}, \sqrt{3}\}$.\\

\end{enumerate}
\begin{answer} \begin{enumerate}\item Assume that $$a + b\sqrt{2} +
c\sqrt{3} = 0, \ \ a, b, c, \in \BBQ, a^2 + b^2 + c^2 \neq 0.
$$If $ac \neq 0,$ then
$$b\sqrt{2} = -a - c\sqrt{3} \Leftrightarrow 2b^2 = a^2 + 2ac\sqrt{3} + 3c^2 \Leftrightarrow \frac{2b^2 - a^2 - 3c^2}{2ac} = \sqrt{3}.$$
The dextral side of the last implication is irrational, whereas
the sinistral side is rational. Thus it must be the case that $ac
= 0.$ If $a = 0, c \neq 0$ then
$$b\sqrt{2} + c\sqrt{3} = 0 \Leftrightarrow  -\frac{b}{c} =
\sqrt{\frac{3}{2}},$$and again the dextral side is irrational and
the sinistral side is rational. Thus if $a = 0$ then also $c  =
0.$ We can similarly prove that $c = 0$ entails $a = 0.$ Thus we
have $$b\sqrt{2} = 0,$$ which means that $b = 0.$ Therefore
$$a + b\sqrt{2} + c\sqrt{3} = 0, a, b, c, \in \BBQ , \  \Leftrightarrow a = b = c =
0.$$This proves that $\{1, \sqrt{2}, \sqrt{3}\}$ are linearly
independent over $\BBQ$. \item Rationalising denominators,
$$\begin{array}{lll}\dfrac{1}{1 - \sqrt{2}} + \dfrac{2}{\sqrt{12} - 2} & = &  \dfrac{1 + \sqrt{2}}{1 - 2} + \dfrac{2\sqrt{12} + 4}{12 - 4}
\\ & = & -1 - \sqrt{2} + \dfrac{1}{2}\sqrt{3} + \dfrac{1}{2} \\ &  =
&  -\dfrac{1}{2} - \sqrt{2} + \dfrac{1}{2}\sqrt{3}. \end{array}$$
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro}
Let $f, g, h$ belong to $C^\infty (\BBR^\BBR)$ (the space of
infinitely continuously differentiable real-valued functions defined
on the real line) and be given by $$f(x) = e^x, g(x) = e^{2x}, h(x)
= e^{3x}.$$ Shew that $f, g, h$ are linearly independent over
$\BBR$.
\begin{answer} Assume that
$$ae^x + be^{2x} + ce^{3x} = 0.$$Then
$$c = -ae^{-2x} - be^{-x}. $$Letting $x\rightarrow +\infty$, we
obtain $c = 0$. Thus$$ae^x + be^{2x} = 0,$$and so
$$b = -ae^{-x}. $$Again, letting $x\rightarrow +\infty$, we
obtain $b = 0$. This yields $$ae^x = 0.$$Since the exponential
function never vanishes, we deduce that $a = 0$. Thus $a = b = c =
0$ and the family is linearly independent over $\BBR$.
\end{answer}
\end{pro}
\begin{pro}
Let $f, g, h$ belong to $C^\infty(\BBR^\BBR)$ be given by
$$f(x) = \cos^2x, g(x) = \sin^2x, h(x) = \cos 2x.$$ Shew that $f,
g, h$ are linearly dependent over $\BBR$. \begin{answer} This
follows at once from the identity
$$\cos 2x = \cos^2x - \sin^2x,$$which implies
$$\cos 2x -\cos^2x + \sin^2x = 0.$$
\end{answer}
\end{pro}

\end{multicols}
\section{Spanning Sets}
\begin{df}
A family $\{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots, \}
\subseteq V$ is said to {\em span} or {\em generate} $V$ if every
$\v{v}\in V$ can be written as a linear combination of the
$\v{u}_j$'s. \index{spanning set}
\end{df}
\begin{thm}
If $\{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots, \} \subseteq V$
spans $V$, then any superset $$\{\v{w}, \v{u}_1, \v{u}_2, \ldots ,
\v{u}_k, \ldots, \} \subseteq V$$ also spans $V$.
\end{thm}
\begin{pf}
This follows at once from
$$ \sum _{i = 1} ^l \lambda _i \v{u}_i = 0_{\BBF }\v{w} + \sum _{i = 1} ^l \lambda _i \v{u}_i.$$
\end{pf}
\begin{exa}
The family of vectors $$\left\{\v{i} = \colvec{1\\ 0 \\
0}, \v{j} = \colvec{0\\ 1\\ 0}, \v{k} = \colvec{0\\
0\\ 1}\right\}$$ spans $\BBR^3$ since given $\colvec{a\\ b\\
c}\in\BBR^3$ we may write $$\colvec{a\\ b\\ c} = a\v{i} + b\v{j} +
c\v{k} .$$ \label{exa:ijk}\end{exa}
\begin{exa}
Prove that the family of vectors $$\left\{\v{t}_1 = \colvec{1\\ 0 \\
0}, \v{t}_2 = \colvec{1\\ 1\\ 0}, \v{t}_3 = \colvec{1\\
1\\ 1}\right\}$$ spans $\BBR^3$.\label{triagbasisr3}\end{exa}
\begin{solu}This follows from the identity
$$\colvec{a\\ b\\ c} = (a - b)\colvec{1\\ 0\\ 0} + (b - c)\colvec{1\\ 1\\ 0} + c\colvec{1\\ 1\\ 1} = (a - b)\v{t}_1 + (b - c)\v{t}_2 + c\v{t}_3.$$
\end{solu}
\begin{exa}
Since
$$\begin{bmatrix}a & b \cr c & d
\cr\end{bmatrix} = a\begin{bmatrix}1 & 0 \cr 0 & 0 \cr\end{bmatrix}
+ b\begin{bmatrix}0 & 1 \cr 0 & 0 \cr\end{bmatrix} +
c\begin{bmatrix}0 & 0 \cr 1 & 0 \cr\end{bmatrix} + d\begin{bmatrix}0
& 0 \cr 0 & 1 \cr\end{bmatrix}$$the set of matrices
$\dis{\begin{bmatrix}1 & 0 \cr 0 & 0 \cr\end{bmatrix}}$,
$\dis{\begin{bmatrix}0 & 1 \cr 0 & 0 \cr\end{bmatrix}}$,
$\dis{\begin{bmatrix}0 & 0 \cr 1 & 0 \cr\end{bmatrix}}$,
$\dis{\begin{bmatrix}0 & 0 \cr 0 & 1 \cr\end{bmatrix}}$ is a
spanning set for $\mat{2\times 2}{\BBR}$.

\end{exa} \begin{exa}
The set $$\{1, x, x^2, x^3, \ldots , x^n, \ldots\}$$spans $\BBR[x]$,
the set of polynomials with real coefficients and indeterminate $x$.
\end{exa}

\begin{df}\index{vector!span}
The {\em span} of a family of vectors $\{\v{u}_1, \v{u}_2, \ldots
, \v{u}_k, \ldots, \}$ is the set of all finite linear
combinations obtained from the $\v{u}_i$'s. We denote the span of
$\{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots, \}$ by
$$\span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots,
}.$$
\end{df}
\begin{thm}
Let $\vecspace{V}{+}{\cdot}{ \BBF }$  be a vector space. Then
$$\span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots,
} \subseteq V$$ is a vector subspace of $V$.
\end{thm}
\begin{pf}
Let $\alpha\in \BBF$ and let
$$\v{x} = \sum _{k = 1} ^l a_k\v{u}_k , \ \ \v{y} = \sum _{k = 1} ^l b_k\v{u}_k
,$$be in $\span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots, }$ (some
of the coefficients might be $0_{\BBF }$). Then
$$\v{x} + \alpha \v{y} =  \sum _{k = 1} ^l (a_k + \alpha b_k)\v{u}_k \in \span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k,
\ldots, },$$and so $\span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k,
\ldots, }$ is a subspace of $V$.
\end{pf}
\begin{cor}
$\span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots, } \subseteq V$
is the smallest vector subspace of $V$ (in the sense of set
inclusion) containing the set $$\{\v{u}_1, \v{u}_2, \ldots ,
\v{u}_k, \ldots, \}.$$
\end{cor}
\begin{pf}
If $W \subseteq V$ is a vector subspace of $V$ containing the set
$$\{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots, \}$$
then it contains every finite linear combination of them, and
hence, it contains $\span{\v{u}_1, \v{u}_2, \ldots , \v{u}_k,
\ldots, }.$
\end{pf}
\begin{exa}
If $A\in\mat{2\times 2}{\BBR}$, $\dis{A\in \span{\begin{bmatrix}1 &
0 \cr 0 & 0 \cr
\end{bmatrix},
\begin{bmatrix}0 & 0 \cr 0 & 1 \cr \end{bmatrix}, \begin{bmatrix}0 & 1 \cr 1 & 0 \cr
\end{bmatrix}}}$ then $A$ has the form
$$a\begin{bmatrix}1 & 0 \cr
0 & 0 \cr
\end{bmatrix} + b\begin{bmatrix}0 & 0 \cr 0 & 1 \cr \end{bmatrix} + c \begin{bmatrix}0 & 1 \cr 1 & 0 \cr
\end{bmatrix} = \begin{bmatrix}a & c \cr
c & b \cr
\end{bmatrix},$$i.e., this family spans the set of all symmetric
$2\times 2$ matrices over $\BBR$.
\end{exa}


\begin{thm} Let $V$ be a vector space over a field $\BBF$ and let $(\v{v}, \v{w})\in V^2$, $\gamma\in \BBF\setminus\{0_{\BBF }\}$.
Then $$\span{\v{v}, \v{w}} = \span{\v{v},\gamma\v{w}}.$$
\label{thm:linear_combinations_and_span_1}\end{thm}
\begin{pf}
The equality $$a\v{v} + b\v{w} =   a\v{v} +
(b\gamma^{-1})(\gamma\v{w}),
$$proves the statement.
\end{pf}
\begin{thm} Let $V$ be a vector space over a field $\BBF$ and let $(\v{v}, \v{w})\in V^2$, $\gamma\in \BBF$.
Then $$\span{\v{v}, \v{w}} = \span{\v{w}, \v{v} + \gamma\v{w}}.$$
\label{thm:linear_combinations_and_span_2}\end{thm} \begin{pf}
This follows from the equality
$$a\v{v} + b\v{w} =  a(\v{v} + \gamma\v{w}) +  (b -
a\gamma)\v{w}.
$$
\end{pf}

\section*{\psframebox{Homework}}


\begin{pro}
Let $\BBR_3[x]$ denote the set of polynomials with degree at most
$3$ and real coefficients. Prove that the set
$$\{1, 1 + x, (1 + x)^2, (1 + x)^3\}$$spans $\BBR_3[x]$.
\begin{answer} Given an arbitrary polynomial
$$p(x) = a + bx + cx^2 + dx^3,$$we must shew that there are real numbers
$s, t, u, v$ such that
$$p(x) = s + t(1 + x) + u(1 + x)^2 + v(1 + x)^3.$$In order to do this we find the Taylor expansion of $p$ around
$x = -1$. Letting $x = -1$ in this last equality, $$s = p(-1) = a -
b + c - d\in\BBR.$$ Now,
$$p'(x) = b + 2cx + 3dx^2 = t + 2u(1 + x) + 3v(1 + x)^2. $$Letting
$x = -1$ we find
$$t = p'(-1) = b -2c +3d\in\BBR.$$Again,
$$p''(x) = 2c + 6dx = 2u + 6v(1 + x).$$Letting $x = -1$ we find
$$u = \frac{p''(-1)}{2} =  c - 3d \in \BBR.$$Finally,
$$p'''(x) = 6d = 6v,$$so we let $v = d\in\BBR$. In other words, we
have
$$p(x) = a + bx + cx^2 + dx^3 = (a - b + c - d) + (b - 2c + 3d)(1 + x) + (c - 3d)(1 + x)^2 + d(1 + x)^3.$$
\end{answer}
\end{pro}
\begin{pro} Shew that $\dis{\colvec{1\\ 1 \\ -1}}\not\in\span{\colvec{1\\ 0 \\ -1}, \colvec{0\\ 1 \\
-1}}$.\begin{answer} Assume contrariwise that $$\colvec{1\\ 1 \\
-1} = a\colvec{1\\ 0
\\ -1} + b\colvec{0\\ 1 \\ -1}. $$ Then we must have
$$a = 1,$$
$$ b = 1, $$
$$ -a - b = -1,$$ which is impossible. Thus $\colvec{1\\ 1 \\ -1}$
is not a linear combination of $\colvec{1\\ 0 \\ -1},
\colvec{0\\ 1 \\ -1}$ and hence is not in $\span{\colvec{1\\ 0 \\
-1}, \colvec{0\\ 1 \\ -1}}$. \end{answer}
\end{pro}
\begin{pro} What is
$\dis{ \span{\begin{bmatrix}1 & 0 \cr 0 & 0 \cr
\end{bmatrix},
\begin{bmatrix}0 & 0 \cr 0 & 1 \cr \end{bmatrix}, \begin{bmatrix}0 & 1 \cr -1 & 0 \cr
\end{bmatrix}}}?$ \begin{answer}It is
$$a\begin{bmatrix}1 & 0 \cr
0 & 0 \cr
\end{bmatrix} + b\begin{bmatrix}0 & 0 \cr 0 & 1 \cr \end{bmatrix} + c \begin{bmatrix}0 & 1 \cr -1 & 0 \cr
\end{bmatrix} = \begin{bmatrix}a & c \cr
-c & b \cr
\end{bmatrix},$$i.e., this family spans the set of all skew-symmetric
$2\times 2$ matrices over $\BBR$.\end{answer}
\end{pro}

\begin{pro}
Prove that$$\span{\begin{bmatrix}1 & 0 \cr 0 & 1 \cr
\end{bmatrix},
\begin{bmatrix}1 & 0 \cr 0 & -1 \cr \end{bmatrix}, \begin{bmatrix}0 & 1 \cr 1 & 0 \cr
\end{bmatrix}, \begin{bmatrix}0 & 1 \cr -1 & 0 \cr
\end{bmatrix}} = \mat{2\times 2}{\BBR}.$$
\end{pro}
\begin{pro}
For the vectors in $\BBR^3$, $$\v{a} = \colvec{1\\ 2\\ 1},\ \v{b} = \colvec{1\\
3\\ 2},\ \v{c} = \colvec{1\\ 1\\ 0},\ \v{d}  = \colvec{3\\
8\\ 5},$$prove that
$$\span{\v{a},\v{b}}  = \span{\v{c},\v{d}}.$$
\end{pro}
\section{Bases}
\begin{df}
A family $\{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots\} \subseteq
V$ is said to be a {\em basis} of  $V$ if (i) they are linearly
independent, (ii) they span $V$. \index{basis}
\end{df}
\begin{exa}
The family $$ \v{e}_i = \begin{bmatrix}0_{\BBF }\cr \vdots \cr
0_{\BBF } \cr 1_{\BBF } \cr 0_{\BBF } \cr \vdots \cr 0_{\BBF }
\end{bmatrix},$$where there is a $1_{\BBF }$ on the $i$-th slot and
$0_{\BBF }$'s on the other $n - 1$ positions, is a basis for $\BBF
^n$.
\end{exa}





\begin{thm}
Let $\vecspace{V}{+}{\cdot}{ \BBF }$ be a vector space and let $$U =
\{\v{u}_1, \v{u}_2, \ldots , \v{u}_k, \ldots\} \subseteq V$$ be a
family of linearly independent vectors in $V$ which is maximal in
the sense that if $U'$ is any other family of vectors of $V$
properly containing $U$ then $U'$ is a dependent family. Then $U$
forms a basis for $V$. \label{thm:maximal_spanning_set}\end{thm}
\begin{pf}
Since $U$ is a linearly independent family, we need only to prove
that it spans $V$. Take $\v{v}\in V$. If $\v{v}\in U$ then there
is nothing to prove, so assume that $\v{v}\in V\setminus U$.
Consider the set $U' = U \cup \{\v{v}\}$. This set properly
contains $U$, and so, by assumption, it forms a dependent family.
There exist scalars $\alpha_0, \alpha_1, \ldots, \alpha_n$ such
that $$\alpha_0\v{v} + \alpha_1\v{u}_1 +  \cdots + \alpha_n\v{u}_n
= \v{0}.
$$ Now, $\alpha_0 \neq 0_{\BBF }$, otherwise the $\v{u}_i$ would be
linearly dependent. Hence $\alpha_0 ^{-1}$ exists and we have
 $$\v{v} =  -\alpha_0 ^{-1}(
\alpha_1\v{u}_1 +  \cdots +  \alpha_n\v{u}_n),
$$and so the $\v{u}_i$ span $V$.
\end{pf}
\begin{rem}
From Theorem \ref{thm:maximal_spanning_set} it follows that to
shew that a vector space has a basis it is enough to shew that it
has a maximal linearly independent set of vectors. Such a proof
requires something called Zorn's Lemma, and it is beyond our
scope. We dodge the whole business by taking as an axiom that
every vector space possesses a basis.
\end{rem}
\begin{thm}[Steinitz Replacement Theorem] \index{theorem!Steinitz}Let $\vecspace{V}{+}{\cdot}{ \BBF }$ be a vector space and let
$U = \{\v{u}_1, \v{u}_2, \ldots \} \subseteq V$. Let $W =
\{\v{w}_1, \v{w}_2, \ldots, \v{w}_k \}$ be an independent family
of vectors in $\span{U}$. Then there exist $k$ of the $\v{u}_i$'s,
say $\{\v{u}_1, \v{u}_2, \ldots, \v{u}_k \}$ which may be replaced
by the $\v{w}_i$'s in such a way that
$$\span{\v{w}_1, \v{w}_2, \ldots, \v{w}_k,  \v{u}_{k + 1}, \ldots } = \span{U}.  $$
\label{thm:steinitz_replacement}
\end{thm}
\begin{pf}
We prove this by induction on $k$. If $k = 1$, then $$\v{w}_1 =
\alpha_1\v{u}_1 + \alpha_2\v{u}_2 + \cdots + \alpha_n\v{u}_n
$$for some $n$ and scalars $\alpha_i$. There is an $\alpha_i \neq
0_{\BBF }$, since otherwise $\v{w}_1 = \v{0}$ contrary to the
assumption that the $\v{w}_i$ are linearly independent. After
reordering, we may assume that $\alpha_1 \neq 0_{\BBF }$. Hence
$$\v{u}_1
= \alpha_1 ^{-1}(\v{w}_1 -( \alpha_2\v{u}_2 + \cdots +
\alpha_n\v{u}_n)),
$$and so $\v{u}_1 \in \span{\v{w}_1, \v{u}_2, \ldots,
}$ and $$\span{\v{w}_1, \v{u}_2, \ldots, } = \span{\v{u}_1,
\v{u}_2, \ldots, }.$$

\bigskip
Assume now that the theorem is true for any set of fewer than $k$
independent vectors. We may thus assume that $\{\v{u}_1,
\ldots\}$ has more than $k - 1$ vectors and that
$$\span{\v{w}_1, \v{w}_2, \ldots, \v{w}_{k - 1},
\v{u}_k, \ldots } = \span{U}.$$ Since $\v{w}_k\in \span{U}$ we have
$$\v{w}_k
= \beta_1\v{w}_1 + \beta_2\v{w}_2 + \cdots + \beta_{k - 1}\v{w}_{k
- 1} + \gamma_k\v{u}_k + \gamma_{k+1}\v{u}_{k+1} + \cdots +
\gamma_m\v{u}_m.
$$If all the $\gamma_i = 0_{\BBF }$, then the $\{\v{w}_1, \v{w}_2, \ldots, \v{w}_k
\}$ would be linearly dependent, contrary to assumption. Thus there
is a $\gamma_i \neq 0_{\BBF }$, and after reordering, we may assume
that $\gamma_k \neq 0_{\BBF }$. We have therefore
$$\v{u}_k
= \gamma_k ^{-1}(\v{w}_k - (\beta_1\v{w}_1 + \beta_2\v{w}_2 +
\cdots + \beta_{k - 1}\v{w}_{k - 1}  + \gamma_{k+1}\v{u}_{k+1} + \cdots +
\gamma_m\v{u}_m)).
$$ But this means that
$$ \span{\v{w}_1, \v{w}_2, \ldots, \v{w}_k,  \v{u}_{k + 1}, \ldots } = \span{U}.
$$This finishes the proof.
\end{pf}
\begin{cor}
Let $\{\v{w}_1, \v{w}_2, \ldots , \v{w}_n\}$ be an independent
family of vectors with $ V = \span{\v{w}_1, \v{w}_2, \ldots ,
\v{w}_n}$. If  we also have $ V = \span{\v{u}_1, \v{u}_2, \ldots ,
\v{u}_\nu}$, then
\begin{enumerate}
\item $n \leq \nu$, \item $n = \nu$ if and only if the $\{\v{u}_1,
\v{u}_2, \ldots , \v{u}_\nu\}$ are a linearly independent family.
\item Any basis for $V$ has exactly $n$ elements.

\end{enumerate}
\end{cor}
\begin{pf}
\begin{enumerate}
\item In the  Steinitz Replacement Theorem
\ref{thm:steinitz_replacement} replace the first $n$ $\v{u}_i$'s
by the $\v{w}_i$'s and $n \leq \nu$ follows. \item If $\{\v{u}_1,
\v{u}_2, \ldots , \v{u}_\nu\}$ are a linearly independent family,
then we may interchange the r\^{o}le of the $\v{w}_i$ and
$\v{u}_i$ obtaining $\nu \leq n$. Conversely, if $\nu = n$ and if
the $\v{u}_i$ are dependent, we could express some $\v{u}_i$ as a
linear combination of the remaining $\nu - 1$ vectors, and thus we
would have shewn that some $\nu - 1$ vectors span $V$. From (1) in
this corollary we would conclude that $n \leq \nu - 1$,
contradicting $n = \nu$. \item This follows from the definition of
what a basis is and from (2) of this corollary.
\end{enumerate}
\end{pf}
\begin{df}
The {\em dimension} of a vector space $\vecspace{V}{+}{\cdot}{ \BBF
}$ is the number of elements of any of its bases, and we denote it
by $\dim V$.
\end{df}
\begin{thm}
Any linearly independent family of vectors $$\{\v{x}_1, \v{x}_2,
\ldots , \v{x}_k\}$$ in  a vector space $V$ can be completed into
a family $$\{\v{x}_1, \v{x}_2, \ldots , \v{x}_k, \v{y}_{k + 1},
\v{y}_{k + 2}, \ldots \}$$ so that this latter family becomes a
basis for $V$. \label{thm:enlargement_of_basis}\end{thm}
\begin{pf}
Take any basis $\{\v{u}_1, \ldots, \v{u}_k, \v{u}_{k+1}, \ldots,
\}$ and use Steinitz Replacement Theorem
\ref{thm:steinitz_replacement}.
\end{pf}
\begin{cor}
If $U \subseteq V$ is a vector subspace of a finite dimensional
vector space $V$ then $\dim U \leq \dim V$.
\end{cor}
\begin{pf}
Since any basis of $U$ can be extended to a basis of $V$, it
follows that the number of elements of the basis of $U$ is at most
as large as that for $V$.
\end{pf}

\begin{exa}Find a basis and the dimension of the space generated by the set of
symmetric matrices in $\mat{n\times n}{\BBR}$. \end{exa}
\begin{solu} Let ${\bf E}_{ij}\in\mat{n\times n}{\BBR}$ be the
$n\times n$ matrix with a $1$ on the $ij$-th position and $0$'s
everywhere else. For $1 \leq i < j \leq n$, consider  the
$\binom{n}{2} = \dfrac{n(n - 1)}{2}$ matrices $A_{ij} = {\bf E}_{ij}
+ {\bf E}_{ji}$. The $A_{ij}$ have a $1$ on the $ij$-th and $ji$-th
position and $0$'s everywhere else. They, together with the $n$
matrices ${\bf E}_{ii}, 1 \leq i \leq n$ constitute a basis for the
space of symmetric matrices. The dimension of this space is thus
$$  \frac{n(n - 1)}{2} + n = \frac{n(n + 1)}{2}.$$
\end{solu}
\begin{thm}\label{thm:base_matrix_is_invertible}
Let $\{\v{u}_1, \ldots , \v{u}_n\}$ be vectors in  $\BBR^n$. Then
the $\v{u}$'s form a basis if and only if the $n\times n$ matrix $A$
formed by taking the $\v{u}$'s as the columns of $A$ is invertible.
\end{thm}
\begin{pf}
Since we have the right number of vectors, it is enough to prove
that the $\v{u}$'s are linearly independent. But if $X =
\colvec{x_1 \\ x_2 \\ \vdots \\ x_n}$, then $$x_1\v{u}_1 + \cdots
+ x_n\v{u}_n = AX.
$$If $A$ is invertible, then $AX = {\bf 0}_n \implies X = A^{-1}{\bf 0}_n = {\bf
0}_n$, meaning that $x_1 = x_2 = \cdots = x_n = 0$, so the $\v{u}$'s
are linearly independent.

\bigskip

Conversely, assume that the $\v{u}$'s are linearly independent. Then
the equation $AX = {\bf 0}_n$ has a unique solution. Let $r =
\rank{A}$ and let $(P, Q)\in (\gl{n}{\BBR})^2$ be matrices such that
$A = P^{-1}D_{n,n,r}Q^{-1}$, where $D_{n,n,r}$ is the Hermite normal
form of $A$. Thus

$$ AX = {\bf 0}_n \implies P^{-1}D_{n,n,r}Q^{-1}X = {\bf 0}_n  \implies D_{n,n,r}Q^{-1}X = {\bf 0}_n. $$
Put $Q^{-1}X = \colvec{y_1 \\ y_2 \\ \vdots \\ y_n}$. Then

$$D_{n,n,r}Q^{-1}X = {\bf 0}_n \implies y_1\v{e}_1 + \cdots + y_r\v{e}_r = {\bf 0}_n,    $$
where $\v{e}_j$ is the $n$-dimensional column vector with a $1$ on
the $j$-th slot and $0$'s everywhere else. If $r< n$ then
$y_{r+1}, \ldots , y_n$ can be taken arbitrarily and so there
would not be a unique solution, a contradiction. Hence $r = n$ and
$A$ is invertible.   \end{pf}

\section*{\psframebox{Homework}}
\begin{pro} In problem \ref{exa:subspace_in_r5} we saw that
$$X = \left\{\begin{bmatrix}a\cr 2a - 3b\cr 5b\cr a + 2b\cr a\end{bmatrix}:a, b \in \BBR\right\}$$is a vector
  subspace of $\BBR^5$. Find a basis for this subspace.
\begin{answer}
  We have
  $$\begin{bmatrix}a\cr 2a - 3b\cr 5b\cr a + 2b\cr a\end{bmatrix} =
  a\begin{bmatrix}1\cr 2\cr 0\cr 1\cr 1\end{bmatrix} + b\begin{bmatrix}0\cr -3\cr 5\cr 2\cr
  0\end{bmatrix},$$so clearly the family $$\left\{\begin{bmatrix}1\cr 2\cr 0\cr 1\cr 1\end{bmatrix},\ \begin{bmatrix}0\cr -3\cr 5\cr 2\cr 0\end{bmatrix}
  \right\}$$ spans the subspace. To shew that this is a  linearly
  independent family, assume that
  $$ a\begin{bmatrix}1\cr 2\cr 0\cr 1\cr 1\end{bmatrix} + b\begin{bmatrix}0\cr -3\cr 5\cr 2\cr
  0\end{bmatrix} = \begin{bmatrix}0\cr 0\cr 0\cr 0\cr 0\end{bmatrix}.$$
Then it follows clearly that $a = b = 0$, and so this is a
linearly independent family. Conclusion:
$$\left\{\begin{bmatrix}1\cr 2\cr 0\cr 1\cr 1\end{bmatrix},\
\begin{bmatrix}0\cr -3\cr 5\cr 2\cr 0\end{bmatrix}
  \right\}$$  is a basis for the subspace.
  \end{answer}
  \end{pro}
\begin{pro} Let $\{\v{v}_1, \v{v}_2, \v{v}_3, \v{v}_4  , \v{v}_5 \}$ be a
basis for a vector space $V$ over a field $\BBF$. Prove that
$$\{\v{v}_1 + \v{v}_2, \v{v}_2 + \v{v}_3, \v{v}_3 + \v{v}_4, \v{v}_4 + \v{v}_5,
\v{v}_5 + \v{v}_1\}$$is also a basis for $V$.
\begin{answer} Suppose
$$\begin{array}{lll}\v{0} & = & a(\v{v}_1 + \v{v}_2) + b(\v{v}_2 + \v{v}_3) + c(\v{v}_3 + \v{v}_4) + d(\v{v}_4 +
\v{v}_5) + f(\v{v}_5 + \v{v}_1) \\
& = &  (a + f)\v{v}_1 + (a + b )\v{v}_2 + (b + c)\v{v}_3 + (c +
d)\v{v}_4 + (d + f )\v{v}_5.
\end{array}$$ Since $\{\v{v}_1, \v{v}_2, \ldots , \v{v}_5
\}$ are linearly independent, we have
$$a + f = 0,$$$$a + b = 0$$$$b + c = 0$$$$c + d = 0$$$$d + f =
0.$$Solving we find $a = b = c = d = f = 0,$ which means that the
$$\{\v{v}_1 + \v{v}_2, \v{v}_2 + \v{v}_3, \v{v}_3 + \v{v}_4, \v{v}_4 + \v{v}_5,
\v{v}_5 + \v{v}_1\}$$ are linearly independent. Since the
dimension of $V$ is 5, and we have 5 linearly independent vectors,
they must also be a basis for $V$.
\end{answer}
\end{pro}
\begin{pro} Find a basis for the solution space of
the system of $n + 1$ linear equations of $2n$ unknowns
$$x_1 + x_2 + \cdots + x_n = 0,$$
$$x_2 + x_3 + \cdots + x_{n + 1} = 0,$$
$$\vdots \vdots \vdots$$
$$x_{n + 1} + x_{n + 2} + \cdots + x_{2n} = 0.$$
\begin{answer} The matrix of coefficients is already in echelon form. The
dimension of the solution space is $n - 1$ and the following vectors
in $\BBR^{2n}$ form a basis for the solution space
$$a_1 = \begin{bmatrix}-1\cr 1\cr 0\cr \vdots \cr 0\cr -1\cr 1\cr 0\cr \vdots \cr 0 \cr\end{bmatrix},\ \ \
a_2 = \begin{bmatrix}-1\cr 0\cr 1\cr \vdots\cr 0\cr -1\cr 0\cr
1\cr \vdots \cr 0\cr\end{bmatrix},\ldots , \ \ \ a_{n - 1} =
\begin{bmatrix}-1\cr 0\cr  \ldots \cr 1\cr -1\cr 0\cr \vdots \cr
0\cr 1 \cr\end{bmatrix}.$$ (The ``second'' $-1$ occurs on the
$(n + 1)$-th position. The $1$'s migrate from the $2$nd and $(n + 2)$-th
position on $a_1$ to the $n$-th and $2n$-th  position on $a_{n
- 1}$.)
\end{answer}
\end{pro}
\begin{pro}
Prove that the set $V$ of skew-symmetric $n\times n$ matrices
is a subspace of $\mat{n\times n}{\BBR}$ and find its dimension.
Exhibit a basis for $V$.
\begin{answer}Let $A^T=-A$ and $B^T=-B$ be
skew symmetric $n\times n$ matrices. Then if $\lambda\in\BBR$ is a
scalar, then
$$ (A+\lambda B)^T = -(A+\lambda B), $$so $A+\lambda B$ is also skew-symmetric,
proving that $V$ is a subspace. Now consider  the set of
$$ 1+2+\cdots + (n-1)= \dfrac{(n-1)n}{2} $$matrices
$A_k$,  which are $0$ everywhere except in the $ij$-th and
$ji$-spot, where $1 \leq i < j \leq n,$ $a_{ij}=1=-a_{ji}$ and
$i+j=k$, $3\leq k \leq 2n-1$. (In the case $n=3$, they are
$$\begin{bmatrix} 0 & 1 & 0 \cr -1 & 0 & 0   \cr 0 & 0 & 0      \end{bmatrix}, \quad
\begin{bmatrix} 0 & 0 & 1 \cr 0 & 0 & 0   \cr -1 & 0 & 0      \end{bmatrix},
\begin{bmatrix} 0 & 0 & 0 \cr 0 & 0  & 1  \cr 0 & -1 & 0      \end{bmatrix},  $$
for example.) It is clear that these matrices form a basis for $V$
and hence $V$ has dimension $\dfrac{(n-1)n}{2}$.
\end{answer}
\end{pro}
\begin{pro}Prove that the set
$$X = \left\{(a, b, c, d)| b + 2c = 0\right\} \subseteq \BBR^4$$
is a vector subspace of $\BBR^4$. Find its dimension and a basis for
$X$.
 \begin{answer} Take $(\v{u}, \v{v})\in X^2$ and $\alpha \in \BBR$.
Then $$ \v{u} = \colvec{a \\ b \\ c \\ d}, \ \ b + 2c = 0, \ \ \
\v{v} = \colvec{a' \\ b' \\ c' \\ d'}, \ \ b' + 2c' = 0.
$$We have
$$\v{u} + \alpha\v{v} =\colvec{a + \alpha a' \\ b+ \alpha b' \\ c+ \alpha c' \\ d+ \alpha
d'},
$$and to demonstrate that $\v{u} + \alpha\v{v}\in X$ we
need to shew that $(b+ \alpha b') + 2(c+ \alpha c') = 0$. But this
is easy, as
$$ (b+ \alpha b') + 2(c+ \alpha c') = (b + 2c) +  \alpha (b' + 2c') = 0 + \alpha 0 = 0.
$$Now
$$\begin{bmatrix}a\cr b\cr c\cr d\end{bmatrix} =
\begin{bmatrix}a\cr -2c\cr c\cr d\end{bmatrix} = a\begin{bmatrix}1\cr 0\cr 0\cr 0\end{bmatrix} +
c\begin{bmatrix}0\cr -2\cr 1\cr 0\end{bmatrix} +
d\begin{bmatrix}0\cr 0\cr 0\cr 1\end{bmatrix}$$ It is clear that
$$\begin{bmatrix}1\cr 0\cr 0\cr 0\end{bmatrix}, \begin{bmatrix}0\cr -2\cr 1\cr 0\end{bmatrix},
\begin{bmatrix}0\cr 0\cr 0\cr 1\end{bmatrix}$$ are linearly independent and span
$X$. They thus constitute a basis for $X$.
\end{answer}
\end{pro}
\begin{pro}
Prove that the dimension of the vector subspace of lower
triangular $n\times n$ matrices is $\dfrac{n(n+1)}{2}$ and find a
basis for this space.
\begin{answer}
As a basis we may take the $\dfrac{n(n+1)}{2}$ matrices ${\bf
E}_{ij}\in \mat{n}{\BBF}$ for $1\leq j \leq i \leq n$.
\end{answer}
\end{pro}
\begin{pro}
Find a basis and the dimension of $$X = \span{\v{v_1} =
\colvec{1\\ 1\\ 1 \\ 1}, \ \ \ \v{v_2} = \colvec{1\\ 1\\ 1\\
0},\ \ \ \v{v_3} =\colvec{2\\ 2\\ 2\\ 1}}.
$$
\begin{answer} $\dim{X} = 2$, as basis one may take $\{\v{v_1}, \v{v_2}\}$.\end{answer}
\end{pro}
\begin{pro}
Find a basis and the dimension of $$X = \span{\v{v_1} =
\colvec{1\\ 1\\ 1\\ 1}, \ \ \ \v{v_2} = \colvec{1\\ 1\\
1\\ 0},\ \ \ \v{v_3} =\colvec{2\\ 2 \\ 2\\ 2}}.
$$
\begin{answer} $\dim{X} = 2$, as basis one may take $\{\v{v_1}, \v{v_2}\}$, since $\v{v_3} = 2\v{v_1}$.\end{answer}
\end{pro}
\begin{pro}
Find a basis and the dimension of $$X = \span{\v{v_1} =
\begin{bmatrix}1 & 0 \cr 0 & 1 \cr \end{bmatrix}, \ \ \
\v{v_2} = \begin{bmatrix} 1 & 0 \cr 2 & 0 \cr
\end{bmatrix},\ \ \ \v{v_3} =\begin{bmatrix}0 & 1  \cr 2 & 0 \cr \end{bmatrix},
 \ \v{v_4} =\begin{bmatrix}1 & -1 \cr 0 & 0 \cr \end{bmatrix} }.
$$
\begin{answer} $\dim{X} = 3$, as basis one may take $\{\v{v_1}, \v{v_2}, \v{v_3}\}$.\end{answer}
\end{pro}
\begin{pro}
 Prove that the set $$V = \left\{\begin{bmatrix} a& b & c \cr 0 & d & f \cr 0 & 0 & g \cr \end{bmatrix}
\in\mat{3\times 3}{\BBR}: a+b+c=0, \quad a+d+g=0\right\}  $$ is a
vector subspace of $\mat{3\times 3}{\BBR}$ and find a basis for it and
its dimension.
\begin{answer} Let $\lambda\in\BBR$. Observe that
$$\begin{bmatrix} a& b & c \cr 0 & d & f \cr 0 & 0 & g \cr \end{bmatrix}+
\lambda \begin{bmatrix} a'& b' & c' \cr 0 & d' & f' \cr 0 & 0 & g'
\cr \end{bmatrix}  =\begin{bmatrix} a + \lambda a'& b+ \lambda b' &
c+ \lambda c' \cr 0 & d+ \lambda d' & f+ \lambda f' \cr 0 & 0 & g+
\lambda g' \cr
\end{bmatrix} $$
and if $a+b+c = 0, \quad a+d+g=0,\quad  a'+b'+c' = 0, \quad
a'+d'+g'=0 $, then
$$a+\lambda a' +b+\lambda b'+c+\lambda c' = (a+b+c) + \lambda (a'+b'+c')= 0+\lambda 0= 0,   $$
and
$$a+\lambda a' +d+\lambda d'+g+\lambda g' = (a+d+g) + \lambda (a'+d'+g')= 0+\lambda 0= 0,   $$
proving that $V$ is a subspace.

\bigskip

Now, $a+b+c=0=a+d+g\implies a=-b-c, g=b+c-d$. Thus
$$\begin{bmatrix} a& b & c \cr 0 & d & f \cr 0 & 0 & g \cr \end{bmatrix} =
\begin{bmatrix} -b-c& b & c \cr 0 & d & f \cr 0 & 0 & b+c-d \cr
\end{bmatrix}= b\begin{bmatrix} -1& 1 & 0 \cr 0 & 0 & 0 \cr 0 & 0 & 1 \cr \end{bmatrix}
+c\begin{bmatrix} -1& 0 & 1 \cr 0 & 0 & 0 \cr 0 & 0 & 1 \cr
\end{bmatrix} +d\begin{bmatrix} 0& 0 & 0 \cr 0 & 1 & 0 \cr 0 & 0 & -1 \cr \end{bmatrix}
+f\begin{bmatrix} 0& 0 & 0 \cr 0 & 0 & 1 \cr 0 & 0 & 0 \cr
\end{bmatrix}.$$
It is clear that these four matrices span $V$ and are linearly
independent. Hence, $\dim V = 4$.
\end{answer}
\end{pro}
\section{Coordinates}
\begin{thm}
Let $\{\v{v}_1, \v{v}_2, \ldots, \v{v}_n\}$ be a basis for a
vector space $V$. Then any $\v{v}\in V$ has a unique
representation $$\v{v} = a_1\v{v}_1 + a_2\v{v}_2 + \cdots +
a_n\v{v}_n.    $$ \label{thm:unique_rep_in_basis}\end{thm}
\begin{pf}
Let $$\v{v} = b_1\v{v}_1 + b_2\v{v}_2 + \cdots + b_n\v{v}_n
$$be another representation of $\v{v}$. Then $$\v{0} =
(a_1 - b_1)\v{v}_1 + (a_2 - b_2)\v{v}_2 + \cdots + (a_n -
b_n)\v{v}_n.    $$Since $\{\v{v}_1, \v{v}_2, \ldots, \v{v}_n\}$
forms a basis for $V$, they are a linearly independent family. Thus
we must have $$ a_1 - b_1 = a_2 - b_2 = \cdots = a_n - b_n = 0_{\BBF
},
$$ that is
$$a_1 = b_1; a_2 = b_2; \cdots ;a_n = b_n,   $$proving uniqueness.
\end{pf}
\begin{df}
An {\em ordered basis} $\{\v{v}_1, \v{v}_2, \ldots, \v{v}_n\}$ of a
vector space $V$ is a basis where the order of the $\v{v}_k$ has
been fixed. Given an ordered basis $\{\v{v}_1, \v{v}_2, \ldots,
\v{v}_n\}$ of a vector space $V$, Theorem
\ref{thm:unique_rep_in_basis} ensures that there are unique $(a_1,
a_2, \ldots, a_n)\in \BBF ^n$ such that
$$\v{v} = a_1\v{v}_1 + a_2\v{v}_2 + \cdots +
a_n\v{v}_n.    $$ The $a_k$'s are called the {\em coordinates} of
the vector $\v{v}$.
\end{df}
\begin{exa} The standard ordered basis for $\BBR^3$ is ${\mathscr S} = \{\v{i}, \v{j}, \v{k}\}$.
The vector $\colvec{1\\ 2\\ 3}\in \BBR^3$ for example, has
coordinates $(1, 2, 3)_{\mathscr S}$. If the order of the basis were
changed to the ordered basis ${\mathscr S_1} =
\{\v{i}, \v{k}, \v{j}\}$, then $\colvec{1\\ 2\\
3}\in \BBR^3$ would have  coordinates $(1, 3, 2)_{\mathscr S_1}$.
\end{exa}

\begin{rem}
Usually, when we give a coordinate representation for a vector
$\v{v}\in \BBR^n$, we assume that we are using the standard basis.
\end{rem}

\begin{exa}
Consider the vector $\colvec{1\\ 2\\ 3}\in \BBR^3$ (given in
standard representation). Since
$$ \colvec{1\\ 2\\ 3} =    -1\colvec{1\\ 0\\
0} -1 \colvec{1\\ 1\\
0} +  3\colvec{1\\ 1\\
1},     $$under the
ordered basis $\dis{{\mathscr B}_1 = \left\{\colvec{1\\ 0\\
0}, \colvec{1\\ 1\\
0}, \colvec{1\\ 1\\
1}\right\}}$, $\colvec{1\\ 2\\ 3} $ has coordinates $(-1, -1,
3)_{\mathscr B_1}$. We write
$$ \colvec{1\\ 2\\ 3} = \colvec{-1\\ -1\\ 3}_{\mathscr B_1}.  $$
\end{exa}
\begin{exa}
The vectors of $$\mathscr{B}_1 = \left\{\colvec{1 \\ 1}, \colvec{1\\
2}\right\}$$are non-parallel, and so form a basis for
$\BBR^2$. So do the vectors $$\mathscr{B}_2 = \left\{\colvec{2 \\
1}, \colvec{1\\ -1}\right\}.$$Find the coordinates of  $\colvec{3\\
4}_{\mathscr{B}_1}$ in the base $\mathscr{B}_2$.
\end{exa}
\begin{solu}We are seeking $x, y$ such that
$$ 3\colvec{1\\ 1} + 4\colvec{1 \\ 2}  = x\colvec{2 \\ 1} + y\colvec{1\\ -1}
\implies \begin{bmatrix}1 & 1 \cr 1 & 2 \end{bmatrix}\colvec{3 \\
4} =
\begin{bmatrix}2 & 1 \cr 1 & -1 \cr
\end{bmatrix}\colvec{x \\ y}_{\mathscr{B}_2} . $$
Thus $$\begin{array}{lll} \colvec{x\\
y}_{\mathscr{B}_2} & = &
\begin{bmatrix}2 & 1 \cr 1 & -1 \cr
\end{bmatrix}^{-1}\begin{bmatrix}1 & 1 \cr 1 & 2 \end{bmatrix}\colvec{3 \\
4} \vspace{2mm}\\
& = & \begin{bmatrix} \frac{1}{3} & \frac{1}{3} \cr \frac{1}{3} & -\frac{2}{3} \cr \end{bmatrix} \begin{bmatrix}1 & 1 \cr 1 & 2 \end{bmatrix}\colvec{3 \\
4} \vspace{2mm} \\
& = & \begin{bmatrix}\frac{2}{3} & 1 \cr -\frac{1}{3} & -1 \cr  \end{bmatrix}\colvec{3 \\
4}  \vspace{2mm}\\
& = & \colvec{6 \\ -5}_{\mathscr{B}_2}.
\end{array}$$
Let us check  by expressing both vectors in the standard basis of
$\BBR^2$:
$$\colvec{3 \\ 4}_{\mathscr{B}_1} = 3\colvec{1\\ 1} + 4\colvec{1 \\ 2} = \colvec{7\\ 11},$$
$$\colvec{6 \\ -5}_{\mathscr{B}_2} = 6\colvec{2\\ 1} - 5\colvec{1 \\ -1} = \colvec{7\\ 11}.$$
\end{solu}

\bigskip
In general let us consider  bases $\mathscr{B}_1$ ,
$\mathscr{B}_2$ for the same vector space $V$. We want to convert
$X_{\mathscr{B}_1}$ to $Y_{\mathscr{B}_2}$. We let $A$ be the
matrix formed with the column vectors of $\mathscr{B}_1$ in the
given order and $B$ be the matrix formed with the column vectors of
$\mathscr{B}_2$ in the given order. Both $A$ and $B$ are
invertible matrices since the $\mathscr{B}$'s are bases, in view
of Theorem \ref{thm:base_matrix_is_invertible}. Then we must have
$$AX _{\mathscr{B}_1} = BY_{\mathscr{B}_2} \implies Y_{\mathscr{B}_2} =
B^{-1}AX_{\mathscr{B}_1}.
$$Also, $$X _{\mathscr{B}_1} = A^{-1}BY_{\mathscr{B}_2} . $$

This prompts the following definition.
\begin{df}
Let ${\mathscr B_1} = \{\v{u}_1, \v{u}_2, \ldots, \v{u}_n\}$ and
${\mathscr B_2} = \{\v{v}_1, \v{v}_2, \ldots, \v{v}_n\}$ be two
ordered  bases for a vector space $V$.    Let $A\in \mat{n\times n}{
\BBF }$ be the matrix having the $\v{u}$'s as its columns and let
$B\in \mat{n\times n}{ \BBF }$ be the matrix having the $\v{v}$'s as
its columns. The matrix $P = B^{-1}A$ is called the {\em transition
matrix} from ${\mathscr B_1}$ to ${\mathscr B_2}$ and the matrix
$P^{-1} = A^{-1}B$ is called the {\em transition matrix} from
${\mathscr B_2}$ to ${\mathscr B_1}$. \index{matrix!transition}
\end{df}
\begin{exa}
Consider the bases  of $\BBR^3$ $$ \mathscr{B}_1 = \left\{\colvec{1 \\ 1 \\
1}, \colvec{1 \\ 1 \\ 0},  \colvec{1 \\ 0 \\ 0} \right\},
$$
 $$ \mathscr{B}_2 = \left\{\colvec{1 \\ 1 \\
-1}, \colvec{1 \\ -1 \\ 0},  \colvec{2 \\ 0 \\ 0} \right\}.
$$Find the transition matrix from  ${\mathscr B_1}$ to ${\mathscr
B_2}$ and also the transition matrix from  ${\mathscr B_2}$ to
${\mathscr B_1}$. Also find the coordinates of $\colvec{1\\
2 \\ 3}_{\mathscr{B}_1}$ in terms of $\mathscr{B}_2$.
\end{exa}
\begin{solu}Let $$A = \begin{bmatrix} 1 & 1 & 1 \cr 1 & 1 & 0 \cr 1&
0 & 0 \cr
\end{bmatrix}, \ \ B = \begin{bmatrix} 1 & 1 & 2 \cr 1 & -1 & 0 \cr
-1 & 0 & 0 \cr
\end{bmatrix}.$$
The transition matrix from  ${\mathscr B_1}$ to ${\mathscr B_2}$
is
$$\begin{array}{lll}P & = & B^{-1}A \vspace{2mm}\\
& = &\begin{bmatrix} 1 & 1 & 2 \cr 1 & -1 & 0 \cr -1 & 0 & 0 \cr
\end{bmatrix}^{-1}\begin{bmatrix} 1 & 1 & 1 \cr 1 & 1 & 0 \cr 1&
0 & 0 \cr
\end{bmatrix}\vspace{2mm}\\ &  = &  \begin{bmatrix} 0 & 0 & -1 \cr 0 & -1 & -1 \cr \frac{1}{2} & \frac{1}{2} & 1 \cr \end{bmatrix}  \begin{bmatrix} 1 & 1 & 1 \cr 1 & 1 & 0 \cr 1&
0 & 0 \cr
\end{bmatrix} \vspace{2mm}\\ &  = &     \begin{bmatrix} -1 & 0 & 0 \cr -2 &  -1 & 0 \cr 2 & 1 & \frac{1}{2}  \end{bmatrix}.  \end{array}   $$
The transition matrix from  ${\mathscr B_2}$ to ${\mathscr B_1}$
is $$P^{-1} = \begin{bmatrix} -1 & 0 & 0 \cr -2 & -1 & 0  \cr 2 &
1 & \frac{1}{2}  \end{bmatrix}^{-1} = \begin{bmatrix} -1 & 0 & 0
\cr 2 & -1 & 0 \cr 0 & 2 & 2 \cr\end{bmatrix}.
$$Now,
$$  Y_{\mathscr{B}_2} = \begin{bmatrix} -1 & 0 & 0 \cr -2 & -1 & 0  \cr 2 &
1 & \frac{1}{2}  \end{bmatrix}\colvec{1\\
2 \\ 3}_{\mathscr{B}_1}  = \colvec{-1 \\ -4 \\
\frac{11}{2}}_{\mathscr{B}_2}. $$ As a check, observe that in the
standard basis for $\BBR^3$
$$ \colvec{1\\
2 \\ 3}_{\mathscr{B}_1}  = 1\colvec{1\\ 1\\ 1} + 2\colvec{1\\ 1\\
0 } + 3\colvec{1\\ 0 \\ 0} = \colvec{6\\ 3 \\ 1},
$$
$$ \colvec{-1\\
-4 \\ \frac{11}{2}}_{\mathscr{B}_2}  = -1\colvec{1\\ 1\\ -1}  - 4\colvec{1\\ -1\\
0 } + \frac{11}{2}\colvec{2\\ 0 \\ 0} = \colvec{6\\ 3 \\ 1}.
$$
\end{solu}




\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
\begin{enumerate}
\item Prove that the following vectors are linearly independent in
$\BBR^4$
$$\v{a}_1 = \begin{bmatrix} 1 \cr 1 \cr 1 \cr 1\cr  \end{bmatrix}, \ \
\v{a}_2 =\begin{bmatrix} 1 \cr 1 \cr -1 \cr -1\cr
\end{bmatrix}, \ \
\v{a}_3 =\begin{bmatrix} 1 \cr -1 \cr 1 \cr -1\cr
\end{bmatrix}, \ \
\v{a}_4 =\begin{bmatrix} 1 \cr -1 \cr -1 \cr 1\cr
\end{bmatrix}. $$

\item Find the coordinates of $\colvec{1 \\ 2 \\ 1
\\ 1}$ under the ordered basis $(\v{a}_1, \v{a}_2,
\v{a}_3, \v{a}_4)$.

\item Find the coordinates of $\colvec{1 \\ 2 \\
1 \\ 1}$ under the ordered basis $(\v{a}_1, \v{a}_3, \v{a}_2,
\v{a}_4).$

\end{enumerate}
\begin{answer} \begin{enumerate}\item It is enough to prove that the matrix
$$A = \begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix}    $$
is invertible. But an easy computation shews that
$$ A^2 = \begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix}^2 = 4{\bf I}_4,     $$whence the inverse sought is
$$ A^{-1} = \frac{1}{4}A = \frac{1}{4}\begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix}  = \begin{bmatrix} 1/4 & 1/4 & 1/4 & 1/4  \cr
1/4 & 1/4 & -1/4 & -1/4 \cr 1/4 & -1/4 & 1/4 & -1/4  \cr 1/4 & -1/4
& -1/4 & 1/4  \cr\end{bmatrix}. $$ \item Since the $\v{a}_k$ are
four linearly independent vectors in $\BBR^4$ and $\dim \BBR^4 = 4$,
they form a basis for $\BBR^4$. Now, we want to solve
$$A\colvec{x\\ y\\ z \\ w} = \begin{bmatrix} 1 & 1 & 1 & 1 \cr
 1 & 1 & -1 & -1\cr
 1 & -1 & 1 & -1\cr
 1 & -1 & -1 & 1\cr\end{bmatrix} \colvec{x\\ y\\ z \\ w} =   \colvec{1 \\ 2 \\ 1
\\ 1}     $$ and so
$$\colvec{x\\ y\\ z \\ w} = A^{-1}\colvec{1 \\ 2 \\ 1
\\ 1} = \begin{bmatrix} 1/4 & 1/4 & 1/4 & 1/4  \cr
1/4 & 1/4 & -1/4 & -1/4 \cr 1/4 & -1/4 & 1/4 & -1/4  \cr 1/4 &
-1/4 & -1/4 & 1/4  \cr\end{bmatrix}\colvec{1 \\ 2 \\ 1
\\ 1} = \colvec{5/4 \\ 1/4 \\ -1/4 \\ -1/4}.  $$
It follows that
$$\colvec{1 \\ 2 \\ 1
\\ 1} = \frac{5}{4}\begin{bmatrix} 1 \cr 1 \cr 1 \cr 1\cr  \end{bmatrix}
+ \frac{1}{4}\begin{bmatrix} 1 \cr 1 \cr -1 \cr -1\cr
\end{bmatrix} - \frac{1}{4}\begin{bmatrix} 1 \cr -1 \cr 1 \cr -1\cr
\end{bmatrix} - \frac{1}{4}\begin{bmatrix} 1 \cr -1 \cr -1 \cr 1\cr
\end{bmatrix}.        $$ The coordinates sought are $$ \left(\frac{5}{4}, \frac{1}{4}, -\frac{1}{4}, -\frac{1}{4}\right).  $$

\item Since we have $$\colvec{1 \\ 2 \\ 1
\\ 1} = \frac{5}{4}\begin{bmatrix} 1 \cr 1 \cr 1 \cr 1\cr
\end{bmatrix}- \frac{1}{4}\begin{bmatrix} 1 \cr -1 \cr 1 \cr -1\cr
\end{bmatrix}
+ \frac{1}{4}\begin{bmatrix} 1 \cr 1 \cr -1 \cr -1\cr
\end{bmatrix}  - \frac{1}{4}\begin{bmatrix} 1 \cr -1 \cr -1 \cr 1\cr
\end{bmatrix},        $$ the
coordinates sought are $$ \left(\frac{5}{4}, -\frac{1}{4},
\frac{1}{4}, -\frac{1}{4}\right).  $$
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro}Consider the matrix
$$A(a) = \begin{bmatrix} a &1 &1&1\cr 0&1&0 & 1 \cr 1& 0& a &1  \cr 1&1&1&1 \cr  \end{bmatrix}.$$
\begin{dingautolist}{202}
\item Determine all $a$ for which $A(a)$ is not invertible. \item
Find the inverse of $A(a)$ when $A(a)$ is invertible. \item Find
the transition matrix from the basis $${\mathscr B}_1 = \colvec{1
\\ 1\\ 1\\ 1}, \colvec{1\\ 1\\ 1\\ 0}, \colvec{1\\ 1\\ 0 \\ 0}, \colvec{1\\ 0 \\ 0 \\ 0}
$$to the basis $${\mathscr B}_2 = \colvec{a
\\ 0\\ 1\\ 1}, \colvec{1\\ 1\\ 0\\ 1}, \colvec{1\\ 0\\ a\\ 1}, \colvec{1\\ 1 \\ 1 \\ 1}.$$
\end{dingautolist}
\begin{answer}[1] $a = 1$, [2] $(A(a))^{-1} = \begin {bmatrix}  \dfrac{1}{ a-1}&0&0&- \dfrac{1}{ a-1}\cr -1&1-a&-1&a+1\cr
  -\dfrac{1}{a-1}&-1&0&\dfrac{a}{a-1}\cr 1&a
&1&-a-1\end {bmatrix}$ [3] $$  \begin{bmatrix} 0& \dfrac{1}{a-1} &
\dfrac{1}{a-1}& \dfrac{1}{a-1}\cr 0&-a-1&-a& -1\cr 0&
-\dfrac{a}{a-1}&-\dfrac{a}{a-1}&-\dfrac{1}{ a-1}\cr 1&2+a&a+1&1\cr\end{bmatrix}  $$
\end{answer}
\end{pro}
\end{multicols}
\chapter{Linear Transformations}

\section{Linear Transformations}

\begin{df}
Let $\vecspace{V}{+}{\cdot}{ \BBF }$ and $\vecspace{W}{+}{\cdot}{
\BBF }$ be vector spaces over the same field $\BBF$. A {\em linear
transformation} or {\em homomorphism} \index{linear transformation}
\index{linear homomorphism}
$$\fun{L}{\v{a}}{L(\v{ a})}{V}{W},$$is a function which
is \begin{itemize} \item {\bf Linear:} $L(\v{a} + \v{b}) = L(\v{a})
+ L(\v{b}),$ \item {\bf Homogeneous:} $L(\alpha\v{a}) = \alpha
L(\v{a}),$ for $\alpha\in \BBF.$
\end{itemize}
\end{df}
\begin{rem}
It is clear that the above two conditions can be summarised
conveniently into $$L(\v{a} + \alpha\v{b}) = L(\v{a}) + \alpha
L(\v{b}).       $$
\end{rem}

\begin{exa}
Let $$\fun{L}{A}{\tr{A}}{\mat{n\times n}{\BBR}}{\BBR}.  $$ Then $L$
is linear, for if $(A, B)\in\mat{n\times n}{\BBR}$, then $$L(A +
\alpha B) = \tr{A + \alpha B} = \tr{A} + \alpha\tr{B} = L(A) +
\alpha L(B).
$$
\end{exa}
\begin{exa}
Let $$\fun{L}{A}{A^T}{\mat{n\times n}{\BBR}}{\mat{n\times n}{\BBR}}.
$$ Then $L$ is linear, for if $(A, B)\in\mat{n\times n}{\BBR}$, then $$L(A +
\alpha B) = (A + \alpha B)^T = A^T + \alpha B^T = L(A) + \alpha
L(B).
$$

\end{exa}
\begin{exa}
For a point $(x, y)\in\BBR^2$, its reflexion about the $y$-axis is
$(-x, y)$. Prove that
$$\fun{R}{(x, y)}{(-x, y)}{\BBR^2}{\BBR^2}  $$is linear.
\end{exa}
\begin{solu}Let $(x_1, y_1)\in\BBR^2$, $(x_2, y_2)\in\BBR^2$, and
$\alpha\in\BBR$. Then
$$\begin{array}{lll}
R((x_1, y_1) + \alpha (x_2, y_2) ) & = & R((x_1 + \alpha x_2, y_1
+ \alpha y_2)) \\
& = & (-(x_1 + \alpha x_2), y_1 + \alpha y_2) \\
& = & (-x_1, y_1) + \alpha (-x_2, y_2) \\
& = & R((x_1, y_1)) + \alpha  R((x_2, y_2)),
\end{array}$$whence $R$ is linear.
\end{solu}

\begin{exa}
Let $L:\BBR^2\rightarrow \BBR^4$ be a linear transformation with
$$L\colvec{1 \\ 1} = \colvec{-1 \\ 1 \\ 2\\ 3}; \ \ \ \ L\colvec{-1 \\ 1} = \colvec{2 \\ 0 \\ 2\\ 3}.
$$Find $L\colvec{5 \\ 3}.$
\end{exa}
\begin{solu}Since $$ \colvec{5 \\ 3} = 4\colvec{1 \\ 1} - \colvec{-1
\\ 1},  $$ we have
$$ L\colvec{5 \\ 3} = 4 L\colvec{1 \\ 1} -  L\colvec{-1 \\ 1}  = 4\colvec{-1 \\ 1 \\ 2\\ 3} -   \colvec{2 \\ 0 \\ 2\\ 3}
= \colvec{-6 \\ 4\\ 6\\ 9}.    $$
\end{solu}

\begin{thm}\label{thm:linear_takes_0_to_0}Let $\vecspace{V}{+}{\cdot}{ \BBF }$ and $\vecspace{W}{+}{\cdot}{ \BBF }$ be
vector spaces over the same field $\BBF$, and let $L:V \rightarrow
W$ be a linear transformation. Then
\begin{itemize}
\item $L(\v{0}_V) = \v{0}_W$. \item $\forall \v{x}\in V, L(-\v{x})
= - L(\v{x}).$
\end{itemize}
\end{thm} \begin{pf} We have $$L(\v{0}_V)  = L(\v{0}_V +
\v{0}_V) = L(\v{0}_V) + L(\v{0}_V),$$ hence $$ L(\v{0}_V) -
L(\v{0}_V) = L(\v{0}_V).$$ Since
$$L(\v{0}_V) - L(\v{0}_V) = \v{0}_W,$$we obtain the
first  result.

\bigskip

Now $$ \v{0}_W = L(\v{0}_V) = L(\v{x}+ (-\v{x}))= L(\v{x}) +
L(-\v{x}),
$$from where the second result follows.
\end{pf}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Consider $L:\BBR^3 \rightarrow \BBR^3$,
$$L\colvec{x \\ y \\ z} = \colvec{x - y - z \\ x + y + z \\ z } .$$
Prove that $L$ is linear. \label{exa:linear_1}\begin{answer} Let
$\alpha\in \BBR.$ Then
$$\begin{array}{lll}
L\colvec{x + \alpha a\\ y + \alpha b \\ z + \alpha c} & = &
\colvec{(x + \alpha a) - (y + \alpha b) - (z + \alpha c) \\ (x +
\alpha a) + (y + \alpha b) + (z + \alpha c) \\  z + \alpha c} \vspace{2mm} \\
& = & \colvec{x - y - z \\ x + y + z \\ z } + \alpha \colvec{a - b
- c \\ a + b + c\\ c } \vspace{2mm} \\
& = & L\colvec{x \\ y \\ z} + \alpha L\colvec{a \\ b \\ c}, \\
\end{array}$$proving that $L$ is a linear transformation.
\end{answer}
\end{pro}
\begin{pro}
Let $A\in\gl{n}{\BBR}$ be  a fixed matrix.  Prove that
$$\fun{L}{H}{-A^{-1}HA^{-1}}{\mat{n\times n}{\BBR}}{\mat{n\times n}{\BBR}}$$is a
linear transformation. \begin{answer} $$\begin{array}{lll} L(H +
\alpha H') & = & -A^{-1}(H + \alpha
H')A^{-1}\\
& = & -A^{-1}HA^{-1} + \alpha(-A^{-1}H'A^{-1}) \\
& = & L(H) + \alpha L(H'), \\
\end{array}$$proving that $L$ is linear.
\end{answer}
\end{pro}
\begin{pro} \index{convex set}
Let $V$ be a vector space and let $S \subseteq V.$ The set $S$ is
said to be {\em convex} if $\forall \alpha \in [0; 1], \forall {\bf
x, y} \in S$, $(1 - \alpha){\bf x} + \alpha {\bf y} \in S$, that is,
for any two points in $S$, the straight line joining them also
belongs to $S$. Let $T: V \rightarrow W$ be a linear transformation
from the vector space $V$ to the vector space $W$. Prove that $T$
maps convex sets into convex sets. \begin{answer} Let $S$ be convex
and let $\v{a}, \v{b}\in T(S).$ We must prove that $\forall \alpha
\in [0; 1], \ (1 - \alpha)\v{a} + \alpha\v{b} \in T(S).$ But since
$\v{a}, \v{b}$ belong to $T(S)$, $\exists \v{x}\in S, \v{y}\in S$
with $T(\v{x}) = \v{a}, T(\v{y}) = \v{b}$. Since $S$ is convex, $(1
- \alpha)\v{x} + \alpha\v{y} \in S$. Thus
$$T((1 -
\alpha)\v{x} + \alpha\v{y}) \in T(S),$$ which means that
$$(1 -
\alpha)T(\v{x}) + \alpha T(\v{y}) \in T(S),$$that is,
$$\ (1 - \alpha)\v{a} +
\alpha\v{b} \in T(S),$$as we wished to show.

\end{answer}
\end{pro}


\end{multicols}


\section{Kernel and Image of a Linear
Transformation}
\begin{df}\index{linear transformation!kernel} \index{linear transformation!image}
Let  $\vecspace{V}{+}{\cdot}{ \BBF }$ and $\vecspace{W}{+}{\cdot}{
\BBF }$ be vector spaces over the same field $\BBF$, and
$$\fun{T}{\v{v}}{T(\v{v})}{V}{W}  $$be a linear
transformation. The {\em kernel} of $T$ is the set
$$\ker{T} = \{\v{v}\in V: T(\v{v}) = \v{0}_W\}.    $$
The {\em image} of $T$ is the set
$$\im{T} = \{\v{w}\in W:\exists \v{v}\in V\ {\rm such\ that\ } T(\v{v}) = \v{w}\} = T(V).    $$
\end{df}
\begin{rem}
Since $T(\v{0}_V) = \v{0}_W$ by Theorem
\ref{thm:linear_takes_0_to_0}, we have $\v{0}_V\in\ker{T}$ and
$\v{0}_W\in \im{T}$.
\end{rem}
\begin{thm}
Let  $\vecspace{V}{+}{\cdot}{ \BBF }$ and $\vecspace{W}{+}{\cdot}{
\BBF }$ be vector spaces over the same field $\BBF$, and
$$\fun{T}{\v{v}}{T(\v{v})}{V}{W}  $$be a linear
transformation. Then $\ker{T}$ is a vector subspace of $V$ and
$\im{T}$ is a vector subspace of $W$.
\end{thm}
\begin{pf}
Let $(\v{v}_1,\v{v}_2)\in (\ker{T})^2$ and $\alpha \in \BBF$. Then
$T(\v{v}_1) = T(\v{v}_2) = \v{0}_W$. We must prove that $\v{v}_1 +
\alpha \v{v}_2\in\ker{T}$, that is, that $T(\v{v}_1 + \alpha
\v{v}_2) = \v{0}_W$. But
$$T(\v{v}_1 + \alpha \v{v}_2) = T(\v{v}_1) + \alpha
T(\v{v}_2) = \v{0}_W + \alpha \v{0}_W = \v{0}_W,
$$proving that $\ker{T}$ is a subspace of $V$.

\bigskip

Now, let $(\v{w}_1,\v{w}_2)\in (\im{T})^2$ and $\alpha \in \BBF$.
Then $\exists (\v{v}_1,\v{v}_2)\in V^2$ such that $T(\v{v}_1) =
\v{w}_1$ and $T(\v{v}_2) = \v{w}_2$. We must prove that $\v{w}_1 +
\alpha \v{w}_2\in\im{T}$, that is, that $\exists \v{v}$ such that
$T(\v{v}) = \v{w}_1 + \alpha \v{w}_2$. But
$$\v{w}_1 + \alpha \v{w}_2 =  T(\v{v}_1) + \alpha
T(\v{v}_2) = T(\v{v}_1 + \alpha \v{v}_2),
$$ and so we may take $\v{v} = \v{v}_1 + \alpha \v{v}_2$. This
 proves that $\im{T}$ is a subspace of $W$.


\end{pf}

\begin{thm}
Let  $\vecspace{V}{+}{\cdot}{ \BBF }$ and $\vecspace{W}{+}{\cdot}{
\BBF }$ be vector spaces over the same field $\BBF$, and
$$\fun{T}{\v{v}}{T(\v{v})}{V}{W}  $$be a linear
transformation. Then $T$ is injective if and only if $\ker{T} =
\{\v{0}_V\}$.
\end{thm}
\begin{pf}
Assume that $T$ is injective. Then there is a unique $\v{x}\in V$
mapping to $\v{0}_W$:
$$T(\v{x}) = \v{0}_W.
$$ By Theorem \ref{thm:linear_takes_0_to_0},  $T(\v{0}_V) = \v{0}_W$, i.e., a linear
transformation takes the zero vector of one space to the zero
vector of the target space, and so we must have $\v{x} = \v{0}_V$.

\bigskip

Conversely, assume that $\ker{T} = \{\v{0}_V\}$, and that
$T(\v{x}) = T(\v{y})$. We must prove that $\v{x} = \v{y}$. But
$$\begin{array}{lll}T(\v{x}) = T(\v{y}) & \implies &
T(\v{x}) - T(\v{y}) = \v{0}_W\\ & \implies  & T(\v{x} - \v{y}) =
\v{0}_W \\ & \implies  & (\v{x} - \v{y})\in\ker{T}\\ & \implies  &
\v{x} - \v{y} =
\v{0}_V \\
& \implies  &\v{x} = \v{y},
\end{array}$$as we wanted to shew.  \end{pf}
\begin{thm}[Dimension Theorem]
Let  $\vecspace{V}{+}{\cdot}{ \BBF }$ and $\vecspace{W}{+}{\cdot}{
\BBF }$ be vector spaces of finite dimension over the same field
$\BBF$, and
$$\fun{T}{\v{v}}{T(\v{v})}{V}{W}  $$be a linear
transformation. Then $$\dim\ker{T} + \dim\im{T} = \dim V.  $$
\label{thm:dimension_theorem}\end{thm}
\begin{pf}
Let $\{\v{v}_1, \v{v}_2, \ldots , \v{v}_k\}$ be a basis for
$\ker{T}$. By virtue of Theorem \ref{thm:enlargement_of_basis}, we
may extend this to a basis ${\mathscr A} = \{\v{v}_1, \v{v}_2,
\ldots , \v{v}_k, \v{v}_{k +1}, \v{v}_{k +2}, \ldots , \v{v}_n\}$
of $V$. Here $n = \dim V$. We will now shew that ${\mathscr
B}=\{T(\v{v}_{k +1}), T(\v{v}_{k +2}), \ldots , T(\v{v}_n)\}$ is a
basis for $\im{T}$.  We prove that (i) ${\mathscr B}$ spans
$\im{T}$, and (ii) ${\mathscr B}$ is a linearly independent
family.

\bigskip
Let $\v{w}\in \im{T}$. Then $\exists \v{v}\in V$ such that
$T(\v{v}) = \v{w}$. Now since ${\mathscr A}$ is a basis for $V$ we
can write
$$ \v{v} = \sum _{i = 1} ^n \alpha_i\v{v}_i.  $$Hence
$$\v{w} =  T(\v{v}) = \sum _{i = 1} ^n \alpha_iT(\v{v}_i) = \sum _{i = k + 1} ^n \alpha_iT(\v{v}_i), $$
since $T(\v{v}_i) = \v{0}_W$ for $1 \leq i \leq k$. Thus
${\mathscr B}$ spans $\im{T}$.

\bigskip

To prove the linear independence of the ${\mathscr B}$ assume that
$$\v{0}_W = \sum _{i = k + 1} ^n \beta_iT(\v{v}_i) = T\left(\sum _{i = k + 1} ^n \beta_i\v{v}_i \right). $$
This means that $\sum _{i = k + 1} ^n \beta_i\v{v}_i\in\ker{T}$,
which is impossible unless $\beta_{k + 1} = \beta_{k + 2} = \cdots =
\beta_n = 0_{\BBF }$.

\end{pf}
\begin{cor}
If $\dim V = \dim W < +\infty$, then $T$ is injective if and only
if it is surjective.
\end{cor}
\begin{pf} Simply observe that if $T$ is injective then $\dim\ker{T} = 0$, and if $T$ is surjective then $\im{T} =T(V) = W$, so that $\dim\im{T} = \dim W$. \end{pf}

\begin{exa}
 Let $$\fun{L}{A}{A^T - A}{\mat{2\times 2}{\BBR}}{\mat{2\times 2}{\BBR}}.  $$ Observe that $L$ is linear. Determine $\ker{L}$ and $\im{L}.$
\end{exa}
\begin{solu}Put  $A = \begin{bmatrix} a & b \cr c & d\end{bmatrix}$
and assume $L(A) = {\bf 0}_2$. Then
$$ \begin{bmatrix} 0 & 0 \cr 0 & 0\end{bmatrix} =L(A) =
\begin{bmatrix} a & c \cr b & d\end{bmatrix} - \begin{bmatrix} a & b \cr c & d\end{bmatrix} =
(c - b)\begin{bmatrix} 0& 1\cr -1 & 0\end{bmatrix}. $$This means
that $c = b$. Thus $$\ker{L} = \left\{\begin{bmatrix} a & b \cr b &
d\end{bmatrix}: (a, b, d)\in \BBR^3\right\},  $$
$$\im{L} = \left\{\begin{bmatrix} 0 & k \cr -k
& 0\end{bmatrix}:  k\in \BBR\right\}.  $$ This means that
$\dim\ker{L} = 3$, and so $\dim\im{L} = 4 - 3 = 1$.
\end{solu}
\begin{exa}
Consider the linear transformation $L:\mat{2\times 2}{\BBR}
\rightarrow \BBR_3[X] $ given by
$$L\begin{bmatrix} a & b \cr c & d \cr  \end{bmatrix} = (a + b)X^2 + (a - b)X^3.   $$
Determine $\ker{L}$ and $\im{L}$. \end{exa} \begin{solu}We have $$0
= L\begin{bmatrix} a & b \cr c & d \cr
\end{bmatrix} = (a + b)X^2 + (a - b)X^3 \implies a+b = 0, \ a-b = 0, \implies a = b = 0.   $$
Thus
$$ \ker{L}  = \left\{\begin{bmatrix} 0 & 0 \cr c & d \cr  \end{bmatrix}: (c, d)\in\BBR^2 \right\}. $$
Thus $\dim\ker{L} = 2$ and hence $\dim\im{L} = 2$. Now
$$(a + b)X^2 + (a - b)X^3 = a(X^2 + X^3) + b(X^2 - X^3).   $$
Clearly $X^2 + X^3$, and $X^2 - X^3$ are linearly independent and
span $\im{L}$. Thus $$\im{L} = \span{X^2 + X^3, X^2 - X^3}.  $$
\end{solu}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
In problem \ref{exa:linear_1} we saw that $L:\BBR^3 \rightarrow
\BBR^3$,
$$L\colvec{x \\ y \\ z} = \colvec{x - y - z \\ x + y + z \\ z }
$$is linear. Determine $\ker{L}$ and $\im{L}$.
\begin{answer} Assume $\colvec{x\\ y \\ z}\in\ker{L}$. Then
$$L\colvec{x \\ y \\ z} = \colvec{0 \\ 0 \\ 0},  $$that is
$$x - y - z = 0,   $$
$$x + y + z = 0,   $$
$$z = 0.$$ This implies that $x - y = 0$ and $x + y = 0$, and so
$x = y = z = 0$. This means that $$\ker{L} = \left\{\colvec{0
\\ 0 \\ 0}\right\},$$and $L$ is injective.

\bigskip

By the Dimension Theorem \ref{thm:dimension_theorem}, $\dim\im{L}
= \dim V - \dim\ker{L} = 3 - 0 = 3$, which means that
 $$\im{L} = \BBR^3$$ and $L$ is surjective.
\end{answer}
\end{pro}
\begin{pro}
Consider the function $L:\BBR^4 \to \BBR^2$ given by
$$ L\colvec{x\\ y \\ z\\ w} = \colvec{x+y \\ x-y}. $$
\begin{enumerate}
\item Prove that $L$ is linear.
\item Determine $\ker{L}$.
\item Determine $\im{L}$.
\end{enumerate}
\begin{answer}
\noindent
\begin{enumerate}
\item  If $a$ is any scalar,
$$  L\left(\colvec{x\\ y \\ z\\ w}+a\colvec{x'\\ y' \\ z'\\ w'}\right)=
L\colvec{x+ax'\\ y+ay'\\ z+az' \\ w+aw'} = \colvec{(x+ax')+(y+ay')\\
(x+ax')-(y+ay')}=\colvec{x+y\\ x-y}+a\colvec{x'+y'\\ x'-y'}=
L\colvec{x\\ y \\ z\\ w}+aL\colvec{x'\\ y' \\ z'\\ w'},  $$ whence
$L$ is linear.
\item We have,$$ L\colvec{x\\ y \\ z\\ w} = \colvec{x+y \\ x-y}=\colvec{0\\ 0}\implies x=y, x=-y \implies x=y=0 \implies \ker{L}=
\left\{\colvec{0\\ 0 \\ z\\ w}: z\in \BBR, w\in\BBR\right\}. $$ Thus
$\dim\ker{L}=2$. In particular, the transformation is not injective.
\item From the previous part, $\dim\im{L}=4-2=2$. Since $\im{L}\subseteq
\BBR^2$ and $\dim \im{L}=2$, we must have $\im{L}=\BBR^2$. In
particular, the transformation is surjective.

\end{enumerate}
\end{answer}

\end{pro}

\begin{pro}
Let $$\fun{L}{\v{a}}{L(\v{a})}{\BBR^3}{\BBR^4}$$ satisfy
$$L\colvec{1\\ 0 \\ 0} = \colvec{1 \\ 0 \\ -1 \\ 0}; \ \ \  L\colvec{1\\ 1 \\ 0} = \colvec{2 \\ -1 \\ 0 \\ 0}; \ \ \
 L\colvec{0\\ 0 \\ 1} = \colvec{1 \\ -1 \\ 1 \\ 0}.$$ Determine
 $\ker{L}$ and $\im{L}$.
\begin{answer} Assume that $\colvec{a\\ b \\ c}\in\ker{T}$,
$$\colvec{a\\ b \\ c} = (a - b)\colvec{1\\ 0 \\ 0} + b\colvec{1\\ 1 \\ 0} + c\colvec{0\\ 0 \\ 1}. $$
Then $$\begin{array}{lll}\colvec{0 \\ 0 \\ 0\\ 0 } &  = &  T\colvec{a\\ b \\
c}\\
& = & (a -
b)T\colvec{1\\ 0 \\ 0} + bT\colvec{1\\ 1 \\ 0} + cT\colvec{0\\ 0 \\
1}\vspace{2mm}\\
& = & (a - b)\colvec{1 \\ 0 \\ -1 \\ 0} + b\colvec{2 \\ -1 \\ 0 \\
0} + c\colvec{1 \\ -1 \\ 1 \\ 0} \vspace{2mm}\\
& = & \colvec{a + b + c \\ -b - c \\ -a + b + c \\
0}.\end{array}$$ It follows that $a = 0$ and $b = -c$. Thus
$$ \ker{T} = \left\{c\colvec{0 \\ -1 \\ 1}: c\in\BBR\right\},$$and
so $\dim\ker{T} = 1$.

\bigskip
By the Dimension Theorem \ref{thm:dimension_theorem}, $$\dim\im{T}
= \dim V - \dim\ker{T} = 3 - 1 = 2.$$ We readily see that
$$\colvec{2 \\ -1 \\ 0 \\ 0} = \colvec{1 \\ 0 \\ -1 \\ 0} + \colvec{1 \\ -1 \\ 1 \\ 0},
$$ and so $$\im{T} = \span{\colvec{1 \\ 0 \\ -1 \\ 0}, \colvec{1 \\ -1 \\ 1 \\ 0}}.  $$
\end{answer}
\end{pro}
\begin{pro} It is easy to see that $L:\BBR^2 \rightarrow
\BBR^3$,$$L\colvec{x \\ y} = \colvec{x + 2y  \\ x + 2y
\\ 0 }$$ is linear. Determine $\ker{L}$ and $\im{L}$.
\begin{answer} Assume that $$L\colvec{x \\ y} = \colvec{x + 2y  \\ x + 2y
\\ 0 } = \colvec{0 \\ 0 \\ 0}.  $$ Then $x = -2y $ and so $$\colvec{x \\ y} = y\colvec{-2 \\ 1} .
$$This means that $\dim\ker{L} = 1$ and $\ker{L}$ is the line
through the origin and $(-2, 1)$. Observe that $L$ is not
injective.

\bigskip

By the Dimension Theorem \ref{thm:dimension_theorem}, $\dim\im{L}
= \dim V - \dim\ker{L} = 2 - 1 = 1$. Assume that $\colvec{a \\ b\\
c}\in \im{L}$. Then $\exists (x, y)\in \BBR^2$ such that
$$L\colvec{x \\ y} = \colvec{x + 2y  \\ x +
2y
\\ 0 } = \colvec{a \\ b \\ c}.  $$ This means that
$$\colvec{a \\ b \\ c} = \colvec{x + 2y  \\ x +
2y
\\ 0 } = (x + 2y)\colvec{1 \\ 1 \\ 0}.  $$Observe that $L$ is not
surjective.
\end{answer}
\end{pro}
\begin{pro} It is easy to see that $L:\BBR^2 \rightarrow
\BBR^3$,$$L\colvec{x \\ y} = \colvec{x - y  \\ x + y
\\ 0 }$$ is linear. Determine $\ker{T}$ and $\im{T}$.
\begin{answer} Assume that $$L\colvec{x \\ y} = \colvec{x  - y  \\ x + y
\\ 0 } = \colvec{0 \\ 0 \\ 0}.  $$ Then $x + y = 0 = x - y $, that is, $x = y = 0$, meaning that
 $$\ker{L} = \left\{\colvec{0\\ 0}\right\},$$ and so $L$ is
injective.

\bigskip

By the Dimension Theorem \ref{thm:dimension_theorem}, $\dim\im{L}
= \dim V - \dim\ker{L} = 2 - 0 = 2$. Assume that $\colvec{a \\ b\\
c}\in \im{L}$. Then $\exists (x, y)\in \BBR^2$ such that
$$L\colvec{x \\ y} = \colvec{x  - y  \\ x +
y
\\ 0 } = \colvec{a \\ b \\ c}.  $$ This means that
$$\colvec{a \\ b \\ c} = \colvec{x - y  \\ x +
y
\\ 0 } = x\colvec{1 \\ 1 \\ 0} + y\colvec{-1 \\ 1 \\ 0} .  $$
Since $$ \colvec{1 \\ 1 \\ 0},\colvec{-1 \\ 1 \\ 0}     $$are
linearly independent, they span a subspace of dimension $2$ in
$\BBR^3$, that is, a plane containing the origin. Observe that $L$
is not surjective.
\end{answer}
\end{pro}

\begin{pro} It is easy to see that $L:\BBR^3 \rightarrow
\BBR^2$,$$L\colvec{x \\ y\\ z} = \colvec{x -  y - z  \\  y - 2z}$$
is linear. Determine $\ker{L}$ and $\im{L}$. \begin{answer} Assume
that
$$L\colvec{x \\ y\\ z} = \colvec{x -  y - z
\\  y - 2z} = \colvec{0
\\  0}.$$Then $y = 2z; x = y + z = 3z$. This means that
$\ker{L} = \left\{z\colvec{3 \\2 \\ 1}: z\in\BBR \right\}.$ Hence
$\dim \ker{L} = 1$, and so $L$ is not injective.

\bigskip

Now, if $$L\colvec{x \\ y\\ z} = \colvec{x -  y - z
\\  y - 2z} = \colvec{a
\\  b}.$$Then
$$\colvec{a \\ b} = \colvec{x -  y - z
\\  y - 2z} = x\colvec{1
\\  0} + y\colvec{-1
\\  1} + z\colvec{-1
\\  -2}.$$Now, $$-3\colvec{1
\\  0}   - 2\colvec{-1
\\  1}  = \colvec{-1
\\  -2}$$and $$\colvec{1
\\  0}, \colvec{-1
\\  1}$$are linearly independent. Since  $\dim\im{L} = 2$, we
have $\im{L} = \BBR^2$, and so $L$ is surjective.

\end{answer}
\end{pro}
\begin{pro} Let $$\fun{L}{A}{\tr{A}}{\mat{2\times 2}{\BBR}}{\BBR}.$$ Determine $\ker{L}$ and $\im{L}$.
\begin{answer} Assume that $$ 0 = \tr{\begin{bmatrix} a & b \cr c & d \cr
\end{bmatrix}} = a + d.
$$ Then $a = -d$ and so,
$$\begin{bmatrix} a & b \cr c & d
\cr \end{bmatrix} = \begin{bmatrix} -d & b \cr c & d \cr
\end{bmatrix}  = d\begin{bmatrix} -1 & 0 \cr 0 & 1
\cr \end{bmatrix} + b\begin{bmatrix} 0 & 1 \cr 0 & 0 \cr
\end{bmatrix} + c\begin{bmatrix} 0 & 0 \cr 1 & 0 \cr
\end{bmatrix},  $$and so $\dim \ker{L} = 3.$ Thus $L$ is not injective. $L$ is surjective, however. For
if $\alpha\in \BBR$, then  $$\alpha  = \tr{\begin{bmatrix} \alpha &
0 \cr 0 & 0 \cr
\end{bmatrix}}.
$$
\end{answer}
\end{pro}
\begin{pro}
\begin{enumerate}
\item  Demonstrate that $$ \fun{L}{A}{A^T +
A}{\mat{2\times 2}{\BBR}}{\mat{2\times 2}{\BBR}}$$ is a linear
transformation. \item Find a basis for $\ker{L}$ and find
$\dim\ker{L}$ \item Find a basis for $\im{L}$ and find $\dim\im{L}$.
\end{enumerate} \begin{answer}
\begin{enumerate} \item Let $(A,B)\in(\mat{2\times 2}{\BBR})^2, \alpha \in \BBR$. Then $$\begin{array}{lll}L(A + \alpha B) & = & (A + \alpha B)^T + (A + \alpha B)\\
& = & A^T + \alpha B^T + A + \alpha B \\
& = & A^T + A + \alpha B^T + \alpha B  \\
& = & L(A) + \alpha L(B), \end{array} $$ proving that $L$ is
linear. \item Assume that $A = \begin{bmatrix}a & b \cr c & d\cr
\end{bmatrix}\in \ker{L}$. Then
$$ \begin{bmatrix}0 & 0 \cr 0 & 0\cr \end{bmatrix}= L(A) = \begin{bmatrix}a & b \cr c & d\cr \end{bmatrix} +
\begin{bmatrix}a & c \cr b & d\cr \end{bmatrix} = \begin{bmatrix}2a &  b + c \cr b + c & 2d\cr \end{bmatrix},  $$
whence $a = d = 0$ and $b = -c$. Hence
$$\ker{L} = \span{\begin{bmatrix}0 & -1 \cr 1 & 0\cr \end{bmatrix}},   $$ and so $\dim \ker{L} = 1$.
\item By the Dimension Theorem, $\dim \im{L} = 4 -1 = 3$. As
above, $$\begin{array}{lll}L(A) & =  &
\begin{bmatrix}2a &  b + c \cr b + c & 2d\cr \end{bmatrix}\\
& = & a\begin{bmatrix}2 & 0 \cr 0 & 0\cr \end{bmatrix} + (b +
c)\begin{bmatrix}0 &  1 \cr 1 & 0\cr \end{bmatrix} +
d\begin{bmatrix}0 & 0 \cr 0 & 2\cr \end{bmatrix},
\end{array} $$from where $$\im{L} = \span{\begin{bmatrix}2 & 0 \cr 0 & 0\cr \end{bmatrix},\begin{bmatrix}0 &  1 \cr 1 & 0\cr \end{bmatrix},\begin{bmatrix}0 & 0 \cr 0 & 2\cr \end{bmatrix}}.  $$
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro}
Let $V$ be an $n$-dimensional vector space, where the
characteristic of the underlying field is different from $2$. A
linear transformation $T:V\rightarrow V$ is {\em idempotent} if
$T^2 = T$. Prove that if $T$ is idempotent, then
\begin{dingautolist}{202}
\item  $I - T$ is idempotent ($I$ is the identity function).
\item  $I+T$ is invertible. \\

\item  $\ker{T} = \im{I-T}$ \\


\end{dingautolist}
\begin{answer}
\begin{dingautolist}{202}
\item Observe that $$ (I-T)^2 = I -2T+T^2 = I-2T+T = I-T,
$$proving the result. \item  The inverse is $I-\frac{1}{2}T$, for
$$ (I + T)(I-\frac{1}{2}T) = I+T-\frac{1}{2}T - \frac{1}{2}T^2 = I+T-\frac{1}{2}T - \frac{1}{2}T
= I,  $$proving the claim. \item We have
$$\begin{array}{lll}\v{x}\in \ker{T}
& \iff & T(\v{x}) = \v{0} \\
& \iff & \v{x} - T(\v{x}) = \v{x} \\
& \iff & (I-T)(\v{x}) = \v{x}, \\
  \end{array} $$
in which case $\v{x}\in\im{I-T}$. Conversely, if $\v{x} = (I-T)(\v{y})$
for some $\v{y}$, then $T(\v{x}) = T(\v{y}) - T^2(\v{y}) = T(\v{y}) - T(\v{y}) = \v{0}$,
so $\v{x}\in\ker{T}$. Hence $\ker{T} = \im{I-T}$.



\end{dingautolist}


\end{answer}

\end{pro}
\end{multicols}
\section{Matrix Representation} Let  $V, W$ be
two vector spaces over the same field $\BBF$. Assume that  $\dim V =
m$ and $\{\v{v}_i\}_{i\in [1;m]}$ is an ordered basis for $V$, and
that $\dim W = n$ and ${\mathscr A} = \{\v{w}_i\}_{i\in [1;n]}$ is an
ordered basis for $W$. Let $L: V \rightarrow W$ be a linear transformation. Then
$$ \begin{array}{ccccc}L(\v{v}_1) &  = & a_{11}\v{w}_1 +  a_{21}\v{w}_2 + \cdots +
a_{n1}\v{w}_n & = & \colvec{a_{11} \\ a_{21} \\ \vdots \\
a_{n1}}_{\mathscr A}
\\
L(\v{v}_2)  & =  & a_{12}\v{w}_1 +  a_{22}\v{w}_2 +
\cdots + a_{n2}\v{w}_n   & = & \colvec{a_{12} \\ a_{22} \\ \vdots \\
a_{n2}}_{\mathscr A}         \\
\vdots & \vdots &   \vdots & \vdots & \vdots  \\
 L(\v{v}_m) & = &
a_{1m}\v{w}_1 +  a_{2m}\v{w}_2 + \cdots +
a_{nm}\v{w}_n  & = & \colvec{a_{1m} \\ a_{2m} \\ \vdots \\
a_{nm}}_{\mathscr A}\\
\end{array}. $$
\begin{df}
The $n\times m$ matrix $$ M_L = \begin{bmatrix} a_{11} & a_{12} &
\cdots & a_{1m} \cr a_{21} & a_{22} & \cdots & a_{2m} \cr \vdots &
\vdots & \vdots & \vdots \cr
 a_{n1} &
a_{n2} & \cdots & a_{nm} \cr
\end{bmatrix}
$$formed by the column vectors above is called the {\em matrix representation of the linear map $L$ with respect to
the bases $\{\v{v}_i\}_{i\in [1;m]}, \{\v{w}_i\}_{i\in [1;n]}$.}
\end{df}

\begin{exa}
Consider $L:\BBR^3 \rightarrow \BBR^3$,
$$L\colvec{x \\ y \\ z} = \colvec{x - y - z \\ x + y + z \\ z } .$$
Clearly $L$ is a linear transformation.
\begin{enumerate}
 \item  Find the
matrix corresponding to $L$ under the standard ordered basis.
\item Find
the matrix corresponding to $L$ under the ordered basis $\colvec{1 \\ 0 \\
0}, \colvec{1 \\ 1 \\ 0}, \colvec{1 \\ 0
\\ 1},$ for both the domain and the image of $L$.

\end{enumerate}
\end{exa}
\begin{solu}
\begin{enumerate}
\item The matrix will be a $3\times 3$ matrix. We have $L\colvec{ 1 \\ 0  \\ 0 } = \colvec{ 1 \\ 1 \\
0 }$, $L\colvec{ 0 \\ 1 \\ 0 } = \colvec{ -1 \\ 1 \\ 0}$, and $L\colvec{0 \\
0\\ 1} = \colvec{-1 \\ 1 \\ 1}$, whence the desired matrix is
$$\begin{bmatrix} 1 & -1 & -1 \cr 1 & 1 & 1 \cr 0 & 0 & 1 \cr\end{bmatrix}.$$
\item Call this basis ${\mathscr  A}$. We have
$$L\colvec{ 1 \\ 0  \\ 0 } = \colvec{ 1 \\ 1 \\
0 } = 0 \colvec{ 1 \\ 0 \\
0 } + 1\colvec{ 1 \\ 1 \\
0 } + 0\colvec{ 1 \\ 0 \\
1 } = \colvec{ 0 \\ 1 \\
0 }_{\mathscr  A},$$ $$L\colvec{ 1\\ 1 \\ 0 } = \colvec{ 0 \\ 2 \\ 0} = -2\colvec{ 1 \\ 0 \\
0 } + 2\colvec{ 1 \\ 1 \\
0 } + 0\colvec{ 1 \\ 0 \\
1 } = \colvec{ -2 \\ 2 \\
0 }_{\mathscr  A},$$ and $$L\colvec{1 \\
0\\ 1} = \colvec{0 \\ 2 \\ 1} = -3\colvec{ 1 \\ 0 \\
0 } + 2\colvec{ 1 \\ 1 \\
0 }+ 1\colvec{ 1 \\ 0 \\
1} = \colvec{ -3 \\ 2 \\
1 }_{\mathscr  A},$$ whence the desired matrix is
$$\begin{bmatrix} 0 & -2 & -3 \cr 1 & 2 & 2 \cr 0 & 0 & 1 \cr\end{bmatrix}.$$
\end{enumerate}
\end{solu}
\begin{exa}
Let $\BBR_n[x]$ denote the set of polynomials with real coefficients
with degree at most $n$.
\begin{enumerate} \item  Prove that $$\fun{L}{p(x)}{p''(x)}{\BBR_3[x]}{\BBR_1[x]}
$$is a linear transformation. Here $p''(x)$ denotes the second derivative
of $p(x)$ with respect to $x$.  \item  Find the matrix of $L$ using
the ordered bases $\{1, x, x^2, x^3\}$ for $\BBR_3[x]$ and $\{1,
x\}$ for $\BBR_1[x]$.  \item  Find the matrix of $L$ using the
ordered bases $\{1, x, x^2, x^3\}$ for $\BBR_3[x]$ and $\{1, x +
2\}$ for $\BBR_1[x]$. \item Find a basis for $\ker{L}$ and find
$\dim\ker{L}$.
\item
 Find a basis for $\im{L}$ and find $\dim\im{L}$.
\end{enumerate}\end{exa}
\begin{solu}
\begin{enumerate}
\item Let $(p(x), q(x))\in(\BBR_3[x])^2$ and $\alpha\in\BBR$. Then
$$L(p(x) + \alpha q(x)) = (p(x) + \alpha q(x))'' = p''(x) + \alpha q''(x) = L(p(x)) + \alpha L(q(x)),$$
whence $L$ is linear. \item We have $$\begin{array}{ccccccccc}L(1)
& =
&  \dfrac{{\rm d}^2}{{\rm d}x^2} 1  & = & 0 & = & 0(1) + 0(x) & = & \colvec{0\\ 0}, \vspace{2mm}\\
L(x) &  = & \dfrac{{\rm d}^2}{{\rm d}x^2} x & = &  0 & = & 0(1) +
0(x) & = & \colvec{0\\ 0}, \vspace{2mm}\\  L(x^2) & = &
\dfrac{{\rm
d}^2}{{\rm d}x^2} x^2 &  = & 2 & = & 2(1) + 0(x) & = & \colvec{2\\
0},\vspace{2mm} \\  L(x^3) & = & \dfrac{{\rm d}^2}{{\rm d}x^2}
x^3& = & 6x & = & 0(1) + 6(x) & = & \colvec{0\\ 6},
\end{array}$$whence the matrix representation of $L$ under the standard
basis  is
$$\begin{bmatrix} 0 & 0 & 2 & 0 \cr 0 & 0 & 0 & 6 \cr
\end{bmatrix}. $$
\item We have $$\begin{array}{ccccccccc}L(1) & =
&  \dfrac{{\rm d}^2}{{\rm d}x^2} 1  & = & 0 & = & 0(1) + 0(x + 2) & = & \colvec{0\\ 0}, \vspace{2mm}\\
L(x) &  = & \dfrac{{\rm d}^2}{{\rm d}x^2} x & = &  0 & = & 0(1) +
0(x + 2) & = & \colvec{0\\ 0}, \vspace{2mm}\\  L(x^2) & = &
\dfrac{{\rm
d}^2}{{\rm d}x^2} x^2 &  = & 2 & = & 2(1) + 0(x + 2) & = & \colvec{2\\
0}, \vspace{2mm}\\  L(x^3) & = & \dfrac{{\rm d}^2}{{\rm d}x^2}
x^3& = & 6x & = & -12(1) + 6(x + 2) & = & \colvec{-12\\ 6},
\end{array}$$whence the matrix representation of $L$ under the standard
basis  is
$$\begin{bmatrix} 0 & 0 & 2 & -12 \cr 0 & 0 & 0 & 6 \cr
\end{bmatrix}. $$
\item Assume that $p(x) = a + bx + cx^2 + dx^3\in\ker{L}$. Then $$
0= L(p(x)) = 2c + 6dx, \ \ \ \forall x\in \BBR.$$ This means that $c
= d = 0$. Thus $a, b$ are free and $$\ker{L} = \{a + bx: (a,
b)\in\BBR^2\}.
$$Hence $\dim\ker{L} = 2$.
\item By the Dimension Theorem, $\dim\im{L} = 4 - 2 = 2$. Put
$q(x) = \alpha + \beta x + \gamma x^2 + \delta x^3$. Then
$$L(q(x)) = 2\gamma  + 6\delta (x) = (2\gamma)(1) + (6\delta)(x).
$$Clearly $\{1, x\}$ are linearly independent and span $\im{L}$.
Hence $$\im{L} = \span{1, x} = \BBR_1[x].$$
\end{enumerate}
\end{solu}
\begin{exa}\noindent \begin{enumerate} \item  A linear transformation $T:\BBR^3
\rightarrow \BBR^3 $ is such that
$$T(\v{i}) = \colvec{2 \\ 1\\ 1}; \ \ \ T(\v{j}) = \colvec{3 \\ 0\\ -1}.$$It is known that
$$\im{T} = \span{T(\v{i}), T(\v{j})}  $$and that $$\ker{T} = \span{\colvec{1\\ 2 \\ -1}}. $$
Argue that there must be $\lambda$  and $\mu $ such that
$$T(\v{k})  = \lambda T(\v{i})  + \mu T(\v{j}).$$

\item Find $\lambda$  and $\mu $, and hence, the matrix
representing $T$ under the standard ordered basis.
\end{enumerate} \end{exa}
\begin{solu}\begin{enumerate} \item Since $T(\v{k}) \in \im{T}$ and
$\im{T}$ is generated by $T(\v{i}) $ and $T(\v{j}) $ there must be
$(\lambda, \mu)\in\BBR^2$ with
$$ T(\v{k})  = \lambda T(\v{i})  + \mu T(\v{j}) = \lambda \colvec{2\\ 1 \\ 1}  + \mu\colvec{3 \\ 0 \\ -1}
= \colvec{2\lambda + 3\mu \\ \lambda \\ \lambda - \mu}.  $$ \item
The matrix of $T$ is $$\begin{bmatrix}T(\v{i}) & T(\v{j}) &
T(\v{k})\cr \end{bmatrix} = \begin{bmatrix} 2 & 3 &  2\lambda +
3\mu \cr 1 & 0 & \lambda \cr 1 & -1 &  \lambda - \mu\end{bmatrix}.
$$ Since $\colvec{1\\2\\ -1}\in\ker{T}$ we must have
$$\begin{bmatrix} 2 & 3 &  2\lambda + 3\mu \cr 1 & 0 & \lambda \cr
1 & -1 &  \lambda - \mu\end{bmatrix}\colvec{1\\2\\ -1} =
\colvec{0\\0 \\ 0}.   $$Solving the resulting system of linear
equations we obtain $\lambda = 1, \mu = 2$. The required matrix is
thus
$$\begin{bmatrix} 2 & 3 &  8 \cr 1 & 0 & 1\cr
1 & -1 &  -1\end{bmatrix}.$$
\end{enumerate}
\end{solu}
\begin{rem}
If the linear mapping $L:V \rightarrow W$, $\dim V = n, \dim W = m$
has matrix representation $A\in\mat{m\times n}{ \BBF }$,  then $\dim
\im{L} = \rank{A}$.
\end{rem}
\section*{\psframebox{Homework}}
\begin{pro}
Let $T:\BBR ^4\rightarrow \BBR^3$ be a linear transformation such
that
$$ T\colvec{1 \\ 1\\ 1\\ 1} = \colvec{0 \\ 0 \\ 1}, \qquad
 T\colvec{1 \\ 0\\ 1\\ 0} = \colvec{1 \\ 1 \\ -1}, \qquad
 T\colvec{1 \\ 1\\ 1\\ 0} = \colvec{0 \\ 0 \\ -1}, \qquad
 T\colvec{-1 \\ -2\\ 0\\ 0} = \colvec{1 \\ 1 \\ 1}.
  $$Find the matrix of $T$ with respect to the canonical bases. Find
  the dimensions and describe $\ker{T}$ and $\im{T}$.
\begin{answer}
Observe that
$$\colvec{a\\ b\\ c\\ d} = d\colvec{1\\ 1 \\ 1\\ 1} + (2a-c-b)\colvec{1 \\ 0\\ 1\\ 0} +(-d-2a+2c+b)\colvec{1 \\ 1\\ 1\\ 0}+(-a+c)\colvec{-1 \\ -2\\ 0\\ 0}.  $$
Hence
$$\begin{array}{lll}T\colvec{a\\ b\\ c\\ d} &  = &  dT\colvec{1\\ 1 \\ 1\\ 1} + (2a-c-b)T\colvec{1 \\ 0\\ 1\\ 0} +(-d-2a+2c+b)T\colvec{1 \\ 1\\ 1\\ 0}+(-a+c)T\colvec{-1 \\ -2\\ 0\\ 0}\\
&  = &  d\colvec{0 \\ 0\\ 1} + (2a-c-b)\colvec{1 \\ 1\\ -1} +(-d-2a+2c+b)\colvec{0\\ 0\\ -1}+(-a+c)\colvec{1 \\ 1\\ 1}\\
& = & \colvec{a-b\\ a-b \\ -a+2d}.
\end{array} $$
This gives
$$ T\colvec{1\\ 0 \\ 0 \\ 0} = \colvec{1\\ 1\\ -1}, \quad
T\colvec{0\\ 1 \\ 0 \\ 0} = \colvec{-1\\ -1\\ 0}, \quad T\colvec{0\\
0 \\ 1 \\ 0} = \colvec{0\\ 0\\ 0}, \quad T\colvec{0\\ 0 \\ 0 \\ 1} =
\colvec{0\\ 0\\ 2}. $$ The required matrix is therefore
$$\begin{bmatrix} 1 & -1 & 0 & 0 \cr 1 & -1 & 0 & 0 \cr -1 & 0 & 0 & 2 \cr \end{bmatrix}.  $$
This matrix has rank $2$, and so $\dim\im{T}=2$.  We can use
$\left\{\colvec{1\\ 1\\ -1}, \colvec{-1\\ -1\\ 0}\right\}$ as a
basis for $\im{T}$. Thus by the dimension theorem
$\dim\ker{T}=2$. If $\colvec{0\\ 0\\ 0}=T\colvec{a\\
b \\ c \\ d} = \colvec{a-b\\ a-b\\ -a+2d}$, then $a = b$ and $a = 2d$. Hence the vectors in
$\ker{T}$ have the form $\colvec{2d\\ 2d\\ c \\ d}$ and hence we may
take $\left\{\colvec{2\\ 2\\ 0 \\ 1}, \colvec{0\\ 0\\ 1 \\
0}\right\}$ as a basis for $\ker{T}$.
\end{answer}
\end{pro}
\begin{pro} \begin{enumerate} \item   A linear transformation $T:\BBR^3
\rightarrow \BBR^3$ has as image the plane with equation $x + y + z
= 0$ and as kernel the line $x = y = z$. If
$$T\colvec{1\\ 1\\ 2} =   \colvec{a\\ 0\\ 1}, \ \ \ T\colvec{2\\
1\\ 1} =   \colvec{3\\ b\\ -5},\ \ \  T\colvec{1\\ 2\\ 1} =
\colvec{-1\\ 2\\ c}.$$Find $a, b, c$. \item Find the matrix
representation of $T$ under the standard basis.

\end{enumerate}\begin{answer}\begin{enumerate} \item Since the image of $T$ is the plane $x +
y + z = 0$, we must have $$a + 0 + 1 = 0 \implies a = -1,
$$
$$3 + b - 5 = 0 \implies b = 2,  $$
$$-1 + 2 + c = 0 \implies c = -1.  $$
\item Observe that $\colvec{1\\ 1\\ 1}\in\ker{T}$ and so $$
T\colvec{1\\ 1\\ 1} =   \colvec{0\\ 0\\ 0}. $$ Thus
$$ T\colvec{1\\ 0\\ 0} = T\colvec{2\\ 1\\ 1} - T\colvec{1\\ 1\\ 1} =   \colvec{3\\ 2\\ -5},  $$
$$ T\colvec{0\\ 1\\ 0} = T\colvec{1\\ 2\\ 1} - T\colvec{1\\ 1\\ 1} =   \colvec{-1\\ 2\\ -1},  $$
$$ T\colvec{0\\ 0\\ 1} = T\colvec{1\\ 1\\ 2} - T\colvec{1\\ 1\\ 1} =   \colvec{-1\\ 0\\ 1}.  $$
The required matrix is therefore
$$\begin{bmatrix}3 & -1 & -1 \cr 2 & 2 & 0 \cr -5 & -1 & 1 \cr      \end{bmatrix}.   $$
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro} \begin{enumerate} \item  Prove that $T:\BBR^2
\rightarrow \BBR^3$ $$T\colvec{x \\ y} = \colvec{x + y \\ x - y \\
2x + 3y}   $$ is a linear transformation. \item  Find a basis for
$\ker{T}$ and find $\dim\ker{T}$. \item   Find a basis for $\im{T}$
and find  $\dim\im{T}$. \item  Find the matrix
of $T$ under the ordered bases ${\mathscr A} = \left\{\colvec{1 \\
2}, \colvec{1\\ 3}\right\}$ of $\BBR^2$ and ${\mathscr B} =
\left\{\colvec{1 \\ 1\\ 1}, \colvec{1\\ 0\\ -1}, \colvec{0\\ 1 \\
0}\right\}$ of $\BBR^3$.
\end{enumerate}\begin{answer} \begin{enumerate} \item Let $\alpha\in\BBR$. We have
$$ \begin{array}{lll}
T\left(\colvec{x \\ y}+ \alpha \colvec{u \\ v}\right) & = &  T\left(\colvec{x + \alpha u \\ y + \alpha v}\right) \\
& = & \colvec{x + \alpha u + y + \alpha v \\ x + \alpha u - y - \alpha v \\ 2(x + \alpha u) + 3(y + \alpha v)} \\
& = &  \colvec{x + y  \\ x - y  \\ 2x + 3y}
+   \alpha\colvec{u +  v \\ u - v \\ 2u + 3v} \\
& = &T\left(\colvec{x \\ y}\right) + \alpha T\left( \colvec{u \\
v}\right),
\end{array}$$
proving that $T$ is linear. \item We have $$T\left(\colvec{x \\
y}\right)  = \colvec{0\\ 0 \\ 0} \iff  \colvec{x + y  \\ x - y  \\
2x + 3y} =    \colvec{0\\ 0 \\ 0} \iff x = y = 0,$$ whence $\dim\ker{T} =
0$ and $T$ is injective. \item By the Dimension Theorem,
$\dim \im{T} = 2 - 0 = 2$. Now,
$$T\left(\colvec{x \\ y}\right)  = \colvec{x + y  \\ x - y  \\ 2x + 3y} =  x \colvec{1\\ 1\\ 2} + y\colvec{1 \\ -1 \\ 3}, $$
whence $$\im{T} = \span{\colvec{1\\ 1\\ 2}, \colvec{1 \\ -1 \\
3}}.  $$ \item We have
$$ T\left(\colvec{1 \\ 2}\right) = \colvec{3\\ -1\\ 8} = \frac{11}{2}\colvec{1 \\ 1\\ 1} - \frac{5}{2}\colvec{1 \\ 0\\ -1}
-\frac{13}{2}\colvec{0 \\ 1\\ 0} = \colvec{11/2 \\ -5/2\\
-13/2}_{\mathscr B}, $$ and
$$ T\left(\colvec{1 \\ 3}\right) = \colvec{4\\ -2\\ 11} = \frac{15}{2}\colvec{1 \\ 1\\ 1} - \frac{7}{2}\colvec{1 \\ 0\\ -1}
-\frac{19}{2}\colvec{0 \\ 1\\ 0} = \colvec{15/2 \\ -7/2\\
-19/2}_{\mathscr B}. $$ The required matrix is
$$\begin{bmatrix}11/2 & 15/2 \cr -5/2 & -7/2 \cr -13/2 & -19/2 \cr \end{bmatrix}_{\mathscr B}. $$
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro}
Let $$\fun{L}{\v{a}}{L(\v{a})}{\BBR^3}{\BBR^2},$$ where
$$L\colvec{x \\ y \\ z} = \colvec{x + 2y \\ 3x - z}.$$ Clearly $L$ is linear. Find a
matrix representation for $L$ if
\begin{enumerate}
\item The bases for both $\BBR^3$ and $\BBR^2$ are both the
standard ordered bases. \item The ordered basis for $\BBR^3$ is $\dis{\colvec{1 \\
0 \\ 0 }, \colvec{1 \\ 1 \\ 0 }, \colvec{1 \\ 1 \\ 1 }}$ and
$\BBR^2$ has the standard ordered basis. \item The ordered basis
for $\BBR^3$ is $\dis{\colvec{1
\\ 0 \\ 0 }, \colvec{1 \\ 1 \\ 0 }, \colvec{1 \\ 1
\\ 1 }}$ and
the ordered basis for $\BBR^2$ is $\dis{{\mathscr  A}  = \left\{ \colvec{1 \\ 0}, \colvec{1 \\
1}\right\}}$.
\end{enumerate}
\begin{answer} The matrix will be a $2\times 3$ matrix. In each case, we
find the action of $L$ on the basis elements of $\BBR^3$ and express
the result in the given basis for $\BBR^2$.
\begin{enumerate}
\item We have
$$L\left(\colvec{1 \\ 0 \\ 0 }\right) = \colvec{1 \\ 3}, L\left(\colvec{0 \\ 1 \\ 0 }\right) = \colvec{2 \\ 0},
L\left(\colvec{0 \\ 0 \\ 1 }\right) = \colvec{0 \\ -1}. $$The
required matrix is
$$\begin{bmatrix} 1 & 2 & 0 \cr 3 & 0 & -1 \cr \end{bmatrix}.$$
\item We have
$$L\left(\colvec{1 \\ 0 \\ 0 }\right) = \colvec{1 \\ 3}, L\left(\colvec{1 \\ 1 \\ 0 }\right) = \colvec{3 \\ 3},
L\left(\colvec{1 \\ 1 \\ 1 }\right) = \colvec{3 \\ 2}. $$The
required matrix is
$$\begin{bmatrix} 1 & 3 & 3 \cr 3 & 3 & 2 \cr \end{bmatrix}.$$
\item We have
$$L\left(\colvec{1 \\ 0 \\ 0 }\right) = \colvec{1 \\ 3} = -2\colvec{1 \\
0} + 3\colvec{1 \\ 1} = \colvec{-2 \\ 3}_{\mathscr  A} ,$$

$$L\left(\colvec{1 \\ 1 \\ 0 }\right) = \colvec{3 \\ 3} = 0\colvec{1 \\
0} + 3\colvec{1 \\ 1} = \colvec{0 \\ 3}_{\mathscr  A}, $$
$$
L\left(\colvec{1 \\ 1 \\ 1 }\right) = \colvec{3 \\ 2} = 1\colvec{1 \\
0} + 2\colvec{1 \\ 1} = \colvec{1 \\ 2}_{\mathscr  A}. $$The
required matrix is
$$\begin{bmatrix} -2 & 0 & 1 \cr 3 & 3 & 2 \cr \end{bmatrix}.$$
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro} A linear transformation $T:\BBR^2 \rightarrow \BBR^2$
satisfies $\ker{T} = \im{T}$, and $T\colvec{1\\ 1} = \colvec{2 \\
3}$. Find the matrix representing $T$ under the standard ordered
basis. \begin{answer} Observe that $\colvec{2 \\ 3}\in \im{T} =
\ker{T}$ and so
$$T\colvec{2 \\ 3}  = \colvec{0 \\ 0}.  $$Now
$$T\colvec{1 \\ 0}  = T\left( 3\colvec{1 \\ 1} - \colvec{2 \\ 3}\right) =  3T\colvec{1 \\ 1} - T\colvec{2 \\ 3} = \colvec{6 \\ 9},$$
and
$$T\colvec{0 \\ 1}  = T\left( \colvec{2 \\ 3} - 2\colvec{1 \\ 1} \right) =
T\colvec{2 \\ 3}- 2T\colvec{1 \\ 1}  = \colvec{-4 \\ -6}.$$ The
required matrix is thus
$$\begin{bmatrix}6 & -4 \cr 9 & -6 \cr  \end{bmatrix}.  $$
\end{answer}
\end{pro}



\begin{pro}
Find the matrix representation for the linear map
$$\fun{L}{A}{\tr{A}}{\mat{2\times 2}{\BBR}}{\BBR},
$$under the standard basis $${\mathscr A} = \left\{\begin{bmatrix} 1 & 0 \cr 0 & 0 \cr \end{bmatrix},
\begin{bmatrix} 0 & 1 \cr 0 & 0 \cr \end{bmatrix}, \begin{bmatrix} 0 & 0 \cr 1 & 0 \cr \end{bmatrix},
\begin{bmatrix} 0 & 0 \cr 0 & 1 \cr \end{bmatrix}  \right\}$$for
$\mat{2\times 2}{\BBR}$. \begin{answer} The matrix will be a
$1\times 4$ matrix. We have
$$\tr{\begin{bmatrix} 1 & 0 \cr 0 & 0 \cr \end{bmatrix}} = 1,
$$
$$\tr{\begin{bmatrix} 0 & 1 \cr 0 & 0 \cr \end{bmatrix}} = 0,
$$
$$\tr{\begin{bmatrix} 0 & 0 \cr 1 & 0 \cr \end{bmatrix}} = 0,
$$
$$\tr{\begin{bmatrix} 0 & 0 \cr 0 & 1 \cr \end{bmatrix}} = 1.
$$
Thus $$ M_L = (1 \ \ 0 \ \ 0 \ \ 1).    $$
\end{answer}
\end{pro}
\begin{pro}
Let $A\in\mat{n\times p}{\BBR}$, $B\in\mat{p\times q}{\BBR}$, and
$C\in\mat{q\times r}{\BBR}$, be such that $\rank{B} = \rank{AB}$.
Shew that
$$\rank{BC} = \rank{ABC}.$$ \begin{answer} First observe that
$\ker{B} \subseteq \ker{AB}$ since $\forall X \in\mat{q\times
1}{\BBR}$,
$$BX = 0 \implies (AB)X = A(BX) = 0.$$Now
$$\begin{array}{lll}\dim\ker{B} &  = &  q - \dim\im{B} \\ & = & q - \rank{B} \\ &  = &  q - \rank{AB} \\ &  = &  q - \dim\im{AB} \\
&  = &  \dim \ker{AB}. \end{array}$$ Thus $\ker B = \ker{AB}.$
Similarly, we can demonstrate that $\ker{ABC} = \ker{BC}.$ Thus
$$ \begin{array}{lll}\rank{ABC} & = &  \dim\im{ABC} \\ & = &  r - \dim\ker{ABC} \\ &  = &  r - \dim\ker{BC} \\ &  = & \dim\im{BC} \\
&  = &  \rank{BC}.\end{array}$$
\end{answer}
\end{pro}
\chapter{Determinants}

\section{Permutations}
\begin{df}
Let $S$ be a finite set with $n \geq 1$ elements. A {\em
permutation} is a bijective function $\tau : S \rightarrow S$. It
is easy to see that there are $n!$ permutations from $S$ onto
itself. \index{permutation}
\end{df}
Since we are mostly concerned with the {\em action} that $\tau$
exerts on $S$ rather than with the particular names of the
elements of $S$, we will take $S$ to be the set $S = \{1, 2, 3,
\ldots , n \}$. We indicate a permutation $\tau$ by means of the
following convenient diagram
$$\tau = \begin{bmatrix} 1 & 2 & \cdots & n \cr \tau (1) & \tau (2) & \cdots & \tau (n) \end{bmatrix}.$$
\begin{df}
The notation $S_n$ will denote the set of all permutations on
$\{1, 2, 3, \ldots , n \}$. Under this notation, the  composition
of two permutations $(\tau, \sigma)\in S_n ^2$ is
$$ \begin{array}{lll}\tau\circ \sigma & = & \begin{bmatrix} 1 & 2 & \cdots & n \cr \tau (1) & \tau (2) & \cdots & \tau (n) \end{bmatrix}
\circ\begin{bmatrix} 1 & 2 & \cdots & n \cr \sigma (1) & \sigma
(2) & \cdots & \sigma (n) \end{bmatrix} \\ &  = &
\begin{bmatrix} 1 & 2 & \cdots & n \cr (\tau \circ \sigma)  (1) & (\tau \circ \sigma)  (2) & \cdots &
(\tau \circ \sigma) (n) \end{bmatrix}\end{array}.  $$ The $k$-fold
composition of $\tau$ is
$$ \underbrace{\tau \circ \cdots \circ \tau}_{k\ {\rm compositions}} = \tau^k.       $$

\end{df}

\begin{rem}
We usually do away with the $\circ$ and write $\tau\circ\sigma$
simply as $\tau\sigma$. This ``product of permutations'' is thus
simply function composition.
\end{rem}
 Given a permutation
$\tau: S \rightarrow S$, since $\tau$ is bijective,
$$\tau ^{-1}: S \rightarrow S$$ exists and
is also a permutation. In fact if
$$\tau = \begin{bmatrix} 1 & 2 & \cdots & n \\ \tau (1) & \tau (2) & \cdots & \tau (n) \end{bmatrix},$$
then
$$\tau^{-1} = \begin{bmatrix}\tau (1) & \tau (2) & \cdots & \tau (n) \\ 1 & 2 & \cdots & n \\  \end{bmatrix}.$$
\vspace{2cm}
\begin{figure}[htb]
$$\psset{unit=2pc}\psline[linewidth=2pt](-1, 0)(1, 0)(0, 1.4142135623730950488016887242097)(-1,0)
\psline[linestyle=dotted](0,1.4142135623730950488016887242097)(0,0)
\uput[u](0, 1.4142135623730950488016887242097){1}
\psline[linestyle=dotted](-1,0)(.5,
0.70710678118654752440084436210485) \uput[l](-1, 0){2}
\psline[linestyle=dotted](1,0)(-.5,
0.70710678118654752440084436210485) \uput[r](1,0){3}
$$\vspace{1cm}\caption{$S_3$ are rotations and reflexions.} \label{fig:S_3}
\end{figure}

\begin{exa}
The set $S_3$ has  $3! = 6$ elements, which are given below.
\begin{enumerate}
\item $\idefun: \{1, 2, 3\} \rightarrow \{1, 2, 3\}$ with
$$\idefun  = \begin{bmatrix} 1 & 2 & 3 \\  1 & 2 & 3 \end{bmatrix}.$$
\item $\tau_1: \{1, 2, 3\} \rightarrow \{1, 2, 3\} $ with
$$\tau_1  = \begin{bmatrix}1 & 2 & 3 \\  1 & 3 & 2 \end{bmatrix}.$$

\item $\tau_2: \{1, 2, 3\}  \rightarrow \{1, 2, 3\} $ with
$$\tau_2  = \begin{bmatrix} 1 & 2 & 3 \\ 3 & 2 & 1 \end{bmatrix}.$$
\item $\tau_3: \{1, 2, 3\}  \rightarrow \{1, 2, 3\} $ with
$$\tau_3  = \begin{bmatrix} 1 & 2 & 3 \\ 2 & 1 & 3 \end{bmatrix}.$$

\item $\sigma_1: \{1, 2, 3\}  \rightarrow \{1, 2, 3\} $ with
$$\sigma_1  = \begin{bmatrix} 1 & 2 & 3 \\ 2 & 3 & 1 \end{bmatrix}.$$

\item $\sigma_2: \{1, 2, 3\} \rightarrow \{1, 2, 3\} $ with
$$\sigma_2  = \begin{bmatrix} 1 & 2 & 3 \\ 3 & 1 & 2 \end{bmatrix}.$$

\end{enumerate}
\label{ex:S_3}\end{exa}

\begin{exa}
The compositions $\tau_1 \circ \sigma_1$ and $\sigma_1\circ
\tau_1$ can be found as follows.
$$ \tau_1 \circ \sigma_1  = \begin{bmatrix} 1 & 2 & 3 \\  1 & 3 & 2 \end{bmatrix}\circ
\begin{bmatrix} 1 & 2 & 3 \\ 2 & 3 & 1
\end{bmatrix} = \begin{bmatrix}1 & 2 & 3 \\ 3 & 2 & 1 \end{bmatrix} = \tau_2 .$$
We read from right to left: $1 \rightarrow 2 \rightarrow 3$ (``1
goes to 2, 2 goes to 3, so 1 goes to 3''), etc. Similarly
$$ \sigma_1 \circ \tau_1  = \begin{bmatrix} 1 & 2 & 3 \\ 2 & 3 & 1
\end{bmatrix}\circ\begin{bmatrix}1 & 2 & 3 \\  1 & 3 & 2 \end{bmatrix}
 = \begin{bmatrix} 1 & 2 & 3 \\ 2 & 1 & 3 \end{bmatrix} =
 \tau_3.$$Observe in particular that $\sigma_1 \circ \tau_1  \neq \tau_1 \circ \sigma_1 .$
 Finding all the other products we deduce the following
 ``multiplication table'' (where the ``multiplication'' operation
 is really composition of functions).

$$\begin{array}{|c||c|c|c|c|c|c|}
\hline \circ & \idefun & \tau_1 & \tau_2 & \tau_3  & \sigma_1 & \sigma_2\\
\hline \hline  \idefun & \idefun & \tau_1  & \tau_2  & \tau_3 & \sigma_1 & \sigma_2   \\
\hline  \tau_1 & \tau_1 &   \idefun & \sigma_1 &\sigma_2  & \tau_2 & \tau_3   \\
\hline \tau_2 & \tau_2 & \sigma_2  & \idefun & \sigma_1 & \tau_3&\tau_1 \\
\hline \tau_3 & \tau_3 & \sigma_1& \sigma_2 & \idefun &\tau_1 & \tau_2 \\
\hline \sigma_2 & \sigma_2 & \tau_2& \tau_3& \tau_1& \idefun & \sigma_1 \\
\hline \sigma_1 & \sigma_1 &  \tau_3 & \tau_1 & \tau_2& \sigma_2 &\idefun \\
\hline
\end{array}$$
\label{ex:tau}\end{exa}

The permutations in example \ref{ex:S_3} can be conveniently
interpreted as follows. Consider an equilateral triangle with
vertices labelled $1$, $2$ and $3$, as in figure \ref{fig:S_3}.
Each $\tau_a$ is a reflexion (``flipping'') about the line joining
the vertex $a$ with the midpoint of the side opposite $a$. For
example $\tau_1$ fixes $1$ and flips $2$ and $3$. Observe that two
successive flips return the vertices to their original position
and so $(\forall a\in\{1, 2, 3\})(\tau_a ^2 = \idefun)$.
Similarly, $\sigma_1$ is a rotation of the vertices by an angle of
$120^\circ$. Three successive rotations restore the vertices to
their original position and so $\sigma_1 ^3 = \idefun$.


\begin{exa}
To find $\tau_1^{-1}$ take the representation of $\tau_1$ and
exchange the rows:
$$ \tau_1^{-1}  = \begin{bmatrix}1 & 3 & 2 \\  1 & 2 & 3 \\  \end{bmatrix}.$$
This is more naturally written as
$$ \tau_1^{-1}  = \begin{bmatrix}1 & 2 & 3 \\  1 & 3 & 2 \\  \end{bmatrix}.$$
Observe that $\tau_1^{-1} = \tau _1$.
\end{exa}
\begin{exa}
To find $\sigma_1^{-1}$ take the representation of $\sigma_1$ and
exchange the rows:
$$ \sigma_1^{-1}  = \begin{bmatrix}2 & 3 & 1 \\  1 & 2 & 3 \\  \end{bmatrix}.$$
This is more naturally written as
$$ \sigma_1^{-1}  = \begin{bmatrix}1 & 2 & 3 \\  3 & 1 & 2 \\  \end{bmatrix}.$$
Observe that $\sigma_1^{-1} = \sigma_2$.
\end{exa}




\section{Cycle Notation} We now present a
shorthand notation for permutations by introducing the idea of a
{\em cycle}. Consider in $S_9$ the permutation
$$\tau = \begin{bmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 \\
2 & 1 & 3 & 6 & 9 & 7 & 8 & 4 & 5 \end{bmatrix}.$$ We start with
1. Since 1 goes to 2 and 2 goes back to 1, we write $(12)$. Now we
continue with 3. Since 3 goes to 3, we write $(3)$. We continue
with 4. As 4 goes to 6, 6 goes to 7, 7 goes to 8, and 8 goes back to 4,
we write $(4678)$. We consider now $5$ which goes to 9 and 9 goes
back to 5, so we write $(59)$. We have written $\tau$ as a product
of disjoint cycles
$$\tau = (12)(3)(4678)(59).$$This prompts the following
definition.
\begin{df}
Let $l\geq 1$ and let $i_1,\ldots,i_l\in\{1,2,\ldots, n\}$ be
distinct. We write  $(i_1\ i_2\ \ldots\ i_l)$ for the element
$\sigma\in S_n$ such that $\sigma(i_r)=i_{r+1}, \ \ 1\leq r<l$,
$\sigma(i_l)=i_1$ and  $\sigma(i)=i$ for
$i\not\in\{i_1,\ldots,i_l\}$. We say that  $(i_1\ i_2\ \ldots\
i_l)$ is a {\em cycle of length $l$}. The {\em order} of a cycle
is its length. Observe that if $\tau$ has order $l$ then $\tau^l =
\idefun$.\end{df}
\begin{rem}
Observe that $(i_2\ \ldots\ i_l\ i_1)=(i_1\ \ldots\ i_l)$ etc.,
and that $(1)=(2)=\cdots=(n)= \idefun$. In fact, we have
$$(i_1\ \ldots\ i_l)=(j_1\ \ldots\ j_m)$$ if and only if (1) $l=m$,
and (2) if $l>1$, then $\exists a$ such that $\forall k$: $i_k =
j_{k+a \mod l}$. Two cycles $(i_1,\ldots,i_l)$ and
$(j_1,\ldots,j_m)$ are disjoint if $\{i_1, \ldots ,
i_l\}\cap\{j_1,  \ldots , j_m\}=\varnothing$. Disjoint cycles
commute and if $\tau = \sigma_1\sigma_2 \cdots \sigma_t$ is the
product of disjoint cycles of length $l_1, l_2, \ldots , l_t$
respectively, then $\tau$ has order
$$ \lcm{l_1, l_2, \ldots , l_t}.$$
\end{rem}
\begin{exa}
A cycle decomposition for $\alpha\in S_9,$
$$\alpha = \begin{bmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 \\
1 & 8 & 7 & 6 & 2 & 3 & 4 & 5 & 9 \end{bmatrix}$$ is
$$(285)(3746).$$ The order of $\alpha$ is $\lcm{3, 4} = 12$.
\label{ex:alpha}\end{exa}
\begin{exa}
The cycle decomposition $\beta = (123)(567)$ in $S_9$ arises from
the permutation
$$\beta = \begin{bmatrix}1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 \\
2 & 3 & 1 & 4 & 6 & 7 & 5 & 8 & 9 \\ \end{bmatrix}.$$ Its order is
$\lcm{3,3} = 3$. \label{ex:beta}\end{exa}
\begin{exa}

Find a shuffle of a deck of $13$ cards that requires $42$ repeats
to return the cards to their original order.
\end{exa}
\begin{solu}Here is one (of many possible ones). Observe that $7 + 6
= 13$ and $7\times 6 = 42$. We take the permutation
$$(1\ 2\ 3\ 4\ 5\ 6\ 7)(8\ 9\ 10\ 11\ 12\ 13)$$which has order 42.
This corresponds to the following shuffle: For $$i\in \{1, 2, 3,
4, 5, 6, 8, 9, 10, 11, 12\},$$ take the $i$th card to the ($i +
1$)th place, take the $7$th card to the first position and the
13th card to the $8$th position. Query: Of all possible shuffles
of 13 cards, which one takes the longest to restitute the cards to
their original position?
\end{solu}
\begin{exa}
Let a shuffle of a deck of 10 cards be made as follows: The top
card is put at the bottom, the deck is cut in half, the bottom
half is placed on top of the top half, and then the resulting
bottom card is put on top. How many times must this shuffle be
repeated to get the cards in the initial order? Explain.
\end{exa}
\begin{solu}Putting the top card at the bottom corresponds to
$$\begin{bmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7  & 8 & 9 & 10\cr
2 & 3 & 4 & 5 & 6 & 7  & 8 & 9 & 10 & 1 \cr
\end{bmatrix}.
$$ Cutting this new arrangement in half and putting the lower half on top corresponds to
$$\begin{bmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7  & 8 & 9 & 10\cr
 7  & 8 & 9 & 10 & 1 & 2 & 3 & 4 & 5 & 6 \cr
\end{bmatrix}.
$$
Putting the bottom card of this new arrangement on top corresponds
to
$$\begin{bmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7  & 8 & 9 & 10\cr
  6 & 7  & 8 & 9 & 10 & 1 & 2 & 3 & 4 & 5  \cr
\end{bmatrix} = (1\ 6)(2\ 7)(3\ 8)(4\ 9)(5\ 10).
$$The order of this permutation is ${\rm lcm} (2, 2, 2, 2, 2) = 2$, so
in 2 shuffles the cards are restored to their original position.
\end{solu}
The above examples illustrate the general case, given in the
following theorem.
\begin{thm}
Every permutation in $S_n$ can be written as a product of disjoint
cycles.\label{thm:permu_is_prod_cycles}
\end{thm}
\begin{pf}
Let $\tau \in S_n, a_1\in\{1, 2, \ldots, n\}$. Put $\tau ^k (a_1)
= a_{k + 1}, k \geq 0$. Let $a_1, a_2, \ldots , a_s$ be the
longest chain with no repeats. Then we have $\tau (a_s) = a_1$. If
the  $\{a_1, a_2, \ldots , a_s\}$ exhaust $\{1, 2, \ldots, n\}$
then we have $\tau = (a_1\ a_2\ \ldots\ a_s)$. If not, there exist
$b_1\in \{1, 2, \ldots, n\} \setminus\{a_1, a_2, \ldots , a_s\}$.
Again, we find the longest chain of distinct $b_1, b_2, \ldots,
b_t$ such that $\tau (b_k) = b_{k + 1}, k = 1, \ldots, t -1$ and
$\tau (b_t) = b_1.$ If  the $\{a_1, a_2, \ldots , a_s, b_1, b_2,
\ldots, b_t\}$ exhaust all the $\{1, 2, \ldots, n\}$ we have $\tau
= (a_1\ a_2\ \ldots\ a_s)( b_1\ b_2\ \ldots\ b_t)$. If not we
continue the process and find $$\tau = (a_1\ a_2\ \ldots\ a_s)(
b_1\ b_2\ \ldots\ b_t)(c_1\ldots )\ldots . $$This process stops
because we have only $n$ elements.
\end{pf}
\begin{df}
A {\em transposition} is a cycle of length $2$.\footnote{A cycle
of length $2$ should more appropriately be called a {\em
bicycle}.}
\end{df}


\begin{exa}
The cycle $(23468)$ can be written as a product of transpositions
as follows
$$(23468) = (28)(26)(24)(23).$$ Notice that this decomposition as
the product of transpositions is not unique. Another decomposition
is $$(23468) = (23)(34)(46)(68).$$
\end{exa}
\begin{lem}
Every permutation is the product of transpositions.
\label{lem:permu_is_prod_bicycles}\end{lem}
\begin{pf}
It is enough to observe that
$$ (a_1\ a_2\ \ldots\ a_s) = (a_1\ a_s)(a_1\ a_{s - 1}) \cdots (a_1\ a_2)
$$and appeal to Theorem \ref{thm:permu_is_prod_cycles}.
\end{pf}
Let  $\sigma\in S_n$ and let $(i, j)\in \{1, 2, \ldots, n\}^2, \ \
i\neq j$. Since $\sigma$ is a bijection, the map $\{i, j\} \mapsto
\{\sigma(i), \sigma(j)\}$ is a bijection of the two-element subsets
of $\{1, 2, \ldots, n\}$ onto themselves, so every unordered pair
occurs exactly once among the numerators and exactly once among the
denominators of the product below. This means that $$ \left|\prod_{1\leq i<j\leq n}
\frac{\sigma(i)-\sigma(j)}{i-j}\right| = 1 . $$
\begin{df} Let  $\sigma\in S_n$. We define the {\em sign} $\sgn\sigma$ of $\sigma$ as
$$\sgn{\sigma} = \prod_{1\leq i<j\leq n}
\frac{\sigma(i)-\sigma(j)}{i-j} = (-1)^{\sigma}.$$ If
$\sgn{\sigma} = 1$, then we say that $\sigma$ is an {\em even
permutation}, and if $\sgn{\sigma} = -1$ we say that $\sigma$ is
an {\em odd permutation}.
\end{df}
\begin{rem}
Notice that in fact $$\sgn{\sigma} =(-1)^{{\bf I}(\sigma)},$$where
${\bf I}(\sigma)=\#\{(i,j)\,|\,1\leq i<j\leq n\;{\rm and
}\;\sigma(i)>\sigma(j)\}$, i.e., ${\bf I}(\sigma)$ is the number
of inversions that $\sigma$ effects to the identity permutation
$\idefun$.\end{rem}
\begin{exa}
The transposition $(1\ 2)$ has one inversion.
\end{exa}
\begin{lem}
 For any transposition $(k\ l)$ we have $\sgn{(k\ l)} = -1$.
\end{lem}
\begin{pf}
Let $\tau$ be transposition that exchanges $k$ and $l$, and assume
that $k<l$:
$$
 \tau=
\begin{bmatrix}
 1&2&\dots&k-1&k&k+1&\dots&l-1&l&l+1&\dots&n\\
 1&2&\dots&k-1&l&k+1&\dots&l-1&k&l+1&\dots&n
\end{bmatrix}
$$
Let us count the number of inversions of $\tau$:
\begin{itemize}
 \item The pairs $(i,j)$ with $i\in\{1, 2, \ldots, k - 1\}\cup\{l, l + 1, \ldots, n\}$ and $i<j$
 do not suffer an inversion;
 \item The pair $(k,j)$ with $k<j$ suffers an inversion if and only if
 $j\in \{k+1, k + 2, \ldots , l\}$, making  $l-k$
inversions;
 \item If $i\in\{k+1, k + 2, \ldots , l-1\}$ and $i<j$, $(i,j)$
 suffers an
inversion if and only if $j=l$, giving $l-1-k$ inversions.
\end{itemize}
This gives a total of ${\bf I}(\tau)=(l-k)+(l-1-k)=2(l-k-1)+1$
inversions when $k < l$. Since this number is odd, we have
$\sgn{\tau}=(-1)^{{\bf I}(\tau)}=-1$. In general we see that the
transposition $(k\ l)$ has $2|k - l| - 1$ inversions.
\end{pf}
\begin{thm} \label{thm:signum_is_a_homomorphism} Let $(\sigma, \tau)\in S_n
^2$. Then
$$ \sgn{\tau\sigma} = \sgn{\tau}\sgn{\sigma}. $$
\end{thm}
\begin{pf}
We have
$$\begin{array}{lll}
\sgn{\sigma\tau} & = &  \prod_{1\leq i<j\leq n}
\frac{(\sigma\tau)(i)-(\sigma\tau)(j)}{i-j} \\ & = &
\left(\prod_{1\leq i<j\leq n}
\frac{\sigma(\tau(i))-\sigma(\tau(j))}
{\tau(i)-\tau(j)}\right)\cdot \left(\prod_{1\leq i<j\leq n}
\frac{\tau(i)-\tau(j)}{i-j}\right). \end{array}
$$
The second factor on this last equality is clearly  $\sgn{\tau}$,
we must shew that the first factor is $\sgn{\sigma}$. Observe now
that for  $1\leq a<b\leq n$ we have
$$
\frac{\sigma(a)-\sigma(b)}{a-b}=\frac{\sigma(b)-\sigma(a)}{b-a}.
$$Since
$\sigma$ and $\tau$ are permutations, $\exists b \neq a, \ \tau
(i) = a, \tau (j) = b$ and so $\sigma\tau (i) = \sigma (a),
\sigma\tau (j) = \sigma (b)$. Thus
$$ \frac{\sigma(\tau(i))-\sigma(\tau(j))}
{\tau(i)-\tau(j)} = \frac{\sigma (a) - \sigma (b)}{a - b} $$ and
so
$$\prod_{1\leq
i<j\leq n} \frac{\sigma(\tau(i))-\sigma(\tau(j))}
{\tau(i)-\tau(j)} = \prod_{1\leq a<b\leq n}
\frac{\sigma(a)-\sigma(b)} {a - b} = \sgn{\sigma}.$$
\end{pf}
\begin{cor}
The identity permutation is even. If $\tau\in S_n$, then
$\sgn{\tau} = \sgn{\tau^{-1}}$.
\label{cor:idefun_is_even}\end{cor}
\begin{pf}
Since there are no inversions in $\idefun$, we have $\sgn{\idefun}
= (-1)^0 = 1$. Since $\tau\tau^{-1} = \idefun$, we must have $1 =
\sgn{\idefun} = \sgn{\tau\tau^{-1}} = \sgn{\tau}\sgn{\tau^{-1}} =
(-1)^\tau (-1)^{\tau^{-1}}$ by Theorem
\ref{thm:signum_is_a_homomorphism}. Since the values on the
righthand of this last equality are $\pm 1$, we must have
$\sgn{\tau}=\sgn{\tau^{-1}}$.
\end{pf}
\begin{lem}
We have $\sgn{(1\ 2\ \ldots\ l)}=(-1)^{l-1}$.
\label{lem:inv_in_1_through_l}\end{lem}
\begin{pf}
Simply observe that the number of inversions of $(1\ 2\ \ldots\
l)$ is $l- 1$.
\end{pf}
\begin{lem} \label{lem:signum_through_cycles}
Let $(\tau, (i_1\ \ldots\ i_l))\in S_n ^2$. Then
$$
\tau(i_1\ \ldots\ i_l)\tau^{-1} = (\tau(i_1)\ \ldots\
\tau(i_l)),$$ and if $\sigma\in S_n$ is a  cycle of length $l$
then
$$\sgn{\sigma}=(-1)^{l-1}.$$
\end{lem}
\begin{pf}
For $1\leq k<l$ we have $(\tau(i_1\ \ldots\
i_l)\tau^{-1})(\tau(i_k)) = \tau((i_1\ \ldots\ i_l)(i_k)) =
\tau(i_{k+1})$. Also, $(\tau(i_1\ \ldots\ i_l)\tau^{-1})(\tau(i_l))
= \tau((i_1\ \ldots\ i_l)(i_l)) = \tau(i_1)$. For
$i\not\in\{\tau(i_1)\ \ldots\ \tau(i_l)\}$ we have
$\tau^{-1}(i)\not\in\{i_1\ \ldots\ i_l\}$ whence $(i_1\ \ldots\
i_l)(\tau^{-1}(i))=\tau^{-1}(i)$ etc.

\bigskip

Furthermore, write $\sigma=(i_1\ \ldots\ i_l)$. Let $\tau\in S_n$
be such that  $\tau(k)=i_k$ for $1\leq k\leq l$. Then
$\sigma=\tau(1\ 2\ \ldots\ l)\tau^{-1}$ and so we must have
$\sgn{\sigma} = \sgn{\tau}\sgn{(1\ 2\ \ldots\ l)}\sgn{\tau^{-1}}$,
which equals  $\sgn{(1\ 2\ \ldots\ l)}$ by virtue of Theorem
\ref{thm:signum_is_a_homomorphism} and Corollary
\ref{cor:idefun_is_even}. The result now follows by appealing to
Lemma \ref{lem:inv_in_1_through_l}.
\end{pf}
\begin{cor}\label{cor:signum_through_bikes}
Let $\sigma=\sigma_1\sigma_2\cdots\sigma_r$ be a product of
disjoint cycles, each of length  $l_1,\ldots,l_r$, respectively.
Then
$$
\sgn{\sigma} = (-1)^{\sum_{i=1}^r (l_i-1)}.
$$Hence, the product of two even permutations is even, the product
of two odd permutations is even, and the product of an even
permutation and an odd permutation is odd.
\end{cor}
\begin{pf}
This follows at once from Theorem
\ref{thm:signum_is_a_homomorphism} and Lemma
\ref{lem:signum_through_cycles}.
\end{pf}
\begin{exa}
The cycle $(4678)$ is an odd cycle; the cycle $(1)$ is an even
cycle; the cycle $(12345)$ is an even cycle.
\end{exa}
\begin{cor}\label{cor:permu_is_prod_of_bikes}
Every permutation can be decomposed as a product of
transpositions. This decomposition is not necessarily unique, but
its parity is unique.
\end{cor}
\begin{pf}
This follows from Theorem \ref{thm:permu_is_prod_cycles}, Lemma
\ref{lem:permu_is_prod_bicycles}, and Corollary
\ref{cor:signum_through_bikes}.
\end{pf}
\begin{exa}[The $15$ puzzle] Consider a grid with $16$ squares, as shewn in (\ref{eq:15_puzzle_1}), where $15$ squares are numbered $1$ through $15$ and the 16th slot is
empty.
\begin{equation}\label{eq:15_puzzle_1}
\begin{array}{|c|c|c|c|}
\hline 1 & 2 & 3 & 4 \\ \hline 5 & 6 & 7 & 8 \\ \hline 9 & 10 & 11
& 12 \\ \hline 13 & 14 & 15 & \\ \hline
\end{array}\end{equation}
In this grid we may successively exchange the empty slot with any
of its neighbours, as for example
\begin{equation}\label{eq:15_puzzle_2}
\begin{array}{|c|c|c|c|}
\hline 1 & 2 & 3 & 4 \\ \hline 5 & 6 & 7 & 8 \\ \hline 9 & 10 & 11
& 12 \\ \hline 13 & 14 &  & 15 \\ \hline
\end{array}.
\end{equation}We ask whether through a series of valid moves we may arrive at the following position.
\begin{equation}\label{eq:15_puzzle_3}
\begin{array}{|c|c|c|c|}
\hline 1 & 2 & 3 & 4 \\ \hline 5 & 6 & 7 & 8 \\ \hline 9 & 10 & 11
& 12 \\ \hline 13 & 15 & 14 &  \\ \hline
\end{array}
\end{equation}
\end{exa}
\begin{solu}Let us shew that this is impossible. Each time we move a
square to the empty position, we make transpositions on the set
$\{1,2,\ldots,16\}$. Thus at each move, the permutation is
multiplied by a transposition and hence it changes sign. Observe
that the permutation corresponding to the square in
(\ref{eq:15_puzzle_3}) is  $(14\ 15)$ (the positions 14th and 15th
are transposed) and hence it is an odd permutation. But we claim
that the empty slot can only return to its original position after
an even permutation. To see this paint the grid as a checkerboard:
\begin{equation}\label{eq:15_puzzle_4}
\begin{array}{|c|c|c|c|} \hline
B & R & B & R\\ \hline R & B & R & B \\ \hline B & R & B & R\\
\hline R & B & R & B \\ \hline
\end{array}
\end{equation}
We see that after each move, the empty square changes from black
to red, and thus after an odd number of moves the empty slot is on
a red square. Thus the empty slot cannot return to its original
position in an odd number of moves. This completes the proof.
\end{solu}
\section*{\psframebox{Homework}}

\begin{pro}
Decompose the permutation
$$\begin{bmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 \cr
2 & 3 & 4 & 1 & 5 & 8 & 6 & 7 & 9 \end{bmatrix}$$as a product of
disjoint cycles and find its order. \begin{answer} This is clearly
$(1\ 2\ 3 \ 4)(6\ 8\ 7) $ of order ${\rm lcm} (4, 3) = 12$.
\end{answer}
\end{pro}

\section{Determinants} There are many ways of
developing the theory of determinants. We will choose a way that
will allow us to deduce the properties of determinants with ease,
but has the drawback of being computationally cumbersome. In the
next section we will shew that our way of defining determinants is
equivalent to a more computationally friendly one.
\bigskip

It may be pertinent here to quickly review some properties of
permutations. Recall that if $\sigma \in S_n$ is a cycle of length
$l$, then its signum $\sgn{\sigma} = \pm 1$ depending on  the
parity of $l - 1$. For example, in $S_7$, $$\sigma = (1 \ 3 \ 4 \
7 \ 6)
$$has length $5$, and the parity of $5 - 1 = 4$ is even, and so we
write $\sgn{\sigma} = +1$. On the other hand, $$\tau = (1 \ 3 \ 4
\ 7 \ 6 \ 5)   $$has length $6$, and the parity of $6 - 1 = 5$ is
odd, and so we write $\sgn{\tau} = -1$.

\bigskip

Recall also that if $(\sigma, \tau)\in S_n ^2$, then
$$ \sgn{\tau\sigma} = \sgn{\tau}\sgn{\sigma}. $$ Thus from the
above two examples
$$\sigma\tau = (1 \ 3 \ 4 \
7 \ 6)(1 \ 3 \ 4 \ 7 \ 6\ 5)
$$has signum $\sgn{\sigma} \sgn{\tau} = (+1)(-1) = -1$. Observe in
particular that for the identity permutation $\idefun\in S_n$ we
have $\sgn{\idefun} = +1$.

\begin{df}
Let $A\in\mat{n\times n}{ \BBF }, A = [a_{ij}]$ be a square matrix.
The {\em determinant of $A$} is defined and denoted by the  sum $$
\det A = \sum _{\sigma \in S_n} \sgn{\sigma} a_{1\sigma
(1)}a_{2\sigma (2)} \cdots a_{n\sigma (n)}.$$ \index{determinant}
\end{df}
\begin{rem}
The determinantal sum has $n!$ summands.
\end{rem}
\begin{exa}
If $n = 1$, then $S_1$ has only one member, $\idefun$, where
$\idefun (1) = 1$. Since $\idefun$ is an even permutation, $\sgn{
\idefun} = (+1)$. Thus if $A = (a_{11})$, then
$$\det A
 = a_{11}.$$
\end{exa}
\begin{exa}
If $n = 2$, then  $S_2$ has $2! = 2$   members,  $\idefun$ and
$\sigma =
 (1 \ 2)$. Observe that $\sgn{\sigma} = -1$. Thus if $$A = \begin{bmatrix}a_{11} & a_{12} \cr a_{21} & a_{22}\cr
 \end{bmatrix}$$ then
 $$\det A  = \sgn{\idefun}a_{1\idefun (1)}a_{2\idefun (2)} + \sgn{\sigma}a_{1\sigma (1)}a_{2\sigma (2)} = a_{11}a_{22} - a_{12}a_{21}.    $$
\end{exa}
\begin{exa}
From the above formula for $2\times 2$ matrices it follows that
$$\begin{array}{lll}\det A & = & \det \begin{bmatrix} 1 & 2 \cr 3 & 4 \cr   \end{bmatrix} \\ & = &  (1)(4) - (3)(2) = -2,\end{array} $$
$$\begin{array}{lll}\det B & = & \det \begin{bmatrix} -1 & 2 \cr 3 & 4 \cr
\end{bmatrix} = (-1)(4) - (3)(2)\\ &  = &  -10, \end{array}$$ and $$ \det (A + B) = \det \begin{bmatrix} 0 & 4 \cr 6 & 8 \cr   \end{bmatrix}
 = (0)(8) - (6)(4) = -24.$$Observe in particular that $\det (A + B) \neq \det A + \det B$.
\end{exa}
\begin{exa}
If $n = 3$, then  $S_3$ has $3! = 6$   members:

$$\idefun, \tau_1 = (2 \ 3), \tau_2 = (1 \ 3), \tau_3 = (1 \ 2), \sigma_1 =  (1 \ 2 \ 3), \sigma_2 =  (1 \ 3 \ 2). $$
Observe that $\idefun, \sigma_1, \sigma_2$ are even, and $\tau_1,
\tau_2, \tau_3$ are odd. Thus if $$A = \begin{bmatrix}a_{11} &
a_{12} & a_{13} \cr a_{21} & a_{22} & a_{23}\cr a_{31} & a_{32} &
a_{33}\cr
\end{bmatrix}$$then
 $$\begin{array}{lll}\det A  &  =  & \sgn{\idefun}a_{1\idefun (1)}a_{2\idefun (2)}a_{3\idefun (3)} + \sgn{\tau_1}a_{1\tau_1 (1)}a_{2\tau_1 (2)}a_{3\tau_1
 (3)}\\
& & \quad + \sgn{\tau_2}a_{1\tau_2 (1)}a_{2\tau_2 (2)}a_{3\tau_2
(3)} + \sgn{\tau_3}a_{1\tau_3 (1)}a_{2\tau_3 (2)}a_{3\tau_3 (3)}
\\
& & \quad + \sgn{\sigma_1}a_{1\sigma_1 (1)}a_{2\sigma_1
(2)}a_{3\sigma_1 (3)} + \sgn{\sigma_2}a_{1\sigma_2
(1)}a_{2\sigma_2 (2)}a_{3\sigma_2 (3)}
\\
&  = &  a_{11}a_{22}a_{33} - a_{11}a_{23}a_{32}  - a_{13}a_{22}a_{31}\\
& & \qquad - a_{12}a_{21}a_{33}  + a_{12}a_{23}a_{31} + a_{13}a_{21}a_{32}. \\
\end{array}
$$
\end{exa}
\begin{thm}[Row-Alternancy of Determinants]\label{thm:alternating_determinants}
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$. If $B \in
\mat{n\times n}{ \BBF }, B = [b_{ij}]$ is the matrix obtained by
interchanging  the $s$-th row of $A$ with its $t$-th row, then $\det
B = -\det A$.
\end{thm}
\begin{pf}
Let $\tau$ be the transposition
$$\tau =
\begin{bmatrix} s & t \cr t & s \cr
\end{bmatrix}.
$$Then $\sigma\tau (a) = \sigma (a)$ for $a\in\{1, 2, \ldots , n\}\setminus
\{s,t\}$. Also, $\sgn{\sigma\tau} = \sgn{\sigma}\sgn{\tau} =
-\sgn{\sigma}$.  As $\sigma$ ranges through all permutations of
$S_n$, so does
 $\sigma\tau$, hence
$$\begin{array}{lll}\det B & = & \sum _{\sigma\in S_n}\sgn{\sigma} b_{1\sigma (1)}b_{2\sigma (2)}\cdots
b_{s\sigma (s)}\cdots b_{t\sigma (t)} \cdots b_{n\sigma (n)} \\ &
= &  \sum _{\sigma\in S_n}\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma
(2)}\cdots a_{t\sigma (s)}\cdots a_{s\sigma (t)} \cdots a_{n\sigma (n)} \\
& = & -\sum _{\sigma\in S_n}\sgn{\sigma\tau }a_{1\sigma\tau
(1)}a_{2\sigma\tau (2)}\cdots a_{s\sigma\tau
(s)}\cdots a_{t\sigma\tau  (t)} \cdots a_{n\sigma\tau (n)} \\
& = & -\sum _{\lambda \in S_n} \sgn{\lambda } a_{1\lambda
(1)}a_{2\lambda  (2)} \cdots a_{n\lambda  (n)} \\
& = & -\det A.
 \end{array} $$
\end{pf}
\begin{cor}If $A_{(r:k)}, 1 \leq k \leq n$ denote the rows of $A$
and $\sigma \in S_n$, then $$\det \begin{bmatrix} A_{(r:\sigma
(1))} \cr A_{(r:\sigma (2))} \cr \vdots \cr A_{(r:\sigma (n))}\cr
\end{bmatrix} = (\sgn{\sigma})\det A. $$An analogous result holds
for columns. \label{cor:alternating_determinants}\end{cor}
\begin{pf}
Apply the result of Theorem \ref{thm:alternating_determinants}
multiple times.
\end{pf}
\begin{thm} \label{thm:determinant_of_transpose}
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$. Then $$\det A^T =
\det A.$$
\end{thm}
\begin{pf}
Let $C = A^T$. By definition $$\begin{array}{lll}\det A^T & = &
\det C \\ & = &  \sum _{\sigma \in S_n} \sgn{\sigma} c_{1\sigma
(1)}c_{2\sigma (2)} \cdots c_{n\sigma (n)} \\ &   = &  \sum
_{\sigma \in S_n} \sgn{\sigma} a_{\sigma (1)1}a_{\sigma (2)2}
\cdots a_{\sigma (n)n}. \end{array}$$But the product $a_{\sigma
(1)1}a_{\sigma (2)2} \cdots a_{\sigma (n)n}$ also appears in $\det
A$ with the same signum $\sgn{\sigma}$, since the permutation
$$  \begin{bmatrix}\sigma (1) & \sigma (2) & \cdots & \sigma (n)\cr 1 & 2 & \cdots & n \cr
\end{bmatrix}$$is the inverse of the permutation
$$  \begin{bmatrix}1 & 2 & \cdots & n \cr \sigma (1) & \sigma (2) & \cdots & \sigma (n)\cr
\end{bmatrix}.$$
\end{pf}
\begin{cor}[Column-Alternancy of Determinants]\label{cor:alternating_determinants_1}
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$. If $C \in
\mat{n\times n}{ \BBF }, C = [c_{ij}]$ is the matrix obtained by
interchanging the $s$-th column of $A$ with its $t$-th column, then
$\det C = -\det A$.
\end{cor}
\begin{pf}
This follows upon combining Theorem
\ref{thm:alternating_determinants} and Theorem
\ref{thm:determinant_of_transpose}.
\end{pf}
\begin{thm}[Row Homogeneity of Determinants]\label{thm:row_homogeneity_of_determinants}
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$ and $\alpha\in
\BBF$. If $B \in \mat{n\times n}{ \BBF }, B = [b_{ij}]$ is the
matrix obtained by multiplying the $s$-th row of $A$ by $\alpha$,
then
$$\det B = \alpha \det A.
$$
\end{thm}
\begin{pf}
Simply observe that $$\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma
(2)}\cdots \alpha a_{s\sigma (s)} \cdots a_{n\sigma (n)} = \alpha
\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma (2)}\cdots  a_{s\sigma (s)}
\cdots a_{n\sigma (n)}.
$$

\end{pf}
\begin{cor}[Column Homogeneity of Determinants]\label{cor:column_homogeneity_of_determinants}
If $C \in \mat{n\times n}{ \BBF }, C = (C_{ij})$ is the matrix
obtained by multiplying the $s$-th column of $A$ by $\alpha$, then
$$\det C = \alpha \det A.
$$
\end{cor}
\begin{pf}
This follows upon using Theorem \ref{thm:determinant_of_transpose}
and Theorem \ref{thm:row_homogeneity_of_determinants}.
\end{pf}
\begin{rem}
It follows from Theorem \ref{thm:row_homogeneity_of_determinants}
and Corollary \ref{cor:column_homogeneity_of_determinants} that if a
row (or column) of a matrix consists of $0_{\BBF }$s only, then the
determinant of this matrix is $0_{\BBF }$.
\end{rem}
\begin{exa}
$$\det
\begin{bmatrix} x & 1 & a \cr x^2 & 1 & b \cr x^3 & 1 & c
\cr\end{bmatrix} = x\det\begin{bmatrix} 1 & 1 & a \cr x & 1 & b
\cr x^2 & 1 & c \cr\end{bmatrix}. $$
\end{exa}

\begin{cor}
$$\det (\alpha A) = \alpha ^n\det A.$$
\end{cor}
\begin{pf}
Since there are $n$ columns, we are able to pull out one factor of
$\alpha$ from each one.
\end{pf}
\begin{exa}
Recall that a matrix $A$ is {\em skew-symmetric} if $A =
 -A^T$. Let $A \in\mat{2001\times 2001}{\BBR}$ be
 skew-symmetric. Prove that $\det A = 0.$
 \end{exa}\begin{solu}We have $$\det A = \det (-A^T) = (-1)^{2001}\det A^T = -\det A,$$
 and so $2\det A = 0,$ from where $\det A = 0$.
 \end{solu}
 \begin{lem}[Row-Linearity and Column-Linearity of Determinants]
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$. For a fixed row
$s$, suppose that $a_{sj} = b_{sj} + c_{sj}$ for each $j \in [1;n]$.
Then
$$\begin{array}{l}\tiny{\det \begin{bmatrix} a_{11}&  a_{12}&  \cdots &
a_{1n} \cr a_{21}&  a_{22}&  \cdots &  a_{2n} \cr \vdots & \vdots
& \cdots & \vdots & \vdots \cr a_{(s - 1)1}& a_{(s - 1)2}& \cdots
& a_{(s -1)n} \cr b_{s1} + c_{s1}& b_{s2} +c_{s2}& \cdots & b_{sn}
+c_{sn} \cr a_{(s+1)1}& a_{(s+1)2}& \cdots & a_{(s+1)n} \cr
\vdots & \vdots & \cdots & \vdots & \vdots \cr a_{n1}& a_{n2}&
\cdots & a_{nn} \cr
\end{bmatrix} } \vspace{2mm}\\   \tiny{=  \det   \begin{bmatrix} a_{11}&  a_{12}&  \cdots &  a_{1n} \cr
a_{21}&  a_{22}&  \cdots &  a_{2n} \cr \vdots & \vdots & \cdots &
\vdots & \vdots \cr a_{(s - 1)1}& a_{(s - 1)2}& \cdots & a_{(s
-1)n} \cr b_{s1}& b_{s2}& \cdots & b_{sn} \cr a_{(s+1)1}&
a_{(s+1)2}& \cdots & a_{(s+1)n} \cr  \vdots & \vdots & \cdots &
\vdots & \vdots \cr a_{n1}& a_{n2}& \cdots &  a_{nn} \cr
\end{bmatrix}}\vspace{2mm}\\ \qquad  \tiny{+ \det \begin{bmatrix} a_{11}&  a_{12}&  \cdots & a_{1n}
\cr a_{21}&  a_{22}&  \cdots &  a_{2n} \cr \vdots & \vdots &
\cdots & \vdots & \vdots \cr a_{(s - 1)1}& a_{(s - 1)2}& \cdots &
a_{(s -1)n} \cr c_{s1}& c_{s2}& \cdots & c_{sn} \cr a_{(s + 1)1}&
a_{(s+1)2}& \cdots & a_{(s+1)n} \cr  \vdots & \vdots & \cdots &
\vdots & \vdots \cr a_{n1}& a_{n2}& \cdots &  a_{nn} \cr
\end{bmatrix}}.
 \end{array} $$
 \label{lem:row_linearity_of_determinants}
An analogous result holds for columns.



 \end{lem}
\begin{pf}
Put $$\tiny{S =  \begin{bmatrix} a_{11}&  a_{12}&  \cdots & a_{1n}
\cr a_{21}&  a_{22}&  \cdots &  a_{2n} \cr \vdots & \vdots &
\cdots & \vdots & \vdots \cr a_{(s - 1)1}& a_{(s - 1)2}& \cdots &
a_{(s -1)n} \cr b_{s1} + c_{s1}& b_{s2} +c_{s2}& \cdots & b_{sn}
+c_{sn} \cr a_{(s+1)1}& a_{(s+1)2}& \cdots & a_{(s+1)n} \cr \vdots
& \vdots & \cdots & \vdots & \vdots \cr a_{n1}& a_{n2}& \cdots &
a_{nn} \cr
\end{bmatrix},} $$
$$\tiny{T =  \begin{bmatrix} a_{11}&  a_{12}&  \cdots &  a_{1n} \cr
a_{21}&  a_{22}&  \cdots &  a_{2n} \cr \vdots & \vdots & \cdots &
\vdots & \vdots \cr a_{(s - 1)1}& a_{(s - 1)2}& \cdots & a_{(s
-1)n} \cr b_{s1}& b_{s2}& \cdots & b_{sn} \cr a_{(s+1)1}&
a_{(s+1)2}& \cdots & a_{(s+1)n} \cr  \vdots & \vdots & \cdots &
\vdots & \vdots \cr a_{n1}& a_{n2}& \cdots &  a_{nn} \cr
\end{bmatrix}} $$ and $$\tiny{U = \begin{bmatrix} a_{11}&  a_{12}&  \cdots & a_{1n}
\cr a_{21}&  a_{22}&  \cdots &  a_{2n} \cr \vdots & \vdots &
\cdots & \vdots & \vdots \cr a_{(s - 1)1}& a_{(s - 1)2}& \cdots &
a_{(s -1)n} \cr c_{s1}& c_{s2}& \cdots & c_{sn} \cr a_{(s + 1)1}&
a_{(s+1)2}& \cdots & a_{(s+1)n} \cr  \vdots & \vdots & \cdots &
\vdots & \vdots \cr a_{n1}& a_{n2}& \cdots &  a_{nn} \cr
\end{bmatrix}.}  $$Then
$${\footnotesize \begin{array}{lll}\det S &  = & \sum _{\sigma\in S_n}\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma (2)}\cdots a_{(s-1)\sigma (s - 1)}(b_{s\sigma
(s)}\\ & & \qquad +  c_{s\sigma (s)})a_{(s+1)\sigma (s + 1)}
\cdots a_{n\sigma (n)} \\ & = &
 \sum _{\sigma\in S_n}\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma (2)}\cdots a_{(s-1)\sigma (s - 1)}b_{s\sigma
(s)}a_{(s+1)\sigma (s + 1)}   \cdots a_{n\sigma (n)} \\ & &  +
\sum _{\sigma\in S_n}\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma
(2)}\cdots a_{(s-1)\sigma (s - 1)}c_{s\sigma
(s)}a_{(s+1)\sigma (s + 1)}   \cdots a_{n\sigma (n)}  \\
& = & \det T + \det U.
\end{array} } $$
By applying the above argument to $A^T$, we obtain the result for
columns.

\end{pf}

\begin{lem}
If two rows or two columns of $A \in \mat{n\times n}{ \BBF }, A =
[a_{ij}]$ are identical, then $\det A = 0_{\BBF }$.
\label{lem:determinant_with_two_identical_rows}\end{lem}
\begin{pf}
Suppose $a_{sj} = a_{tj}$ for $s\neq t$ and for all $j\in [1;n]$.
In particular, this means that for any $\sigma\in S_n$ we have
$a_{s\sigma (t)} = a_{t\sigma (t)}$ and $a_{t\sigma (s)} =
a_{s\sigma (s)}$.  Let $\tau$ be the transposition
$$\tau =
\begin{bmatrix} s & t \cr t & s \cr
\end{bmatrix}.
$$Then $\sigma\tau (a) = \sigma (a)$ for $a\in\{1, 2, \ldots , n\}\setminus
\{s,t\}$. Also, $\sgn{\sigma\tau} = \sgn{\sigma}\sgn{\tau} =
-\sgn{\sigma}$. As $\sigma$ runs through all even permutations,
$\sigma\tau$ runs through all odd permutations, and vice versa.
Therefore
$$\begin{array}{lll}\det A & = & \sum _{\sigma \in S_n}\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma (2)}\cdots a_{s\sigma
(s)}\cdots a_{t\sigma (t)} \cdots a_{n\sigma (n)} \\
& = &  \sum _{\stackrel{\sigma \in S_n}{\sgn{\sigma} =
1}}\left(\sgn{\sigma} a_{1\sigma (1)}a_{2\sigma (2)}\cdots
a_{s\sigma (s)}\cdots a_{t\sigma (t)} \cdots a_{n\sigma (n)}\right.\\
& & \left. \quad + \sgn{\sigma\tau} a_{1\sigma\tau
(1)}a_{2\sigma\tau (2)}\cdots a_{s\sigma\tau (s)}\cdots
a_{t\sigma\tau (t)} \cdots
a_{n\sigma\tau (n)}\right) \\
& = &  \sum _{\stackrel{\sigma \in S_n}{\sgn{\sigma} =
1}}\sgn{\sigma}\left( a_{1\sigma (1)}a_{2\sigma (2)}\cdots
a_{s\sigma (s)}\cdots a_{t\sigma (t)} \cdots a_{n\sigma (n)}\right.\\
& & \left. \quad -  a_{1\sigma (1)}a_{2\sigma (2)}\cdots
a_{s\sigma (t)}\cdots a_{t\sigma (s)} \cdots
a_{n\sigma (n)}\right) \\
& = &  \sum _{\stackrel{\sigma \in S_n}{\sgn{\sigma} =
1}}\sgn{\sigma}\left( a_{1\sigma (1)}a_{2\sigma (2)}\cdots
a_{s\sigma (s)}\cdots a_{t\sigma (t)} \cdots a_{n\sigma (n)}\right.\\
& & \left. \quad -  a_{1\sigma (1)}a_{2\sigma (2)}\cdots
a_{t\sigma (t)}\cdots a_{s\sigma (s)} \cdots
a_{n\sigma (n)}\right) \\
& = & 0_{\BBF }.
\end{array}$$
Arguing on $A^T$ will yield the analogous result for the columns.
\end{pf}
\begin{cor}
If two rows or two columns of $A \in \mat{n\times n}{ \BBF }, A =
[a_{ij}]$ are proportional, then $\det A = 0_{\BBF }$.
\label{cor:determinant_with_two_proportional_rows}\end{cor}
\begin{pf}
Suppose $a_{sj} = \alpha a_{tj}$ for $s\neq t$ and for all $j\in
[1;n]$. If $B$ is the matrix obtained by pulling out the factor
$\alpha$ from the $s$-th row then $\det A = \alpha \det B$. But now
the $s$-th and the $t$-th rows in $B$ are identical, and so $\det B
= 0_{\BBF }$. Arguing on $A^T$ will yield the analogous result for
the columns.

\end{pf}
\begin{exa}
$$\det \begin{bmatrix} 1 & a & b \cr 1 & a & c \cr 1 & a & d \cr   \end{bmatrix} =
a\det \begin{bmatrix} 1 & 1 & b \cr 1 & 1 & c \cr 1 & 1 & d \cr
\end{bmatrix} = 0,  $$since on the last determinant the first two columns are
identical.

\end{exa}

 \begin{thm}[Multilinearity of Determinants]\label{thm:multilinearity_of_determinants}
Let $A \in \mat{n\times n}{ \BBF }, A = [a_{ij}]$ and $\alpha\in
\BBF$. If $X \in \mat{n\times n}{ \BBF }, X = (x_{ij})$ is the
matrix obtained by the row transvection  $R_s + \alpha R_t
\rightarrow R_s$ then $\det X = \det A$. Similarly, if $Y \in
\mat{n\times n}{ \BBF }, Y = (y_{ij})$ is the matrix obtained by the
column transvection  $C_s + \alpha C_t \rightarrow C_s$ then $\det Y
= \det A$.
 \end{thm}
 \begin{pf}
For the row transvection it suffices to take $b_{sj} = a_{sj}$,
$c_{sj} = \alpha a_{tj}$ for $j\in [1;n]$ in Lemma
\ref{lem:row_linearity_of_determinants}. With the same notation as
in the lemma, $T = A$, and so, $$\det X = \det T + \det U = \det A
+ \det U.
$$But $U$ has its $s$-th and $t$-th rows proportional ($s\neq t$),
and so by Corollary \ref{cor:determinant_with_two_proportional_rows}
$\det U = 0_{\BBF }.$ Hence $\det X = \det A.$ To obtain the result
for column transvections it suffices now to also apply Theorem
\ref{thm:determinant_of_transpose}.
 \end{pf}


\begin{exa}
Demonstrate, {\em without actually calculating the determinant}
that $$\det\begin{bmatrix}2 & 9 & 9 \cr 4 & 6 & 8 \cr 7 & 4 & 1
\cr
\end{bmatrix}$$is divisible by $13$.
\end{exa}
\begin{solu}Observe that $299, 468$ and $741$ are all divisible by
13. Thus $$\det\begin{bmatrix}2 & 9 & 9 \cr 4 & 6 & 8 \cr 7 & 4 &
1 \cr \end{bmatrix} \stackrel{C_3 + 10C_2 + 100C_1 \rightarrow
C_3}{ = }\det\begin{bmatrix}2 & 9 & 299 \cr 4 & 6 & 468 \cr 7 & 4
& 741 \cr \end{bmatrix} = 13\det\begin{bmatrix}2 & 9 & 23 \cr 4 &
6 & 36 \cr 7 & 4 & 57 \cr \end{bmatrix},$$which shews that the
determinant is divisible by $13$.
\end{solu}

 \begin{thm}
The determinant of a triangular matrix (upper or lower) is the
product of its diagonal elements.
 \end{thm}
 \begin{pf}
Let $A\in\mat{n\times n}{ \BBF }, A = [a_{ij}]$ be a triangular
matrix. Observe that if $\sigma \neq \idefun$ then $a_{i\sigma
(i)}a_{\sigma(i)\sigma^2(i)} = 0_{\BBF }$ occurs in the product
$$a_{1\sigma (1)}a_{2\sigma (2)}
\cdots a_{n\sigma (n)}.  $$Thus $$\begin{array}{lll}\det A & = &
\sum _{\sigma \in S_n}\sgn{\sigma}a_{1\sigma (1)}a_{2\sigma
(2)}\cdots a_{n\sigma (n)}\\ &  = &  \sgn{\idefun}a_{1\idefun
(1)}a_{2\idefun (2)}\cdots a_{n\idefun (n)} = a_{11}a_{22} \cdots
a_{nn}. \end{array}
$$
 \end{pf}
 \begin{exa}
The determinant of the $n\times n$ identity matrix ${\bf I}_n$ over
a field $\BBF$ is $$\det  {\bf I}_n = 1_{\BBF }. $$
 \end{exa}

\begin{exa}
Find $$\det\begin{bmatrix} 1 & 2 & 3 \cr 4 & 5 & 6 \cr  7 & 8 & 9
\cr
\end{bmatrix}.$$
\end{exa}
\begin{solu}We have $$\begin{array}{lll}\det\begin{bmatrix} 1 & 2 &
3 \cr 4 & 5 & 6 \cr  7 & 8 & 9 \cr
\end{bmatrix} & \grstep{\stackrel{C_2 - 2C_1 \rightarrow C_2}{C_3 - 3C_1 \rightarrow C_3}}
& \det\begin{bmatrix} 1 & 0 & 0 \cr 4 & -3 & -6 \cr  7 & -6 &
-12\cr
\end{bmatrix} \\
& = & (-3)(-6)\det\begin{bmatrix} 1 & 0 & 0 \cr 4 & 1 & 1 \cr  7 &
2 & 2\cr
\end{bmatrix} \\
& = & 0,
\end{array}$$
since in this last matrix the second and third columns are
identical and so Lemma
\ref{lem:determinant_with_two_identical_rows} applies.
\end{solu}
\begin{thm}
Let $(A, B)\in (\mat{n\times n}{ \BBF })^2$. Then $$\det (AB) =
(\det A)(\det B).
$$
\label{thm:product_of_determinants}\end{thm}
\begin{pf}
Put $D = AB, D = (d_{ij}), d_{ij} = \sum _{k = 1} ^n
a_{ik}b_{kj}$. If $A_{(c:k)}, D_{(c:k)}, 1 \leq k \leq n$ denote
the columns of $A$ and $D$, respectively, observe that $$
D_{(c:k)} = \sum _{l = 1} ^n b_{lk}A_{(c:l)}, \    \  1 \leq k
\leq n.
$$
Applying Corollary \ref{cor:column_homogeneity_of_determinants}
and Lemma \ref{lem:row_linearity_of_determinants} multiple times,
we obtain
$$\begin{array}{lll}\det D  & =  & \det (D_{(c:1)},  D_{(c:2)}, \ldots , D_{(c:n)}) \\ &
=&  \sum _{j_1 = 1} ^n\sum _{j_2 = 1} ^n \cdots \sum _{j_n = 1} ^n
b_{1j_1}b_{2j_2}\cdots b_{nj_n}\\ & & \quad \cdot\det
(A_{(c:j_1)}, A_{(c:j_2)}, \ldots , A_{(c:j_n)}). \end{array}
$$
By Lemma \ref{lem:determinant_with_two_identical_rows}, if any two
of the $A_{(c:j_l)}$ are identical, the determinant on the right
vanishes. So each one of the $j_l$ is different in the
non-vanishing terms and so the map
$$\fun{\sigma}{l}{j_l}{\{1, 2, \ldots , n\}}{\{1, 2, \ldots , n\}}
$$is a permutation. Here $j_l = \sigma (l)$. Therefore, for the non-vanishing $$\det (A_{(c:j_1)},  A_{(c:j_2)},
\ldots , A_{(c:j_n)})$$ we have in view of Corollary
\ref{cor:alternating_determinants},  $$\begin{array}{lll}\det
(A_{(c:j_1)}, A_{(c:j_2)}, \ldots , A_{(c:j_n)}) & = &
(\sgn{\sigma})\det (A_{(c:1)}, A_{(c:2)}, \ldots , A_{(c:n)}) \\ &
= &(\sgn{\sigma})\det A.\end{array}$$We deduce that
$$\begin{array}{lll}\det (AB) & = & \det D \\ & = & \sum _{j_1 = 1} ^n \cdots \sum _{j_n = 1} ^n
b_{1j_1}b_{2j_2}\cdots b_{nj_n}\det (A_{(c:j_1)},  A_{(c:j_2)},
\ldots , A_{(c:j_n)})\\
&  = & (\det A) \sum _{\sigma\in S_n} (\sgn{\sigma}) b_{1\sigma
(1)}b_{2\sigma (2)}\cdots b_{n\sigma (n)}\\
& = & (\det A)(\det B),
\end{array}$$as we wanted to shew.
\end{pf}
By applying the preceding theorem multiple times we obtain
\begin{cor}
If $A\in\mat{n\times n}{ \BBF }$ and if $k$ is a positive integer
then $$ \det A^k = (\det A)^k.
$$
\end{cor}
\begin{cor}
If $A\in\gl{n}{ \BBF }$ and if $k$ is a positive integer then $\det
A \neq 0_{\BBF }$ and
$$ \det A^{-k} = (\det A)^{-k}.
$$
\end{cor}
\begin{pf}
We have $AA^{-1} = {\bf I}_n$ and so by Theorem
\ref{thm:product_of_determinants} $(\det A)(\det A^{-1}) = 1_{\BBF
}$, from where the result follows.
\end{pf}

\section*{\psframebox{Homework}}
\begin{pro}
 Let $$\Omega =
\det
\begin{bmatrix} bc & ca & ab \cr a & b & c \cr a^2 & b^2 & c^2 \cr\end{bmatrix}
.
$$Without expanding either determinant, prove that
$$\Omega = \det
\begin{bmatrix} 1 & 1 & 1 \cr a^2 & b^2 & c^2 \cr a^3 & b^3 & c^3
\cr\end{bmatrix}.
$$
\begin{answer} Multiplying the first column of the given matrix  by $a$,
its second column by $b$, and its third column by $c$, we obtain
$$abc\Omega =  \det \begin{bmatrix} abc & abc & abc \cr a^2 & b^2 & c^2 \cr a^3 & b^3 & c^3 \cr\end{bmatrix}. $$ We may factor out
$abc$ from the first row of this last matrix thereby obtaining
$$abc\Omega = abc\det
\begin{bmatrix} 1 & 1 & 1 \cr a^2 & b^2 & c^2 \cr a^3 & b^3 & c^3
\cr\end{bmatrix}. $$Upon dividing by $abc$,
$$\Omega = \det
\begin{bmatrix} 1 & 1 & 1 \cr a^2 & b^2 & c^2 \cr a^3 & b^3 & c^3
\cr\end{bmatrix}.
$$
\end{answer}
\end{pro}
\begin{pro}
Demonstrate that $$\Omega = \det\begin{bmatrix} a - b - c & 2a &
2a \cr 2b & b - c - a & 2b \cr 2c & 2c & c - a - b \cr
\end{bmatrix} = (a + b + c)^3.
$$
\begin{answer} Performing $R_1 + R_2 + R_3 \rightarrow R_1$ we have
$$\begin{array}{l}
\Omega = \det\begin{bmatrix} a - b - c & 2a & 2a \cr 2b & b - c -
a & 2b \cr 2c & 2c & c - a - b \cr
\end{bmatrix}\vspace{2mm}\\ \qquad = \det\begin{bmatrix} a + b + c & a + b + c & a + b + c \cr 2b
& b - c - a & 2b \cr 2c & 2c & c - a - b \cr
\end{bmatrix}.\end{array}$$ Factorising $(a + b + c)$ from the first row of
this last determinant, we have
$$\Omega =   (a + b + c)\det\begin{bmatrix} 1 & 1 & 1 \cr 2b
& b - c - a & 2b \cr 2c & 2c & c - a - b \cr
\end{bmatrix}. $$Performing $C_2 - C_1 \rightarrow C_2$ and $C_3 - C_1 \rightarrow
C_3$,
$$\Omega =   (a + b + c)\det\begin{bmatrix} 1 & 0 & 0 \cr 2b
& -b - c - a & 0 \cr 2c & 0 & -c - a - b \cr
\end{bmatrix}. $$This last matrix is triangular, hence $$\Omega = (a + b + c)(-b - c - a)(-c -a - b) = (a + b + c)^3,  $$
as wanted.
\end{answer}
\end{pro}

\begin{pro} After the indicated column operations on a $3\times 3$
 matrix $A$ with $\det A = -540$, matrices $A_1, A_2, \ldots , A_5$
 are successively obtained:
 $$A \stackrel{C_1 + 3C_2 \rightarrow C_1}{\rightarrow} A_1
 \stackrel{C_2 \leftrightarrow C_3}{\rightarrow} A_2 \stackrel{3C_2
 - C_1 \rightarrow C_2}{\rightarrow} A_3 \stackrel{C_1 - 3C_2
 \rightarrow C_1}{\rightarrow} A_4 \stackrel{2C_1  \rightarrow
 C_1}{\rightarrow} A_5$$ Determine  the numerical values
 of $\det A_1, \det A_2, \det A_3, \det A_4$ and $\det A_5.$
 \begin{answer} $\det A_1 = \det A = -540$ by multilinearity. $\det A_2 = -\det
 A_1 = 540$ by alternancy. $\det A_3 = 3\det A_2 = 1620$  by both
 multilinearity and homogeneity from one column. $\det A_4 = \det A_3
 = 1620$ by multilinearity, and $\det A_5 = 2\det A_4 = 3240$ by
 homogeneity from one column.
 \end{answer}
\end{pro}

\begin{pro}
Prove, without actually expanding the determinant, that
$$\det \begin{bmatrix}1 & 2 & 3 & 7 & 0 \cr 6 & 1 & 5 & 14 & 1 \cr 8 & 6 & 1 & 21 & 3 \cr 7 & 3 & 8 & 7 & 1 \cr 2 & 4 & 6 & 0 & 4 \cr  \end{bmatrix}$$
is divisible by $1722$.
\end{pro}


\begin{pro} Let $A, B, C$ be $3\times 3$ matrices with $\det A = 3, \det
B^3 = -8,
 \det C = 2$. Compute (i) $\det ABC$, (ii) $\det 5AC$, (iii) $\det
 A^3B^{-3}C^{-1}$. Express your answers as fractions. \begin{answer} From the given data, $\det B = -2.$ Hence
 $$\det ABC = (\det A)(\det B)(\det C) = -12,$$
 $$\det 5AC = 5^3\det AC = (125)(\det A)(\det C) = 750,$$
 $$(\det A^3B^{-3}C^{-1}) = \frac{(\det A)^3}{(\det B)^3(\det C)} = -\frac{27}{16}.$$
\end{answer}
\end{pro}
\begin{pro}
Shew that $\forall A\in\mat{n\times n}{\BBR},$ $$\exists (X, Y)\in
(\mat{n\times n}{\BBR})^2, (\det X)(\det Y)\neq 0$$ such that
$$A = X + Y.$$ That is, any square matrix over $\BBR$ can be written as a sum of two matrices whose determinant is not zero.\begin{answer} Pick $\lambda \in \BBR \setminus \{0,
a_{11}, a_{22}, \ldots , a_{nn}\}$. Put
$$X = \begin{bmatrix} a_{11} - \lambda & 0 & 0 & \cdots &  0 \cr a_{21}  & a_{22} - \lambda & 0 & \cdots & 0 \cr
a_{31}  & a_{32}  & a_{33} - \lambda & \cdots &  0 \cr \vdots &
\vdots & \vdots & \vdots & \vdots \cr a_{n1} & a_{n2} & a_{n3} &
\cdots & a_{nn} - \lambda
\end{bmatrix}$$and
$$Y = \begin{bmatrix}
\lambda & a_{12} & a_{13} & \cdots & a_{1n} \cr   0 & \lambda &
a_{23} & \cdots & a_{2n} \cr
 0 & 0 &
\lambda & \cdots & a_{3n} \cr \vdots & \vdots & \vdots & \vdots &
\vdots \cr
 0 & 0 &
0 & \cdots & \lambda \cr


\end{bmatrix}$$ Clearly  $A = X + Y$, $\det X = (a_{11} - \lambda)(a_{22} -
\lambda)\cdots (a_{nn} - \lambda) \neq 0$, and $\det Y = \lambda^n
\neq 0$. This completes the proof.
\end{answer}
\end{pro}
\begin{pro}
Prove or disprove! The set $X = \{A\in\mat{n\times n}{ \BBF }: \det
A = 0_{\BBF }\}$ is a vector subspace of $\mat{n\times n}{ \BBF }$.
\begin{answer}
No.
\end{answer}
\end{pro}
\section{Laplace Expansion} We now develop a more
computationally convenient approach to determinants.

\bigskip
Put $$C_{ij} = \sum _{\stackrel{\sigma \in S_n}{\sigma (i) = j}}
(\sgn{\sigma})a_{1\sigma (1)}a_{2\sigma (2)}\cdots a_{n\sigma
(n)}. $$Then
\begin{equation}\label{eq:cofactor}\begin{array}{lll}\det A & = &
\sum _{\sigma \in S_n} (\sgn{\sigma})a_{1\sigma (1)}a_{2\sigma
(2)}\cdots a_{n\sigma (n)}
\\ &  = &   \sum _{i = 1} ^n a_{ij}\sum _{\stackrel{\sigma \in
S_n}{\sigma (i) = j}} (\sgn{\sigma})a_{1\sigma (1)}a_{2\sigma
(2)}\\ & &  \qquad \cdots a_{(i - 1)\sigma (i - 1)}a_{(i +
1)\sigma (i + 1)}\cdots a_{n\sigma (n)} \\& = &  \sum _{i = 1} ^n
a_{ij}C_{ij},
\end{array}\end{equation}is the expansion of $\det A$ along the $j$-th column.
Similarly,
$$\begin{array}{lll}\det A & = &  \sum _{\sigma \in S_n}
(\sgn{\sigma})a_{1\sigma (1)}a_{2\sigma (2)}\cdots a_{n\sigma (n)}
\\ & = &   \sum _{j = 1} ^n a_{ij}\sum _{\stackrel{\sigma \in
S_n}{\sigma (i) = j}} (\sgn{\sigma})a_{1\sigma (1)}a_{2\sigma
(2)}\\ & & \qquad \cdots a_{(i - 1)\sigma (i - 1)}a_{(i + 1)\sigma
(i + 1)}\cdots a_{n\sigma (n)}\\ & = & \sum _{j = 1} ^n
a_{ij}C_{ij},
\end{array}$$is the expansion of $\det A$ along the $i$-th row.
\begin{df}
Let $A\in\mat{n\times n}{ \BBF }, A = [a_{ij}]$. The $ij$-th minor
$A_{ij} \in\mat{(n - 1)\times (n - 1)}{\BBF}$ is the $(n - 1)\times (n - 1)$  matrix
obtained by deleting the $i$-th row and the $j$-th column from $A$.
\end{df}
\begin{exa}
If $$ A = \begin{bmatrix} 1 & 2 & 3 \cr 4 & 5 & 6 \cr  7 & 8 & 9
\cr
\end{bmatrix}$$then, for example,  $$A_{11} =\begin{bmatrix} 5 & 6 \cr 8 & 9 \cr \end{bmatrix}, \
\ \ A_{12} = \begin{bmatrix} 4 & 6 \cr 7 & 9 \cr  \end{bmatrix}, \
\ \ A_{21} = \begin{bmatrix} 2 & 3 \cr 8 & 9 \cr  \end{bmatrix}, \
\ \ A_{22} = \begin{bmatrix} 1 & 3 \cr 7 & 9 \cr  \end{bmatrix}, \
\ \ A_{33} = \begin{bmatrix} 1 & 2 \cr 4 & 5 \cr  \end{bmatrix}.
$$
\end{exa}
\begin{thm}\label{thm:laplace_expansion}
Let $A\in\mat{n\times n}{ \BBF }$. Then $$\det A = \sum _{i = 1} ^n
a_{ij}(-1)^{i + j}\det A_{ij} = \sum _{j = 1} ^n a_{ij}(-1)^{i +
j}\det A_{ij}.
$$
\end{thm}
\begin{pf}
It is enough to shew, in view of  (\ref{eq:cofactor}), that
$$ (-1)^{i + j}\det A_{ij} =  C_{ij}. $$Now,
$$\begin{array}{lll}C_{nn} & = & \sum _{\stackrel{\sigma\in S_n}{\sigma (n) = n}} \sgn{\sigma} a_{1\sigma (1)}a_{2\sigma (2)}\cdots a_{(n-1)\sigma (n-1)}
 \\ & = & \sum _{\tau\in S_{n-1}} \sgn{\tau} a_{1\tau (1)}a_{2\tau (2)}\cdots a_{(n - 1)\tau (n -1)} \\ & = & \det
A_{nn},\end{array}$$since the second sum shewn is the determinant
of the submatrix obtained by deleting the last row and last column
from $A$.

\bigskip


To find $C_{ij}$ for general $ij$ we perform some row and column
interchanges to $A$ in order to bring $a_{ij}$ to the $nn$-th
position. We thus bring the $i$-th row to the $n$-th row by a
series of transpositions, first swapping the $i$-th and the $(i +
1)$-th row, then swapping the new $(i + 1)$-th row and the $(i +
2)$-th row, and so forth until the original $i$-th row makes it to
the $n$-th row. We have made thereby $n - i$ interchanges. To this
new matrix we perform analogous interchanges to the $j$-th column,
thereby making $n - j$ interchanges. We have made a total of $2n -
i - j$ interchanges. Observe that $(-1)^{2n - i - j} = (-1)^{i +
j}$. Call the analogous quantities in the resulting matrix $A',
C'_{nn}, A'_{nn} $. Then
$$C_{ij} = C'_{nn} = \det A'_{nn} = (-1)^{i + j}\det A_{ij},
$$by virtue of Corollary \ref{cor:alternating_determinants}.

\end{pf}
\begin{rem}
It is irrelevant which row or column we choose to expand a
determinant of a square matrix. We always obtain the same result.
The sign pattern is given by $$\begin{bmatrix} + & - & + & - &
\cdots \cr - & + & - & + & \vdots \cr + & - & + & - & \vdots \cr
 \vdots  & \vdots  & \vdots  &  \vdots  & \vdots \cr
\end{bmatrix}
$$
\end{rem}
\begin{exa}
Find $$\det\begin{bmatrix} 1 & 2 & 3 \cr 4 & 5 & 6 \cr  7 & 8 & 9
\cr
\end{bmatrix}$$by expanding along the first row.
\end{exa}
\begin{solu}We have \begin{eqnarray*}\det A  & = & 1(-1)^{1 + 1}\det
\begin{bmatrix} 5 & 6 \cr 8 & 9 \end{bmatrix} + 2(-1)^{1 + 2}\det
\begin{bmatrix} 4 & 6 \cr 7 & 9 \end{bmatrix} + 3(-1)^{1 + 3}\det
\begin{bmatrix} 4 & 5 \cr 7 & 8 \end{bmatrix} \\ &  =  & 1(45 - 48) - 2(36
- 42) + 3(32 - 35 ) = 0. \end{eqnarray*}
\end{solu}

\begin{exa}
 Evaluate the {\em Vandermonde} determinant
 $$\det\begin{bmatrix}1 & 1 & 1 \cr a & b & c \cr a^2 & b^2 & c^2 \cr
 \end{bmatrix}.$$\end{exa}
 \begin{solu}$$\begin{array}{lll}\det\begin{bmatrix}1 & 1 & 1 \cr a & b & c \cr a^2 & b^2 & c^2 \cr \end{bmatrix}
 & = & \det\begin{bmatrix}1 & 0
 & 0 \cr a & b - a & c - a \cr a^2 & b^2 - a^2 & c^2 - a^2\cr \end{bmatrix} \\
 & = & \det \begin{bmatrix}b - a & c - a \cr b^2 - a^2 & c^2 - a^2 \end{bmatrix} \\ &
 = & (b - a)(c - a) \det\begin{bmatrix}1 & 1 \cr b + a & c + a \end{bmatrix} \\ &  = &
 (b - a)(c - a)(c - b).
 \end{array}$$
\end{solu}
 \begin{exa}
Evaluate the determinant
 $$\det A = \det\begin{bmatrix}1 & 2 & 3 & 4 & \cdots & 2000 \cr
 2 & 1 & 2 & 3 & \cdots & 1999 \cr 3 & 2 & 1 & 2 & \cdots & 1998
 \cr 4 & 3 & 2 & 1 & \cdots & 1997 \cr \cdots & \cdots & \cdots &
 \cdots & \cdots & \cdots \cr  2000 & 1999 & 1998 & 1997 & \cdots &
 1 \cr \end{bmatrix}.$$
 \end{exa}\begin{solu}Applying $R_n - R_{n + 1}\rightarrow R_n$ for $1
 \leq n \leq 1999,$ the determinant becomes
 $$\det\begin{bmatrix}-1 & 1 & 1 & 1 & \cdots  & 1 & 1 \cr
 -1 & -1 & 1 & 1 & \cdots & 1 & 1 \cr -1 & -1 & -1 & 1 & \cdots & 1
 & 1 \cr -1 & -1 & -1 & -1 & \cdots & 1 & 1 \cr \cdots & \cdots &
 \cdots & \cdots & \cdots & \cdots & \cdots \cr -1 & -1 & -1 & -1 &
 \cdots & -1 & 1 \cr 2000 & 1999 & 1998 & 1997 & \cdots  & 2 & 1
 \cr \end{bmatrix}.$$

 Applying now $C_n + C_{2000}\rightarrow C_n$ for $1 \leq n \leq
 1999,$ we obtain
 $$\det\begin{bmatrix}0 & 2 & 2 & 2 & \cdots  & 2 & 1 \cr
 0 & 0 & 2 & 2 & \cdots & 2 & 1 \cr 0 & 0 & 0 & 2 & \cdots  & 2 & 1
 \cr 0 & 0 & 0 & 0 & \cdots & 2 & 1 \cr \cdots & \cdots & \cdots &
 \cdots & \cdots & \cdots & \cdots \cr 0 & 0 & 0 & 0 & \cdots & 0 &
 1 \cr 2001 & 2000 & 1999 & 1998 & \cdots  & 3 & 1 \cr \end{bmatrix}.$$ This
 last determinant we expand along the first column. Since the only
 non-zero entry of the first column is the $2001$ on the $2000$-th
 row, we have
 $$2001(-1)^{2000 + 1}\det\begin{bmatrix}2 & 2 & 2 & \cdots  & 2 & 1 \cr
 0 & 2 & 2 & \cdots & 2 & 1 \cr 0 & 0 & 2 & \cdots  & 2 & 1 \cr  0
 & 0 & 0 & \cdots & 2 & 1 \cr  \cdots & \cdots & \cdots & \cdots &
 \cdots & \cdots \cr  0 & 0 & 0 & \cdots & 0 & 1 \cr \end{bmatrix} =
 -2001(2^{1998}).$$
\end{solu}
\begin{df}Let $A\in\mat{n\times n}{ \BBF }$. The {\em classical adjoint} or
{\em adjugate} of $A$ is the $n\times n$ matrix $\adj{A}$ whose
entries are given by \index{matrix!adjoint}
$$[\adj{A}]_{ij} = (-1)^{i + j} \det A_{ji},
$$where $A_{ji}$ is the $ji$-th minor of $A$.
\end{df}
\begin{thm}Let $A\in\mat{n\times n}{ \BBF }$.
Then $$(\adj{A})A = A(\adj{A}) = (\det A){\bf I}_n.    $$
\end{thm}
\begin{pf}
We have $$ \begin{array}{lll} [A(\adj{A})]_{ij} & = & \sum_{k=1}
^n a_{ik}[\adj{A}]_{kj} \\
& = & \sum _{k=1} ^n a_{ik}(-1)^{i + k}\det A_{jk}.
\end{array}$$Now, this last sum is $\det A$ if $i = j$ by virtue of Theorem \ref{thm:laplace_expansion}. If $i\neq
j$ it is $0$, since then the $j$-th row is identical to the $i$-th
row and this determinant is $0_{\BBF }$ by virtue of Lemma
\ref{lem:determinant_with_two_identical_rows}. Thus on the diagonal
entries we get $\det A$ and the off-diagonal entries are $0_{\BBF
}$. This proves the theorem.
\end{pf}
The next corollary follows immediately.
\begin{cor}\label{cor:inverse_via_adjoint}Let $A\in\mat{n\times n}{ \BBF }$.
Then $A$ is invertible if and only if $\det A \neq 0_{\BBF }$ and
$$A^{-1} = \dfrac{\adj{A}}{\det A}.
$$
\end{cor}

\section*{\psframebox{Homework}}

\begin{pro}
Find $$\det\begin{bmatrix} 1 & 2 & 3 \cr 4 & 5 & 6 \cr  7 & 8 & 9
\cr
\end{bmatrix}$$by expanding along the second column.
\begin{answer} We have
\begin{eqnarray*}\det A  & = & 2(-1)^{1 + 2}\det
\begin{bmatrix} 4 & 6 \cr 7 & 9 \end{bmatrix} + 5(-1)^{2 + 2}\det
\begin{bmatrix} 1 & 3 \cr 7 & 9 \end{bmatrix} + 8(-1)^{2 + 3}\det
\begin{bmatrix} 1 & 3 \cr 4 & 6 \end{bmatrix} \\ &  =  & -2(36 - 42) +
5(9 - 21) - 8(6 - 12) = 0. \end{eqnarray*}
\end{answer}
\end{pro}
\begin{pro}
Prove that $\det \begin{bmatrix} a & b & c \cr c &a& b \cr b &c & a
\cr
\end{bmatrix} = a^3+b^3+c^3-3abc. $ This type of matrix is called a
{\em circulant} matrix.
\begin{answer} Simply expand along the first row$$a\det\begin{bmatrix} a & b
\cr c & a
\end{bmatrix} - b\det\begin{bmatrix}
c & b \cr b & a
\end{bmatrix} + c\det\begin{bmatrix}
c & a \cr b & c\end{bmatrix} = a(a^2 -bc ) - b(ca - b^2) + c(c^2 -
ab) = a^3+b^3+c^3-3abc .
$$
\end{answer}

\end{pro}
\begin{pro}Compute the determinant
$$\det \begin{bmatrix}
     1 & 0 & -1 & 1 \cr
     2 & 0 & 0 & 1 \cr
     666 & -3  & -1 & 1000000 \cr
     1 & 0 & 0 &1 \cr
\end{bmatrix}.$$
\begin{answer} Since the second column has three $0$'s, it is advantageous
to expand along it, and thus we are reduced to calculate
$$-3(-1)^{3 + 2} \det \begin{bmatrix}
                1 & -1 & 1 \cr
                2 & 0 & 1 \cr
                1 & 0 & 1 \cr
\end{bmatrix}$$
Expanding this last determinant along the second column, the
original determinant is thus
$$-3(-1)^{3 + 2}(-1)(-1)^{1 + 2}\det \begin{bmatrix}
            2 & 1 \cr
            1 & 1 \cr
\end{bmatrix} = -3(-1)(-1)(-1)(1) = 3.$$
\end{answer}
\end{pro}
\begin{pro}
Prove that
$$\det \begin{bmatrix} x+a & b & c  \cr a & x+b & c \cr a & b & x+c
\cr
\end{bmatrix} = x^2(x+a+b+c). $$
\end{pro}
\begin{pro}
If
$$ \det\begin{bmatrix} 1 & 1 & 1 & 1 \cr x & a & 0 & 0 \cr
x & 0 & b & 0 \cr x & 0 & 0 & c\cr \end{bmatrix}  = 0, $$ and
$xabc\neq 0,$ prove that $$ \frac{1}{x} = \frac{1}{a} + \frac{1}{b}
+ \frac{1}{c}.  $$ \begin{answer} Expanding along the first column,
$$\begin{array}{lll} 0 & = &  \det\begin{bmatrix} 1 & 1 & 1 & 1 \cr x & a & 0 & 0 \cr
x & 0 & b & 0 \cr x & 0 & 0 & c\cr \end{bmatrix} \\
& = & \det\begin{bmatrix}a & 0 & 0 \cr 0 & b & 0 \cr 0 & 0 & c \cr
\end{bmatrix} - x\det\begin{bmatrix} 1 & 1 & 1 \cr 0 & b & 0 \cr 0
& 0 & c \cr \end{bmatrix} \\ & & \qquad  + x\det\begin{bmatrix}1 &
1 & 1 \cr a & 0 & 0 \cr 0 & 0 & c \cr  \end{bmatrix}
- x\det\begin{bmatrix} 1 & 1 & 1 \cr a & 0 & 0 \cr  0 & b & 0 \cr \end{bmatrix} \\
& = & abc - xbc + x\det\begin{bmatrix}1 & 1 & 1 \cr a & 0 & 0 \cr
0 & 0 & c \cr  \end{bmatrix} - x\det\begin{bmatrix} 1 & 1 & 1 \cr
a & 0 & 0 \cr  0 & b & 0 \cr \end{bmatrix}.\\ \end{array}$$
Expanding these  last  two determinants along the third row,
$$\begin{array}{lll} 0 & = & abc - xbc
+ x\det\begin{bmatrix}1 & 1 & 1 \cr a & 0 & 0 \cr 0 & 0 & c \cr
\end{bmatrix}
- x\det\begin{bmatrix} 1 & 1 & 1 \cr a & 0 & 0 \cr  0 & b & 0 \cr \end{bmatrix}\\
& = &   abc - xbc + xc\det \begin{bmatrix} 1 & 1 \cr  a & 0 \cr\end{bmatrix}  + xb\det \begin{bmatrix} 1 & 1 \cr  a & 0 \cr\end{bmatrix} \\
& = & abc - xbc - xca -xab.
     \end{array}$$It follows that $$abc = x(bc + ab + ca), $$whence$$\frac{1}{x} = \frac{bc + ab + ca}{abc} = \frac{1}{a} +\frac{1}{b} + \frac{1}{c},  $$as wanted.

\end{answer}
\end{pro}
\begin{pro}
Consider the matrix $$A = \begin{bmatrix}a & -b & -c & -d \cr b  & a
& d & -c \cr c & -d & a & b \cr d & c & -b & a \cr  \end{bmatrix}.$$
\begin{dingautolist}{202}
\item  Compute $A^TA$. \item   Use the above to prove that $$
\det A = (a^2+b^2+c^2+d^2)^2.$$

\end{dingautolist}
\end{pro}
\begin{pro}
Prove that $$\det \begin{bmatrix}0 & a& b & 0 \cr a & 0 & b & 0
\cr 0 & a & 0 & b \cr 1 & 1 & 1& 1\cr
\end{bmatrix} = 2ab(a-b).
$$
\begin{answer} Expanding along the first row the determinant equals
$$\begin{array}{lll}-a\det\begin{bmatrix} a & b & 0 \cr
0 & 0 & b \cr 1 & 1 & 1 \cr \end{bmatrix} + b\det\begin{bmatrix} a
& 0 & 0 \cr 0 & a & b \cr 1 & 1 & 1 \cr\end{bmatrix}  & = &  ab
\det\begin{bmatrix} a & b \cr 1 & 1  \cr
\end{bmatrix} + ab\det\begin{bmatrix} a & b \cr 1 & 1  \cr
\end{bmatrix}  \\ & =  & 2ab (a - b), \end{array}$$as wanted.
\end{answer}
\end{pro}
\begin{pro}
Demonstrate that  $$\det \begin{bmatrix} a & 0 & b & 0 \cr 0 & a &
0 & b \cr c & 0 & d & 0 \cr 0 & c & 0 & d\cr
\end{bmatrix} = (ad - bc)^2.
$$
\begin{answer} Expanding along the first row, the determinant equals
$$ a\det\begin{bmatrix} a & 0 & b \cr 0 & d & 0 \cr c & 0 & d \cr  \end{bmatrix}
+ b \det\begin{bmatrix} 0 & a & b \cr c & 0 & 0 \cr 0 & c & d \cr
\end{bmatrix}.
$$Expanding the resulting two determinants along the second row,
we obtain $$ad\det\begin{bmatrix}a & b \cr c & d\cr \end{bmatrix}
+ b(-c)\det\begin{bmatrix}a & b \cr c & d\cr  \end{bmatrix} =
ad(ad - bc) - bc(ad - bc) = (ad - bc)^2,
$$as wanted.
\end{answer}
\end{pro}
\begin{pro}
Use induction to shew that
$$ \det\begin{bmatrix} 1 & 1 & 1 & \cdots & 1 &
1 \cr  1& 0 & 0 & \vdots & 0  & 0 \cr  0 & 1 & 0 & \cdots & 0  & 0
\cr 0 & 0 & 1& \cdots & 0  & 0 \cr \vdots & \vdots & \cdots &
\vdots & \vdots \cr 0 & 0 & 0 & \cdots& 1 & 0 \cr
\end{bmatrix} = (-1)^{n + 1}.$$
\label{exa:determinant_bunch_of_ones}\begin{answer} For $n = 1$ we
have $\det (1) = 1 = (-1)^{1 + 1}$. For $n = 2$ we have
$$\det \begin{bmatrix} 1 & 1 \cr 1 & 0 \cr
\end{bmatrix} = -1 = (-1)^{2 + 1}.
$$Assume that the result is true for $n -1$. Expanding the
determinant along the first column
$$ \begin{array}{lll}\det\begin{bmatrix} 1 & 1 & 1 & \cdots & 1 &
1 \cr  1& 0 & 0 & \vdots & 0  & 0 \cr  0 & 1 & 0 & \cdots & 0  & 0
\cr 0 & 0 & 1& \cdots & 0  & 0 \cr \vdots & \vdots & \cdots &
\vdots & \vdots \cr 0 & 0 & 0 & \cdots & 1 & 0 \cr
\end{bmatrix} &  = & 1\det\begin{bmatrix}  0 & 0 & \vdots & 0  & 0 \cr   1 & 0 & \cdots & 0  & 0
\cr  0 & 1& \cdots & 0  & 0 \cr  \vdots & \cdots & \vdots & \vdots
\cr  0 & 0 & \cdots & 1 & 0 \cr
\end{bmatrix} \\ & & \qquad - 1 \det\begin{bmatrix}  1 & 1 & \cdots & 1 &
1 \cr  1& 0 & \cdots & \vdots & 0   \cr  0 & 1 & \cdots & \cdots &
0 \cr 0 & 0 & \cdots & \cdots & 0   \cr \vdots & \vdots & \cdots &
\vdots \cr 0 & 0 & \cdots & 1 & 0 \cr
\end{bmatrix}\\
& = & 1(0) - (1)(-1)^{n}\\
& = &  (-1)^{n + 1},\\ \end{array}$$giving the result.
\end{answer}
\end{pro}
\begin{pro}
Let $$A = \begin{bmatrix} 1 & n & n & n & \cdots & n \cr n & 2 & n
& n & \vdots & n \cr n & n & 3 & n & \cdots & n \cr n & n & n & 4
& \cdots & n \cr \vdots & \vdots & \vdots & \cdots & \vdots \cr n
& n & n & n & n & n \cr
\end{bmatrix},
$$that is, $A\in\mat{n\times n}{\BBR}, A = [a_{ij}]$ is a matrix such that $a_{kk} = k$
and $a_{ij} = n$ when $i \neq j$. Find $\det A$. \begin{answer}
Perform $C_k - C_1 \rightarrow C_k$ for $k \in [2; n]$. Observe that
these operations do not affect the value of the determinant. Then
$$\det A = \det\begin{bmatrix} 1 & n - 1 & n - 1 & n - 1 & \cdots & n - 1 \cr n & 2 - n &
0 & 0 & \vdots & 0 \cr n & 0 & 3 - n & 0 & \cdots & 0 \cr n & 0 &
0 & 4-n & \cdots & 0 \cr \vdots & \vdots & \vdots & \cdots &
\vdots \cr n & 0 & 0 & 0 & 0 & 0 \cr
\end{bmatrix}.
$$Expand this last determinant along the $n$-th row, obtaining,
$$\begin{array}{lll}\det A & = &  (-1)^{1 + n}n\det\begin{bmatrix}  n - 1 & n - 1 & n - 1 & \cdots & n - 1 & n - 1 \cr  2 - n &
0 & 0 & \vdots & 0  & 0 \cr  0 & 3 - n & 0 & \cdots & 0  & 0 \cr 0
& 0 & 4-n & \cdots & 0  & 0 \cr  \vdots & \vdots & \cdots & \vdots
& \vdots \cr 0 & 0 & 0 & \cdots & -1 & 0  \cr
\end{bmatrix} \\
& = & (-1)^{1 + n}n(n - 1)(2 - n)(3 - n)\\ & & \qquad \cdots
(-2)(-1) \det\begin{bmatrix}  1 & 1 & 1 & \cdots & 1 & 1 \cr  1& 0
& 0 & \vdots & 0  & 0 \cr  0 & 1 & 0 & \cdots & 0  & 0 \cr 0 & 0 &
1& \cdots & 0  & 0 \cr  \vdots & \vdots & \cdots & \vdots & \vdots
\cr 0 & 0 & 0 & \cdots & 1 & 0  \cr
\end{bmatrix} \\
& = & -(n!)\det\begin{bmatrix}  1 & 1 & 1 & \cdots & 1 & 1 \cr  1&
0 & 0 & \vdots & 0  & 0 \cr  0 & 1 & 0 & \cdots & 0  & 0 \cr 0 & 0
& 1& \cdots & 0  & 0 \cr  \vdots & \vdots & \cdots & \vdots &
\vdots \cr 0 & 0 & 0 & \cdots & 1 & 0  \cr
\end{bmatrix} \\
& = & -(n!)(-1)^{n} \\
& = & (-1)^{n + 1}n!,
\end{array}$$
upon using the result of problem
\ref{exa:determinant_bunch_of_ones}.
\end{answer}
\end{pro}
\begin{pro}
Let $n\in\BBN , n > 1$ be an odd integer. Recall that the binomial
coefficients $\binom{n}{k}$ satisfy $\binom{n}{n} = \binom{n}{0} =
1$ and that for $1 \leq k \leq n$,
$$\binom{n}{k} = \binom{n - 1}{k - 1} + \binom{n - 1}{k}.  $$Prove that
$$ \det\begin{bmatrix} 1 & \binom{n}{1} & \binom{n}{2} & \cdots & \binom{n}{n - 1} & 1 \cr
1 & 1 & \binom{n}{1} & \cdots & \binom{n}{n - 2} & \binom{n}{n -
1} \cr \binom{n}{n - 1} & 1 & 1 & \cdots & \binom{n}{n - 3} &
\binom{n}{n - 2} \cr
 \cdots  &  \cdots  &  \cdots  & \cdots &  \cdots  &  \cdots  \cr
 \binom{n}{1} & \binom{n}{2} & \binom{n}{3} & \cdots & 1 & 1 \cr
 \end{bmatrix}  = (1 + (-1)^n)^n.$$
\begin{answer} Recall
that $\binom{n}{k} = \binom{n}{n - k}$, $$ \sum _{k = 0} ^n
\binom{n}{k} = 2^n  $$ and$$ \sum _{k = 0} ^n (-1)^{k}\binom{n}{k}
= 0, \ \ \ \ {\rm if}\ \ n > 0. $$ Assume that $n$ is odd. Observe
that then there are $n + 1$  (an even number) of columns and that
on the same row, $\binom{n}{k}$ is on a column of opposite parity
to that of $\binom{n}{n - k}$. By performing  $C_1 - C_2 + C_3 -
C_4 + \cdots + C_n - C_{n + 1} \rightarrow C_1$, the first column
becomes all $0$'s, whence the determinant is $0$ when $n$ is odd.
\end{answer}
\end{pro}
\begin{pro}
Let $A\in\gl{n}{ \BBF }$, $n>1$. Prove that $\det (\adj{A}) = (\det
A)^{n-1}$.
\end{pro}
\begin{pro} Let $(A, B, S)\in(\gl{n}{ \BBF })^3$. Prove that\begin{dingautolist}{202} \item $
\adj{\adj{A}} = (\det A)^{n-2}A$.

\item $\adj{AB} = \adj{A}\adj{B}$. \item $\adj{SAS^{-1}} = S(\adj{
A})S^{-1}$.
\end{dingautolist}\end{pro}
\begin{pro} Let $A\in\gl{2}{\BBF}$ and let  $k$ be
a positive integer. Prove that $\det (\underbrace{\mathrm{adj}
\cdots \mathrm{adj}}_{k}(A)) = \det A$.
\end{pro}
\begin{pro}
Find the determinant
$$\det \begin{bmatrix}  (b+c)^2 &  ab & ac \cr
ab & (a+c)^2 & bc\cr ac & bc & (a+b)^2 \cr
\end{bmatrix}  $$
{\em by hand, making explicit all your calculations.}
\begin{answer}
I will prove that
$$\det \begin{bmatrix}  (b+c)^2 &  ab & ac \cr
ab & (a+c)^2 & bc\cr ac & bc & (a+b)^2 \cr
\end{bmatrix} = 2abc(a+b+c)^3. $$
Using permissible row and column operations,
$$\begin{array}{lll}
\det \begin{bmatrix}  (b+c)^2 &  ab & ac \cr ab & (a+c)^2 & bc\cr ac
& bc & (a+b)^2 \cr
\end{bmatrix} & = & \det \begin{bmatrix}  b^2+2bc+c^2 &  ab & ac \cr ab & a^2+2ca+c^2 & bc\cr ac
& bc & a^2+2ab+b^2 \cr
\end{bmatrix}\\
& = \grstep{C_1+C_2+C_3\to C_1} & \det
\begin{bmatrix} b^2+2bc+c^2+ab+ac & ab & ac \cr ab+a^2+2ca+c^2+bc & a^2+2ca+c^2 & bc\cr
ac+bc+a^2+2ab+b^2 & bc & a^2+2ab+b^2 \cr
\end{bmatrix}\\
& =  & \det
\begin{bmatrix} (b+c)(a+b+c) & ab & ac \cr (a+c)(a+b+c) & a^2+2ca+c^2 & bc\cr
(a+b)(a+b+c) & bc & a^2+2ab+b^2 \cr
\end{bmatrix}\\
\end{array}$$
Pulling out a factor, the above equals
$$
 (a+b+c)\det
\begin{bmatrix} b+c & ab & ac \cr a+c & a^2+2ca+c^2 & bc\cr
a+b & bc & a^2+2ab+b^2 \cr
\end{bmatrix}
$$ and performing $R_1+R_2+R_3\to R_1$, this is  $$ (a+b+c)\det
\begin{bmatrix} 2a+2b+2c & ab+a^2+2ca+c^2+bc & ac+bc+ a^2+2ab+b^2 \cr a+c & a^2+2ca+c^2 & bc\cr
a+b & bc & a^2+2ab+b^2 \cr
\end{bmatrix}$$
Factoring this is
$$ (a+b+c)\det
\begin{bmatrix} 2(a+b+c) & (a+c)(a+b+c) & (a+b)(a+b+c) \cr a+c & a^2+2ca+c^2 & bc\cr
a+b & bc & a^2+2ab+b^2 \cr
\end{bmatrix},$$which in turn is  $$(a+b+c)^2\det
\begin{bmatrix} 2 & a+c & a+b \cr a+c & a^2+2ca+c^2 & bc\cr
a+b & bc & a^2+2ab+b^2 \cr
\end{bmatrix}$$
Performing $C_2-(a+c)C_1\to C_2$ and $C_3-(a+b)C_1\to C_3$ we obtain
 $$(a+b+c)^2\det
\begin{bmatrix} 2 & -a-c & -a-b \cr a+c & 0 & -a^2-ab-ac\cr
a+b &-a^2- ab-ac & 0 \cr
\end{bmatrix}$$
This last matrix we will expand by the second column, obtaining that
the original determinant is thus
$$ (a+b+c)^2\left((a+c)\det\begin{bmatrix}a+c & -a^2-ab-ac \cr  a+b & 0  \end{bmatrix} +(a^2+ab+ac)\det\begin{bmatrix} 2 & -a-b \cr a+c & -a^2-ab-ac \end{bmatrix}\right)  $$
This simplifies to
$$\begin{array}{lll}
(a+b+c)^2\left((a+c)(a+b)(a^2+ab+ac)\right.\\
\qquad \left.+(a^2+ab+ac)(-a^2-ab-ac+bc)\right) & = &
a(a+b+c)^3((a+c)(a+b)-a^2-ab-ac+bc)\\ & = &
2abc(a+b+c)^3,\end{array}
$$ as claimed.




\end{answer}


\end{pro}

\begin{pro}
The matrix  $$  \begin{bmatrix} a & b & c & d \cr d & a & b & c \cr c & d & a & b \cr b & c & d & a\cr         \end{bmatrix}
$$ is known as a {\em circulant matrix}. Prove that its determinant is $(a+b+c+d)(a-b+c-d)((a-c)^2+(b-d)^2)$.
\begin{answer}
 We have

 \begin{eqnarray*}\det \begin{bmatrix} a & b & c & d \cr d & a & b
& c \cr c & d & a & b \cr b & c & d & a\cr         \end{bmatrix} &
\grstep[=]{R_1+R_2+R_3+R_4\to R_1} & \det \begin{bmatrix} a+b+c+d &
a+b+c+d & a+b+c+d & a+b+c+d \cr d & a & b & c \cr c & d & a & b \cr
b & c & d & a\cr
\end{bmatrix}\\
& = & (a+b+c+d)\det \begin{bmatrix} 1 & 1 & 1 & 1 \cr d & a & b & c
\cr c & d & a & b \cr b & c & d & a\cr         \end{bmatrix}\\
& \grstep[=]{C_4-C_3+C_2-C_1\to C_4} & (a+b+c+d)\det\begin{bmatrix} 1 &
1 & 1 & 0\cr d & a & b & c-b+a-d \cr c & d & a & b-a+d-c \cr b & c &
d & a-d+c-b\cr
\end{bmatrix}\\
& = &  (a+b+c+d)(a-b+c-d)\det\begin{bmatrix} 1 & 1 & 1 & 0\cr d & a & b
& 1\cr c & d & a & -1 \cr b & c & d & 1\cr
\end{bmatrix}\\
& \grstep[=]{R_2+R_3\to R_2,\ R_4+R_3\to R_4} &
(a+b+c+d)(a-b+c-d)\det\begin{bmatrix} 1 & 1 & 1 & 0\cr d+c & a+d & b+a &
0\cr c & d & a & -1 \cr b+c & c+d & a+d & 0\cr
\end{bmatrix}\\
&= & (a+b+c+d)(a-b+c-d)\det\begin{bmatrix} 1 & 1 & 1 \cr d+c & a+d & b+a
\cr  b+c & c+d & a+d \cr
\end{bmatrix}\\
& \grstep[=]{C_1-C_3\to C_1, \ C_2-C_3\to C_2} &
(a+b+c+d)(a-b+c-d)\det\begin{bmatrix} 0 & 0 & 1 \cr d+c-b-a & d-b & b+a
\cr b+c-a-d & c-a
 & a+d \cr
\end{bmatrix}\\
& = & (a+b+c+d)(a-b+c-d)\det\begin{bmatrix}  d+c-b-a & d-b \cr b+c-a-d &
c-a
 \cr
\end{bmatrix}\\
& = & (a+b+c+d)(a-b+c-d)\left((d+c-b-a)(c-a)-(d-b)(b+c-a-d)\right)
\\
& = & (a+b+c+d)(a-b+c-d)\\
& & \qquad
((c-a)(c-a)+(c-a)(d-b)-(d-b)(c-a)-(d-b)(b-d))
\\
& = & (a+b+c+d)(a-b+c-d)((a-c)^2+(b-d)^2).
\end{eqnarray*}
\begin{rem}
Since $$ (a-c)^2+(b-d)^2=(a-c+i(b-d))(a-c-i(b-d)), $$ the
above determinant is then
$$ (a+b+c+d)(a-b+c-d)(a+ib-c-id)(a-ib-c+id). $$
Generalisations of this determinant are possible using roots of
unity.
\end{rem}
\end{answer}
\end{pro}


\section{Determinants and Linear Systems}
\begin{thm}\label{thm:systems_determinants_invertibility}Let
$A\in\mat{n\times n}{ \BBF }$. The following are all equivalent
\begin{dingautolist}{202}
\item \label{thm:sys_det_1} $\det A \neq 0_{\BBF }$. \item
\label{thm:sys_det_2}$A$ is invertible. \item\label{thm:sys_det_3}
There exists a unique solution $X\in\mat{n\times 1}{ \BBF }$ to the
equation $AX = Y$. \item  \label{thm:sys_det_4}If $AX = {\bf
0}_{n\times 1}$ then $X = {\bf 0}_{n\times 1}$.
\end{dingautolist}\end{thm}
\begin{pf} We prove the implications in sequence: \\
$\ref{thm:sys_det_1} \implies \ref{thm:sys_det_2}$: follows from
Corollary \ref{cor:inverse_via_adjoint}\\ $\ref{thm:sys_det_2}
\implies \ref{thm:sys_det_3}$: If $A$ is invertible and $AX = Y$ then $X = A^{-1}Y$ is the unique solution of this equation.\\
$\ref{thm:sys_det_3} \implies \ref{thm:sys_det_4}$: follows by putting $Y = {\bf 0}_{n\times 1}$\\
$\ref{thm:sys_det_4} \implies \ref{thm:sys_det_1}$: Let $R$ be the
row echelon form of $A$. Since $RX = {\bf 0}_{n\times 1}$ has only
$X={\bf 0}_{n\times 1}$ as a solution, every entry on the diagonal
of $R$ must be non-zero, $R$ must be triangular, and hence $\det R
\neq 0_{\BBF }$. Since $A = PR$ where $P$ is an invertible $n\times
n$ matrix,
we deduce that $\det A = \det P\det R  \neq 0_{\BBF }$.\\
\end{pf}
The contrapositive form of the implications \ref{thm:sys_det_1}
and \ref{thm:sys_det_4}  will be used later. Here it is for future
reference.
\begin{cor}\label{cor:0determinant_non_null_kernel}
Let $A\in\mat{n\times n}{ \BBF }$. If there is $X\neq {\bf
0}_{n\times 1}$ such that $AX = {\bf 0}_{n\times 1}$ then $\det A =
0_{\BBF }$.
\end{cor}

\section*{\psframebox{Homework}}
\begin{pro}
For which $a$ is the matrix $\begin{bmatrix}-1 & 1 & 1 \cr 1 & a & 1
\cr 1 & 1 & a \cr \end{bmatrix}$ singular (non-invertible)?
\end{pro}

\chapter{Eigenvalues and Eigenvectors}
\section{Similar Matrices}
\begin{df}
We say that $A\in\mat{n\times n}{ \BBF }$ is {\em similar} to
$B\in\mat{n\times n}{ \BBF }$ if there exist a matrix $P\in\gl{n}{
\BBF }$ such that
$$B = PAP^{-1}.
$$ \index{matrix!similarity}
\end{df}
\begin{thm}
Similarity is an equivalence relation.
\end{thm}
\begin{pf}
Let $A\in\mat{n\times n}{ \BBF }$. Then $A = {\bf I}_nA{\bf I}_n
^{-1}$, so similarity is reflexive. If $B = PAP^{-1} $ ($P\in\gl{n}{
\BBF }$ ) then $A = P^{-1}BP$ so similarity is symmetric. Finally,
if $B = PAP^{-1}$ and $C = QBQ^{-1} $ ($P\in\gl{n}{ \BBF }$ ,
$Q\in\gl{n}{ \BBF }$) then $C = QPAP^{-1}Q^{-1} = QPA(QP)^{-1}$ and
so similarity is transitive.
\end{pf}
Since similarity is an equivalence relation, it partitions the set
of $n\times n$ matrices into equivalence classes by Theorem
\ref{thm:equiv_relation_yields_partition}.
\begin{df}
A matrix is said to be {\em diagonalisable} if it is similar to a
diagonal matrix.
\end{df}
Suppose that $$ A = \begin{bmatrix}\lambda_1 & 0 & 0 &  \cdots & 0
\cr 0 & \lambda_2 & 0 & \cdots & 0 \cr \vdots & \vdots & \vdots &
\cdots & \vdots \cr 0 & 0 & 0 & \cdots & \lambda_n
\end{bmatrix}.$$Then if $K$ is a positive integer
$$A^K =   \begin{bmatrix}\lambda_1 ^K & 0 & 0 &  \cdots & 0
\cr 0 & \lambda_2 ^K& 0 & \cdots & 0 \cr \vdots & \vdots & \vdots
& \cdots & \vdots \cr 0 & 0 & 0 & \cdots & \lambda_n ^K
\end{bmatrix}. $$In particular, if $B$ is similar to $A$ then
$$B^K = \underbrace{(PAP^{-1})(PAP^{-1})\cdots (PAP^{-1})}_{K\ \mathrm{factors}} = PA^KP^{-1} = P\begin{bmatrix}\lambda_1 ^K & 0 & 0 &  \cdots & 0
\cr 0 & \lambda_2 ^K& 0 & \cdots & 0 \cr \vdots & \vdots & \vdots
& \cdots & \vdots \cr 0 & 0 & 0 & \cdots & \lambda_n ^K
\end{bmatrix}P^{-1},   $$so we have a simpler way of computing
$B^K$. Our task will now be to establish when a particular square
matrix is diagonalisable.

\section{Eigenvalues and Eigenvectors}
Let $A\in\mat{n\times n}{ \BBF }$ be a square diagonalisable matrix.
Then there exist $P\in\gl{n}{ \BBF }$ and a diagonal matrix
$D\in\mat{n\times n}{ \BBF }$ such that $P^{-1}AP = D$, whence $AP =
PD$. Put
$$D =
\begin{bmatrix}\lambda_1 & 0 & 0 &  \cdots & 0 \cr 0 & \lambda_2 &
0 & \cdots & 0 \cr \vdots & \vdots & \vdots & \cdots & \vdots \cr
0 & 0 & 0 & \cdots & \lambda_n
\end{bmatrix}, \ \ \ P = [P_1; P_2; \cdots  ; P_n],  $$where the
$P_k$ are the columns of $P$. Then
$$AP=PD \implies [AP_1; AP_2; \cdots ; AP_n] = [\lambda_1P_1; \lambda_2P_2; \cdots ; \lambda_nP_n],   $$
from where it follows that $AP_k = \lambda_kP_k$. This motivates
the following definition.
\begin{df}
Let $V$ be a finite-dimensional vector space over a field $\BBF$ and
let  $T:V \rightarrow V$ be a linear transformation.  A scalar
$\lambda \in \BBF$ is called an {\em eigenvalue} of $T$ if there is
a $\v{v}\neq \v{0}$ (called an {\em eigenvector}) such that
$T(\v{v}) = \lambda\v{v}$. \index{eigenvalue} \index{eigenvector}
\end{df}
\begin{exa}
Shew that if $\lambda$ is an eigenvalue of $T:V \rightarrow V$, then
$\lambda ^k$ is an eigenvalue of $T^k:V \rightarrow V$, for
$k\in\BBN\setminus \{0\} $.
\end{exa}\begin{solu}Assume that $T(\v{v}) =
\lambda\v{v}$. Then $$T^2(\v{v}) = TT(\v{v}) = T(\lambda\v{v}) =
\lambda T(\v{v}) = \lambda(\lambda\v{v}) =
\lambda^2\v{v}.$$Continuing the iterations we obtain $T^k(\v{v}) =
\lambda^k \v{v}$, which is what we want.
\end{solu}

\begin{thm}
Let $A\in\mat{n\times n}{ \BBF }$ be the matrix representation of
$T:V \rightarrow V$. Then $\lambda\in \BBF$ is an eigenvalue of $T$
if and only if $\det (\lambda {\bf I}_n - A) = 0_{\BBF }.$
\end{thm}
\begin{pf}
$\lambda$ is an eigenvalue of $A$ $\iff$ there is $\v{v} \neq
\v{0}$ such that $A\v{v} = \lambda\v{v}$ $\iff$ $\lambda \v{v} -
A\v{v} = \v{0}$ $\iff$ $\lambda {\bf I}_n \v{v} - A\v{v}= \v{0}$
 $\iff$ $\det (\lambda {\bf I}_n - A) = 0_{\BBF }$ by Corollary \ref{cor:0determinant_non_null_kernel}.\end{pf}


\begin{df}
The equation $$\det (\lambda {\bf I}_n - A) = 0_{\BBF }
$$is called the {\em characteristic equation of $A$} or {\em secular equation of
$A$}. The polynomial $p(\lambda) = \det (\lambda {\bf I}_n - A)$
is the characteristic polynomial of $A$. \index{characteristic
equation}
\end{df}
\begin{exa}
Let $\dis{A =
\begin{bmatrix} 1 & 1 & 0 & 0  \cr 1 & 1 & 0 & 0 \cr 0 & 0 & 1 & 1 \cr 0 & 0 & 1 & 1 \cr \end{bmatrix}}$. Find
\begin{dingautolist}{202}
\item  The characteristic polynomial of $A$. \item  The
eigenvalues of $A$.  \item  The corresponding eigenvectors.
\end{dingautolist}
\end{exa}
\begin{solu}We have
\begin{dingautolist}{202}
\item $$\begin{array}{lll} \det (\lambda {\bf I}_4 - A) & = &
\det\begin{bmatrix} \lambda -1 & -1 & 0 & 0  \cr -1 & \lambda -1 &
0 & 0 \cr 0 & 0 & \lambda -1 & -1 \cr 0 & 0 & -1 & \lambda -1
\cr\end{bmatrix} \\
& = & (\lambda -1)\det \begin{bmatrix}\lambda - 1 & 0 & 0 \cr 0 &
\lambda -1 & -1 \cr 0 & -1 & \lambda -1 \cr
\end{bmatrix} + \det\begin{bmatrix} -1 & 0 & 0 \cr 0 & \lambda -1  & -1 \cr 0 & -1 & \lambda
-1\end{bmatrix}\\
& = & (\lambda -1)((\lambda -1)((\lambda -1)^2-1)) + (-((\lambda -1)^2 -1)) \\
& = & (\lambda -1)((\lambda -1)(\lambda -2)(\lambda)) - (\lambda
-2)(\lambda) \\
& = & (\lambda -2)(\lambda)((\lambda -1)^2-1) \\
& = & (\lambda -2)^2(\lambda)^2 \\
\end{array}$$
\item The eigenvalues are clearly $\lambda = 0$ and $\lambda = 2$.
\item If $\lambda = 0$, then
$$\begin{array}{lll}0 {\bf I}_4 - A & = & \begin{bmatrix} -1 & -1 & 0 & 0  \cr -1 & -1 & 0 & 0 \cr 0 & 0 & -1 & -1 \cr 0 & 0 & -1 & -1 \cr \end{bmatrix}.\\
   \end{array}$$This matrix has row-echelon form
$$\begin{bmatrix} -1 & -1 & 0 & 0  \cr  0 & 0 & -1 & -1 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0
\cr\end{bmatrix},
$$and if

$$\begin{bmatrix} -1 & -1 & 0 & 0  \cr  0 & 0 & -1 & -1 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0
\cr\end{bmatrix}\colvec{a\\ b \\ c\\ d} = \colvec{0 \\ 0\\ 0 \\
0},
$$  then $c = -d$ and $a = -b$

Thus the general solution of the system $(0 {\bf I}_4 - A)X = {\bf
0}_{n\times 1}$ is $$\colvec{a\\ b\\ c\\ d} = a\colvec{1
\\ -1 \\ 0 \\ 0} + c\colvec{0 \\ 0 \\ 1 \\ -1}.
$$
If $\lambda = 2$, then
$$\begin{array}{lll}2 {\bf I}_4 - A & = & \begin{bmatrix} 1 & -1 & 0 & 0  \cr -1 & 1 & 0 & 0 \cr 0 & 0 & 1 & -1 \cr 0 & 0 & -1 & 1 \cr \end{bmatrix}.\\
   \end{array}$$This matrix has row-echelon form
$$\begin{bmatrix} -1 & 1 & 0 & 0  \cr  0 & 0 & 1 & -1 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0
\cr\end{bmatrix},
$$and if

$$\begin{bmatrix} 1 & -1 & 0 & 0  \cr  0 & 0 & -1 & 1 \cr 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0
\cr\end{bmatrix}\colvec{a\\ b \\ c\\ d} = \colvec{0 \\ 0\\ 0 \\
0},
$$  then $c = d$ and $a = b$

Thus the general solution of the system $(2 {\bf I}_4 - A)X = {\bf
0}_{n\times 1}$ is $$\colvec{a\\ b\\ c\\ d} = a\colvec{1
\\ 1 \\ 0 \\ 0} + c\colvec{0 \\ 0 \\ 1 \\ 1}.
$$
Thus for $\lambda = 0$ we have the eigenvectors $$\colvec{1
\\ -1 \\ 0 \\ 0},  \colvec{0 \\ 0 \\ 1 \\ -1}  $$and for $\lambda = 2$ we have the eigenvectors $$\colvec{1
\\ 1 \\ 0 \\ 0},  \colvec{0 \\ 0 \\ 1 \\ 1}.  $$
\end{dingautolist}
\end{solu}
\begin{thm}
If $\lambda = 0_{\BBF }$ is an eigenvalue of $A$, then $A$ is
non-invertible.
\end{thm}
\begin{pf}
Put  $p(\lambda) = \det(\lambda {\bf I}_n - A)$. Then $p(0_{\BBF })
= \det(-A) = (-1)^n\det A$ is the constant term of the
characteristic polynomial. If $\lambda = 0_{\BBF }$ is an eigenvalue
then $$ p(0_{\BBF }) = 0_{\BBF } \implies \det A = 0_{\BBF }, $$and
hence $A$ is non-invertible by Theorem
\ref{thm:systems_determinants_invertibility}.
\end{pf}


\begin{thm}
Similar matrices have the  same characteristic polynomial.
\end{thm}
\begin{pf}  We have
$$\begin{array}{lll}\det (\lambda {\bf I}_n
-SAS^{-1} ) & = & \det (\lambda S{\bf I}_nS^{-1} -SAS^{-1} ) \\
& = & \det S(\lambda {\bf I}_n-A)S^{-1} \\
& = &(\det S)(\det(\lambda {\bf I}_n-A))(\det S^{-1}) \\
& = & (\det S)(\det(\lambda {\bf I}_n-A))\left(\dfrac{1}{\det
S}\right) \\
& = & \det(\lambda {\bf I}_n-A),
\end{array}
$$from where the result follows.\end{pf}

\section*{\psframebox{Homework}}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Find the eigenvalues and eigenvectors of $\dis{A =
\begin{bmatrix} 1 & -1 \cr -1 & 1 \cr  \end{bmatrix}}$
\begin{answer}We have $$\det (\lambda {\bf I}_2 - A)  = \det
\begin{bmatrix} \lambda -1 & 1 \cr 1 & \lambda - 1 \cr
\end{bmatrix} = (\lambda -1)^2 - 1 = \lambda (\lambda -2),
$$whence the eigenvalues are $0$ and $2$. For $\lambda = 0$ we
have $$ 0 {\bf I}_2 - A =   \begin{bmatrix} -1 & 1 \cr 1 & -1 \cr
\end{bmatrix}.$$This has row-echelon form
$$\begin{bmatrix} 1 & -1 \cr 0 & 0 \cr
\end{bmatrix}.$$
If
$$\begin{bmatrix} 1 & -1 \cr 0 & 0 \cr
\end{bmatrix}\colvec{a\\ b} = \colvec{0 \\ 0}$$then $a = b$.
Thus $$\colvec{a\\ b} = a\colvec{1 \\ 1}   $$ and we can take
$\dis{\colvec{1\\ 1}}$ as the eigenvector corresponding  to
$\lambda = 0$. Similarly, for $\lambda = 2,$  $$ 2 {\bf I}_2 - A =
\begin{bmatrix} 1 & 1 \cr 1 & 1 \cr
\end{bmatrix},$$which  has row-echelon form
$$\begin{bmatrix} 1 &  1\cr 0 & 0 \cr
\end{bmatrix}.$$
If
$$\begin{bmatrix} 1 & 1 \cr 0 & 0 \cr
\end{bmatrix}\colvec{a\\ b} = \colvec{0 \\ 0}$$then $a =-b$.
Thus $$\colvec{a\\ b} = a\colvec{1 \\ -1}   $$ and we can take
$\dis{\colvec{1\\ -1}}$ as the eigenvector corresponding  to
$\lambda = 2$.
\end{answer}
\end{pro}
\begin{pro}
Let $A$ be a $2\times 2$ matrix over some some field $\BBF$. Prove
that the characteristic polynomial of $A$ is
$$ \lambda ^2 - (\tr{A})\lambda + \det A. $$
\end{pro}
\begin{pro}
A  matrix $A\in\mat{2\times 2}{\BBR}$ satisfies $\tr{A}=-1$ and
$\det{A}=-6$. Find the value of $\det ({\bf I}_2+A)$.
\end{pro}
\begin{pro}
A $2\times 2$ matrix $A$ with real entries has characteristic
polynomial $p(\lambda ) = \lambda ^2 + 2\lambda -1$. Find the value
of   $\det (2{\bf I}_2 + A)$.
\end{pro}

\begin{pro}
Let $\dis{A =
\begin{bmatrix}0 & 2 & -1  \cr 2 & 3 & -2  \cr -1 & -2 & 0 \cr\end{bmatrix}}$. Find
\begin{dingautolist}{202}
\item  The characteristic polynomial of $A$. \item  The
eigenvalues of $A$.  \item  The corresponding eigenvectors.
\end{dingautolist}
\begin{answer} \begin{dingautolist}{202} \item We have
$$\begin{array}{lll}\det (\lambda {\bf I}_3 - A) & = &
\det\begin{bmatrix}\lambda & -2 & 1  \cr -2 & \lambda- 3 & 2  \cr 1 & 2 & \lambda \cr\end{bmatrix} \\
 & = & \lambda\det\begin{bmatrix} \lambda - 3 & 2 \cr 2 & \lambda \cr  \end{bmatrix} +
 2\det\begin{bmatrix} -2 & 2 \cr 1 & \lambda \cr  \end{bmatrix} + \det\begin{bmatrix} -2 & \lambda - 3\cr 1 & 2\cr  \end{bmatrix}\\
 & = & \lambda (\lambda ^2 -3\lambda - 4) + 2(-2\lambda - 2) + (-\lambda - 1)\\
 & = & \lambda (\lambda - 4)(\lambda + 1) - 5(\lambda + 1) \\
 & = & (\lambda^2 - 4\lambda - 5)(\lambda + 1) \\
 & = & (\lambda + 1)^2(\lambda - 5)\end{array}$$
\item The eigenvalues are $-1, -1, 5$. \item If $\lambda = -1$,
$$\begin{array}{lll}(-{\bf I}_3 - A)  =
\begin{bmatrix}-1 & -2 & 1 \cr
-2 & -4 & 2 \cr 1 & 2 & -1 \cr \end{bmatrix}\colvec{a \\ b\\ c} =
\colvec{0 \\ 0 \\ 0} & \iff & a = -2b + c\\ & \iff & \colvec{a\\
b\\ c} = b\colvec{-2 \\ 1 \\ 0} + c\colvec{1 \\ 0 \\ 1}.
\end{array}$$We may take as eigenvectors $\colvec{-2 \\ 1 \\ 0}, \colvec{1 \\ 0 \\
1}$, which are clearly linearly independent.

\bigskip

If $\lambda = 5$,
$$\begin{array}{lll} (5{\bf I}_3 - A)  =
\begin{bmatrix}5 & -2 & 1 \cr
-2 & 2 & 2 \cr 1 & 2 & 5 \cr \end{bmatrix}\colvec{a \\ b\\ c} =
\colvec{0 \\ 0 \\ 0} & \iff & a = -c, b = -2c,\\ & \iff & \colvec{a\\
b\\ c} = c\colvec{-1 \\ -2 \\ 1}.
\end{array}$$We may take as eigenvector $\colvec{1 \\ 2 \\ -1}$.
\end{dingautolist}
\end{answer}
\end{pro}
\begin{pro}
Describe all matrices $A\in\mat{2\times 2}{\BBR}$ having eigenvalues
$1$ and $-1$.
\begin{answer}The characteristic polynomial of $A$ must be $\lambda
^2-1$, which means that $\tr{A}=0$ and $\det A = -1$. Hence $A$ must
be of the form $\begin{bmatrix}a & c \cr b & -a   \end{bmatrix}$,
with $-a^2-bc=-1$, that is, $a^2+bc=1$.
\end{answer}
\end{pro}

\begin{pro}
Let $A\in \mat{n\times n}{\BBR}$. Demonstrate that $A$ has the same
characteristic polynomial as its transpose.
\begin{answer}
We must shew that $\det (\lambda {\bf I}_n - A)=\det (\lambda {\bf
I}_n - A)^T$. Now, recall that the determinant of a square matrix is
the same as the determinant of its transpose. Hence
$$ \det (\lambda {\bf I}_n - A) = \det ((\lambda {\bf I}_n - A)^T) = \det (\lambda {\bf I}_n ^T - A^T) = \det (\lambda {\bf I}_n - A^T),  $$
as we needed to shew.
\end{answer}
\end{pro}

\end{multicols}
\section{Diagonalisability}
In this section we find conditions for diagonalisability.

\begin{thm}
Let $\{\v{v}_1,\v{v}_2, \ldots, \v{v}_k \}\subset V$ be the
eigenvectors corresponding to the {\em different} eigenvalues
$\{\lambda_1, \lambda_2, \ldots, \lambda_k\}$ (in that order).
Then these eigenvectors are linearly independent.
\end{thm}
\begin{pf}
Let $T:V\rightarrow V$ be the underlying linear transformation. We
proceed by induction. For $k = 1$ the result is clear. Assume that
every set of $k-1$ eigenvectors that correspond to $k-1$ distinct
eigenvalues is linearly independent and let the eigenvalues
$\lambda_1, \lambda_2, \ldots , \lambda_{k-1}$ have corresponding
eigenvectors $\v{v}_1, \v{v}_2, \ldots , \v{v}_{k-1}$. Let
$\lambda$ be an eigenvalue different from the $\lambda_1,
\lambda_2, \ldots , \lambda_{k-1}$ and let its corresponding
eigenvector be $\v{v}$. If $\v{v}$ were linearly dependent on the
$\v{v}_1, \v{v}_2, \ldots , \v{v}_{k-1}$, we would have
\begin{equation}\label{pf:distinct_eigen1}x\v{v} + x_1\v{v}_1 +
x_2\v{v}_2  + \cdots + x_{k-1}\v{v}_{k-1}  = \v{0}.
\end{equation}Now $$T(x\v{v} + x_1\v{v}_1  + x_2\v{v}_2  +
\cdots + x_{k-1}\v{v}_{k-1})  = T(\v{0}) = \v{0},
$$by Theorem \ref{thm:linear_takes_0_to_0}. This implies that
\begin{equation}\label{pf:distinct_eigen2}x\lambda\v{v} + x_1\lambda_1\v{v}_1  +
x_2\lambda_2\v{v}_2  + \cdots + x_{k-1}\lambda_{k-1}\v{v}_{k-1}  =
\v{0}.\end{equation} From  \ref{pf:distinct_eigen2} take away
$\lambda$ times \ref{pf:distinct_eigen1}, obtaining
\begin{equation}\label{pf:distinct_eigen3}
x_1(\lambda_1-\lambda)\v{v}_1  + x_2(\lambda_2-\lambda)\v{v}_2  + \cdots +
x_{k-1}(\lambda_{k-1}-\lambda)\v{v}_{k-1}  = \v{0}.
\end{equation}

Since $\lambda - \lambda_i \neq 0_{\BBF }$, \ref{pf:distinct_eigen3}
is saying that the eigenvectors $\v{v}_1, \v{v}_2, \ldots ,
\v{v}_{k-1}$ are linearly dependent, a contradiction. Thus the claim
follows for $k$ distinct eigenvalues and the result is proven by
induction.
\end{pf}
\begin{thm}A matrix $A\in\mat{n\times n}{ \BBF }$ is diagonalisable if and only if it possesses $n$ linearly
independent eigenvectors.
\end{thm}
\begin{pf}
Assume first that $A$ is diagonalisable, so there exists
$P\in\gl{n}{ \BBF }$ and $$D = \begin{bmatrix}\lambda_1 & 0 & 0 &
\cdots & 0 \cr 0 & \lambda_2 & 0 & \cdots & 0 \cr \vdots & \vdots &
\vdots & \cdots & \vdots \cr 0 & 0 & 0 & \cdots & \lambda_n
\end{bmatrix} $$ such that $$P^{-1}AP = \begin{bmatrix}\lambda_1 & 0 & 0 &  \cdots & 0
\cr 0 & \lambda_2 & 0 & \cdots & 0 \cr \vdots & \vdots & \vdots &
\cdots & \vdots \cr 0 & 0 & 0 & \cdots & \lambda_n
\end{bmatrix}.   $$
Then $$[AP_1; AP_2; \cdots ; AP_n] = AP =
P\begin{bmatrix}\lambda_1 & 0 & 0 & \cdots & 0 \cr 0 & \lambda_2 &
0 & \cdots & 0 \cr \vdots & \vdots & \vdots & \cdots & \vdots \cr
0 & 0 & 0 & \cdots & \lambda_n
\end{bmatrix} = [\lambda_1P_1; \lambda_2P_2; \cdots ; \lambda_nP_n],
$$where the $P_k$ are the columns of $P$. Since $P$ is invertible,
the $P_k$ are linearly independent by virtue of Theorems
\ref{thm:lin_ind_columns} and
\ref{thm:systems_determinants_invertibility}.

\bigskip
Conversely, suppose now that $\v{v}_1, \ldots , \v{v}_n$ are $n$
linearly independent eigenvectors, with corresponding eigenvalues
$\lambda_1, \lambda_2, \ldots , \lambda_n$. Put $$P = [\v{v}_1;
\ldots ; \v{v}_n], \ \ \ D =
\begin{bmatrix}\lambda_1 & 0 & 0 &  \cdots & 0 \cr 0 & \lambda_2 &
0 & \cdots & 0 \cr \vdots & \vdots & \vdots & \cdots & \vdots \cr
0 & 0 & 0 & \cdots & \lambda_n
\end{bmatrix}.
$$Since $A\v{v}_i = \lambda_i\v{v}_i$ we see that $AP =
PD$. Again $ P$ is invertible by Theorems
\ref{thm:lin_ind_columns} and
\ref{thm:systems_determinants_invertibility} since the $\v{v}_k$
are linearly independent. Left multiplying by $P^{-1}$ we deduce
$P^{-1}AP = D$, from where $A$ is diagonalisable.
\end{pf}
\begin{exa} \label{exa:diagonalisable3x3}Shew that the following matrix is diagonalisable:
 $$\begin{bmatrix}1 & - 1 & -1 \cr 1 &  3 & 1 \cr -3 & 1 & -1 \cr\end{bmatrix}$$and
 find a diagonal matrix $D$ and an invertible matrix $P$ such that
 $$A = PDP^{-1}.$$ \end{exa}
 \begin{solu}
 Verify that the characteristic polynomial of $A$ is
 $$\lambda^3-3\lambda^2-4\lambda+12 = (\lambda - 2)(\lambda + 2)(\lambda - 3).$$
 The eigenvector for $\lambda = -2$ is
 $$\colvec{1 \\ -1 \\ 4}.$$
 The eigenvector for $\lambda = 2$ is
 $$\colvec{-1 \\ 0 \\ 1}.$$
 The eigenvector for $\lambda = 3$ is
 $$\colvec{-1 \\ 1 \\ 1}.$$
 We may take
 $$D = \begin{bmatrix}-2 & 0 & 0 \cr  0 & 2 & 0 \cr 0 & 0 & 3 \cr\end{bmatrix}, \ \ P = \begin{bmatrix}1 & -1 & -1 \cr -1 & 0 & 1 \cr 4 & 1 & 1 \cr\end{bmatrix}.$$
 We also find
 $$P^{-1} = \begin{bmatrix}\frac{1}{5} & 0 & \frac{1}{5} \cr   -1 & -1  & 0 \cr \frac{1}{5} & 1 & \frac{1}{5} \cr \end{bmatrix}.$$
\end{solu}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}Let $A$ be a $2\times 2$ matrix with eigenvalues $1$ and $-2$ and corresponding
 eigenvectors $\dis{\begin{bmatrix}1 \cr 0\cr\end{bmatrix}}$ and $\dis{\begin{bmatrix}1 \cr
 -1\cr\end{bmatrix}}$, respectively. Determine $A^{10}.$ \begin{answer}
 Put $$D = \begin{bmatrix}1 & 0 \cr 0 & -2\end{bmatrix}, \ \ P = \begin{bmatrix}1 & 1 \cr
 0 & -1 \cr \end{bmatrix}.$$ We find
 $$P^{-1} = \begin{bmatrix}1 & 1 \cr 0 & -1\end{bmatrix}.$$

 Since $A = PDP^{-1}$
 $$A^{10} = PD^{10}P^{-1} = \begin{bmatrix}1 & 1 \cr
 0 & -1 \cr \end{bmatrix}\begin{bmatrix}1 & 0 \cr 0 & 1024\end{bmatrix}\begin{bmatrix}1 & 1 \cr 0 & -1\end{bmatrix}
 = \begin{bmatrix}1 & -1023 \cr 0 & 1024\end{bmatrix}.
 $$
 \end{answer}
 \end{pro}
 \begin{pro}
Consider the matrix $A=\begin{bmatrix} 9  & -4 \cr 20 & -9 \cr
\end{bmatrix}$.
\begin{enumerate}
\item  Find the characteristic polynomial of $A$.
\item  Find the eigenvalues of $A$.
\item  Find the eigenvectors of $A$.
\item  If $A^{20} = \begin{bmatrix}a & b \cr c & d \cr
\end{bmatrix}$, find $a+d$.

\end{enumerate}
\begin{answer}
\noindent
\begin{enumerate}
\item $A$ has characteristic polynomial $\det\begin{bmatrix} \lambda -9  & 4 \cr -20 & \lambda+9 \cr
\end{bmatrix}=(\lambda -9)(\lambda +9)+80=\lambda ^2-1=(\lambda -1)(\lambda +1)$.
\item $(\lambda -1)(\lambda +1)=0\implies \lambda \in \{-1,1\}$.
\item For $\lambda =-1$ we have $$ \begin{bmatrix} 9  & -4 \cr 20 & -9 \cr
\end{bmatrix}\colvec{a\\ b}=-1\colvec{a\\ b}\implies 10a=4b \implies a=\dfrac{2b}{5}, $$
so we can take $\colvec{2\\ 5}$ as an eigenvector.

\bigskip

For $\lambda =1$ we have $$ \begin{bmatrix} 9  & -4 \cr 20 & -9 \cr
\end{bmatrix}\colvec{a\\ b}=1\colvec{a\\ b}\implies 8a=4b \implies a=\dfrac{b}{2}, $$
so we can take $\colvec{1\\ 2}$ as an eigenvector.


\item We can do this problem in at least three ways. The quickest is
perhaps the following.


\bigskip

Recall that a $2\times 2$ matrix has
characteristic polynomial $\lambda ^2 - (\tr{A})\lambda+\det A$.
Since $A$ has eigenvalues $-1$ and $1$, $A^{20}$ has eigenvalues
$1^{20}=1$ and $(-1)^{20}=1$, i.e., the sole eigenvalue of $A^{20}$ is $1$
and so $A^{20}$ has characteristic polynomial $(\lambda
-1)^2=\lambda ^2 -2\lambda + 1$. This means that $-\tr{A^{20}}=-2$
and so $\tr{A^{20}}=2$.


\bigskip

The direct way would be to argue that
$$\begin{array}{lll}
A^{20} & = & \begin{bmatrix} 2 & 1 \cr 5 &
2\end{bmatrix}\begin{bmatrix} -1 & 0 \cr 0 & 1\end{bmatrix}^{20}
\begin{bmatrix} 2 & 1 \cr 5 & 2\end{bmatrix} ^{-1} \\
& = &\begin{bmatrix} 2 & 1 \cr 5 & 2\end{bmatrix}\begin{bmatrix} 1 &
0 \cr 0 & 1\end{bmatrix}
\begin{bmatrix} 2 & 1 \cr 5 & 2\end{bmatrix}^{-1}\\
& = & \begin{bmatrix} 1 & 0 \cr 0 & 1\end{bmatrix},
\end{array}$$and so $a+d=2$.
One may also use the fact that $\tr{XY}=\tr{YX}$ and hence
$$\tr{A^{20}}=\tr{PD^{20}P^{-1}}=\tr{PP^{-1}D^{20}}=\tr{D^{20}}=2. $$
\end{enumerate}

\end{answer}
 \end{pro}
\begin{pro} Let $A\in\mat{3\times 3}{\BBR}$ have characteristic polynomial $$(\lambda + 1)^2(\lambda - 3).$$
One of the eigenvalues has two eigenvectors $\begin{bmatrix}1 \cr
0 \cr 0\cr\end{bmatrix}$ and $\begin{bmatrix}1 \cr 1\cr 0 \cr
\end{bmatrix}$. The other eigenvalue has corresponding eigenvector
$\colvec{1 \\ 1 \\ 1}$. Determine $A$.\begin{answer} Put $$D =
\begin{bmatrix}-1 & 0 & 0 \cr 0 & -1 & 0 \cr 0 & 0 & 3 \cr
\end{bmatrix}, \ \ \ X = \begin{bmatrix}1 & 1 & 1 \cr 0 & 1 & 1 \cr 0
& 0 & 1 \cr
\end{bmatrix}.$$ Then we know that $A = XDX^{-1}$ and so we need to
find $X^{-1}$. But this is readily obtained by performing $R_1 -
R_2 \rightarrow R_1$ and $R_2 - R_3 \rightarrow R_2$ in the
augmented matrix
$$\begin{bmat}{ccc|ccc}1 & 1 & 1 & 1 & 0 & 0 \cr
 0 & 1 & 1 & 0 & 1 & 0 \cr 0
& 0 & 1 & 0 & 0 & 1 \cr\end{bmat},  $$getting
$$X^{-1} = \begin{bmatrix}1 & -1 & 0 \cr 0 & 1 & -1 \cr 0 & 0 & 1 \cr \end{bmatrix}.$$
Thus $$\begin{array}{lll} A & =  & XDX^{-1} \\
& = & \begin{bmatrix}1 & 1 & 1 \cr 0 & 1 & 1 \cr 0 & 0 & 1 \cr
\end{bmatrix} \begin{bmatrix}-1 & 0 & 0 \cr 0 & -1 & 0 \cr 0
& 0 & 3 \cr
\end{bmatrix}\begin{bmatrix}1 & -1 & 0 \cr 0 & 1 & -1 \cr 0 & 0 & 1 \cr
\end{bmatrix}\\
& = &\begin{bmatrix}-1 & 0 & 4 \cr 0 & -1 & 4 \cr 0 & 0 & 3 \cr
\end{bmatrix}.
\end{array}$$
\end{answer}
\end{pro}
\begin{pro}
Let
$$A =
\begin{bmatrix} 0 & 0 & 0 & 1 \cr 0 & 0 & 1 & 0 \cr 0 & 1 & 0 & 0
\cr 1 & 0 & 0 & 0 \cr\end{bmatrix}.$$
\begin{enumerate}
\item  Find $\det A$. \item  Find $A^{-1}$.

\item  Find $\rank{A - {\bf I}_4}$. \item  Find $\det (A - {\bf
I}_4)$. \item  Find the characteristic polynomial of $A$. \item
 Find the eigenvalues of $A$. \item  Find the
eigenvectors of $A$. \item  Find $A^{10}$.


\end{enumerate}
\begin{answer}
The determinant is $1$, $A = A^{-1}$, and the characteristic
polynomial is $(\lambda ^2 - 1)^2$.
\end{answer}
\end{pro}
\begin{pro}
Consider the matrix $$A = \begin{bmatrix} 1 & a & 1 \cr 0 & 1 & b
\cr 0 & 0  & c   \cr
\end{bmatrix}.  $$
\begin{dingautolist}{202}
\item  Find the characteristic polynomial of $A$.
\item  Explain whether $A$ is diagonalisable when $a=0,
c=1$.
\item  Explain whether $A$ is diagonalisable when $a\neq 0,
c=1$.
\item  Explain whether $A$ is diagonalisable when $
c\neq 1$.

\end{dingautolist}
\end{pro}
\begin{pro}
Find a closed formula for $A^n$, if
$$ A = \begin{bmatrix} -7 & -6 \cr 12 & 10 \cr  \end{bmatrix} .$$
\begin{answer}
We find
$$ \det (\lambda {\bf I}_2-A)= \det  \begin{bmatrix} \lambda +7 & 6 \cr -12 & \lambda -10 \cr  \end{bmatrix} = \lambda ^2-3\lambda +2=(\lambda-1)(\lambda-2).  $$
A short calculation shews that the eigenvalue $\lambda =2$ has
eigenvector $\colvec{2\\ -3}$ and that the eigenvalue $\lambda =1$
has eigenvector $\colvec{3\\ -4}$. Thus we may form $$D =
\begin{bmatrix} 2 & 0 \cr 0 & 1\cr \end{bmatrix}, \quad P =\begin{bmatrix} 2 & 3 \cr -3 & -4\cr \end{bmatrix}
, \quad P^{-1}=\begin{bmatrix} -4 & -3 \cr 3 & 2\cr \end{bmatrix}.$$
This gives
$$ A=PDP^{-1} \implies A^n = PD^nP^{-1} = \begin{bmatrix} 2 & 3 \cr -3 & -4\cr \end{bmatrix}\begin{bmatrix} 2^n & 0 \cr 0 & 1\cr \end{bmatrix}\begin{bmatrix} -4 & -3 \cr 3 & 2\cr \end{bmatrix}
=\begin{bmatrix} -8\cdot 2^n+9 &  -6\cdot 2^n+6 \cr 12\cdot 2^n-12 &
9\cdot 2^n-8\end{bmatrix}.  $$
\end{answer}

\end{pro}

\begin{pro}
Let $U\in\mat{n\times n}{\BBR}$ be a square matrix all whose entries
are equal to $1$.
\begin{enumerate}
\item  Demonstrate that $U^2 = nU$. \item  Find $\det U$.\item
Prove that $\det(\lambda {\bf I}_n - U) = \lambda ^{n - 1}(\lambda
- n).$ \item  Shew that $\dim\ker U = n - 1$. \item  Shew that
$$U = P\begin{bmatrix}n & 0 & \cdots & 0 \cr 0 & 0 & \cdots & 0 \cr \vdots & \vdots & \vdots & \vdots \cr
0 & 0 & \cdots & 0 \cr\end{bmatrix} P^{-1}, $$where
$$P = \begin{bmatrix}1 & 1 & 0 & \cdots & 0 & 0 \cr 1 & 0 & 1 & \cdots & 0 & 0 \cr
1 & 0 & 0 & \ddots & \vdots & \vdots \cr \vdots & \vdots & \vdots
& \ddots & \vdots & \vdots \cr  1 & 0 & 0 & \cdots & 0 & 1 \cr 1 &
-1 & -1 & \cdots & -1 & -1 \cr\end{bmatrix}.$$
\end{enumerate}


\end{pro}
\end{multicols}
\section{Theorem of Cayley and Hamilton }

\begin{thm}[Cayley-Hamilton] A matrix $A\in\mat{n}{\BBF}$ satisfies
its characteristic polynomial.
\end{thm}
\begin{pf}
Put $B = \lambda {\bf I}_n - A$. We can write $$\det B = \det
(\lambda {\bf I}_n - A) = \lambda ^n + b_1\lambda ^{n - 1} +
b_2\lambda ^{n - 2}+ \cdots +  b_n,$$as $\det (\lambda {\bf I}_n -
A)$ is a polynomial of degree $n$.

\bigskip

Since $\adj{B}$ is a matrix obtained by using $(n-1)\times (n-1)$
determinants from $B$, we may write
$$\adj{B} = \lambda ^{n-1}B_{n-1} + \lambda ^{n-2}B_{n-2} + \cdots + B_0.  $$
Hence

$$\det (\lambda {\bf I}_n - A){\bf I}_n = (B)(\adj{B}) = (\lambda {\bf I}_n - A)(\adj{B}),$$
from where $$\lambda ^n{\bf I}_n + b_1{\bf I}_n\lambda ^{n - 1} +
b_2{\bf I}_n\lambda ^{n - 2}+ \cdots +  b_n{\bf I}_n  = (\lambda
{\bf I}_n - A)( \lambda ^{n-1}B_{n-1} + \lambda ^{n-2}B_{n-2} +
\cdots + B_0). $$ By equating coefficients we deduce

$$\begin{array}{lll} {\bf I}_n & = & B_{n-1}\\
b_1{\bf I}_n & = & -AB_{n-1}+B_{n-2}\\
b_2{\bf I}_n & = & -AB_{n-2}+B_{n-3}\\
& \vdots & \\
b_{n-1}{\bf I}_n & = & -AB_1 + B_0 \\
b_{n}{\bf I}_n & = & -AB_0. \\



  \end{array} $$
Multiply now the $k$-th row by $A^{n-k}$ (the first row appearing
is really the $0$-th row):
$$\begin{array}{lll} A^{n} & = & A^{n}B_{n-1}\\
b_1A^{n-1} & = & -A^{n}B_{n-1}+A^{n-1}B_{n-2}\\
b_2A^{n-2} & = & -A^{n-1}B_{n-2}+A^{n-2}B_{n-3}\\
& \vdots & \\
b_{n-1}A & = & -A^2B_1 + AB_0 \\
b_{n}{\bf I}_n & = & -AB_0. \\
  \end{array} $$
Add all the rows and through telescopic cancellation obtain
$$A^n + b_1A^{n-1} + \cdots + b_{n-1}A + b_n{\bf I}_n= {\bf 0}_n,    $$
from where the theorem follows.
\end{pf}
\begin{exa}
From example \ref{exa:diagonalisable3x3} the matrix
 $$\begin{bmatrix}1 & - 1 & -1 \cr 1 &  3 & 1 \cr -3 & 1 & -1 \cr\end{bmatrix}$$
has characteristic polynomial $$(\lambda - 3)(\lambda - 2)(\lambda
+ 2)= \lambda^3-3\lambda^2-4\lambda+12,$$hence the inverse of this
matrix can be obtained by observing that

$$A^3 - 3A^2 - 4A + 12{\bf I}_3  = {\bf 0}_3 \implies A^{-1} = -\dfrac{1}{12}\left(A^2 - 3A - 4{\bf I}_3\right) = \left[ \begin {array}{ccc} 1/3&1/6&-1/6\\\noalign{\medskip}1/6&1/3&1/6\\\noalign{\medskip}-5/6&-1/6&-1/3\end {array}
\right] .
    $$


\end{exa}

\section*{\psframebox{Homework}}
\begin{pro}
A $3\times 3$ matrix $A$ has characteristic polynomial $\lambda
(\lambda - 1)(\lambda + 2)$. What is the characteristic polynomial
of $A^2$?
\begin{answer}
The eigenvalues of $A$ are $0$, $1$, and $-2$. Those of $A^2$ are
$0$, $1$, and $4$. Hence, the characteristic polynomial of $A^2$ is
$\lambda (\lambda - 1)(\lambda - 4)$.
\end{answer}

\end{pro}





\chapter{Linear Algebra and Geometry}
\section{Points and Bi-points in \protect$\BBR^2\protect$}
$\BBR^2$ is the set of all points $A = \colpoint{a_1\\ a_2}$ with
real number coordinates on the plane, as in figure
\ref{fig:pointinr2}. We use the notation $\zeropoint = \colpoint{0\\
0}$ to denote the {\em origin}.

\vspace{2cm}
\begin{figure}[h]
$$\psset{unit=2pc}\psline[linewidth=2pt, linecolor=red]{->}(-3,0)(3,
0)\uput[r](3,0){x} \psline[linewidth=2pt,
linecolor=blue]{->}(0,-3)(0, 3)\uput[u](0,3){y}
\psdots[dotstyle=*,dotscale=1.5](0,0)(1.5,1.5)\uput[ur](1.5, 1.5){A
= \colpoint{a_1\\ a_2}} \uput[ur](0, 0){\zeropoint}
\psline[linestyle=dashed](0,1.5)(1.5,
1.5)\psline[linestyle=dashed](1.5, 0)(1.5, 1.5)
$$\vspace{2cm}\footnotesize\hangcaption{Rectangular coordinates in $\BBR^2$.}
 \label{fig:pointinr2}\end{figure}


\bigskip
Given $A = \colpoint{a_1\\ a_2}\in\BBR^2$ and $B = \colpoint{b_1\\
b_2}\in\BBR^2$ we define their addition as

\begin{equation} A + B = \colpoint{a_1\\ a_2} + \colpoint{b_1\\ b_2} = \colpoint{a_1 + b_1\\ a_2 + b_2}.       \label{eq:addition_in_r2}\end{equation}
Similarly, we define the scalar multiplication of a point of
$\BBR^2$ by the scalar $\alpha\in\BBR$ as
\begin{equation} \alpha A= \alpha \colpoint{a_1\\ a_2} = \colpoint{\alpha a_1\\ \alpha a_2}.       \label{eq:multiplication_in_r2}\end{equation}

\begin{rem}
Throughout this chapter, unless otherwise noted, we will use the
convention that a point $A\in \BBR^2$ will have its coordinates
named after its letter, thus $$ A = \colpoint{a_1\\ a_2}.$$

\end{rem}


\begin{df}
Consider the points $A\in\BBR^2, B\in \BBR^2$. By the {\em bi-point}
starting at $A$ and ending at $B$, denoted by $\bipoint{A}{B}$, we
mean the directed line segment from $A$ to $B$. We define
$$\bipoint{A}{A} = \zeropoint = \colpoint{0\\ 0}.$$ \index{bi-point}
\end{df}
\begin{rem}
The bi-point $\bipoint{A}{B}$ can be thus interpreted as an arrow
starting at $A$ and finishing, with the arrow tip, at $B$. We say
that $A$ is the {\em tail} of the bi-point $\bipoint{A}{B}$ and
that $B$ is its {\em head}. Some authors use the terminology
``{\em fixed vector}'' instead of ``bi-point.''
\end{rem}


\begin{df}
Let $A \neq B$ be points on the plane and let $L$ be the line
passing through $A$ and $B$. The {\em direction} of the bi-point
$\bipoint{A}{B}$ is the direction of the line $L$, that is, the
angle $\theta \in \left]-\frac{\pi}{2}; \frac{\pi}{2}\right]$ that
the line $L$ makes with the horizontal. See figure
\ref{fig:bipointdirection}.
\end{df}\begin{df}
Let $A, B$ lie on line $L$, and let $ C, D$ lie on line $L'$. If
$L||L'$ then we say that $\bipoint{A}{B}$ has the same direction
as $\bipoint{C}{D}$.  We say that the bi-points $\bipoint{A}{B}$
and $\bipoint{C}{D}$ have the {\em same sense} if they have the
same direction and if both their heads lie on the same half-plane
made by  the line joining their tails. They have {\em opposite
sense } if they have the same direction and if both their heads
lie on alternative half-planes made by the line joining their
tails. See figures \ref{fig:bipointsense1} and
\ref{fig:bipointsense2} .
\end{df}
\vspace{2cm}
\begin{figure}[h]
\hfill
\begin{minipage}{3cm}$$\psset{unit=.7pc} \psline(-5, -5)(5, 5)\psline(-6, -3)(6,
-3) \psdots[dotstyle=*, dotscale=1.5](-3, -3) \psarc{->}(-3,
-3){2}{0}{45}\uput[r](-2.5,-2.5){\theta} \psline[linewidth=2pt,
linecolor=red]{*->}(-1, -1)(2, 2) \uput[r](-1,-1){A}\uput[r](2, 2
){B}$$\vspace{2cm}\scriptsize\hangcaption{Direction of a
bi-point}\label{fig:bipointdirection} \end{minipage} \hfill
\begin{minipage}{3cm}$$\psset{unit=.7pc}  \psline[linewidth=2pt,
linecolor=red]{*->}(-1, -1)(2, 2) \psline[linewidth=2pt,
linecolor=blue]{*->}(2, -3)(5, 0) \uput[r](-.7,-1){A}\uput[r](2, 2
){B}\uput[r](2,-3){C}\uput[r](5, 0 ){D}\psline(-1,
-1)(2,-3)$$\vspace{2cm}\scriptsize\hangcaption{Bi-points with the
same sense. }\label{fig:bipointsense1}\end{minipage} \hfill
\begin{minipage}{3cm}$$\psset{unit=.7pc} \psline[linewidth=2pt,
linecolor=red]{*->}(-1, -1)(2, 2)\psline[linewidth=2pt,
linecolor=blue]{*->}(2, -3)(-1, -6) \uput[r](-.7,-1){A}\uput[r](2,
2 ){B}\uput[r](2,-3){C}\uput[r](-1, -6 ){D}\psline(2, -3)(-1,
-1)$$\vspace{2cm}\scriptsize\hangcaption{Bi-points with opposite
sense. }\label{fig:bipointsense2}\end{minipage}\hfill
\end{figure}


\begin{rem}
Bi-point $\bipoint{B}{A}$ has the opposite sense of
$\bipoint{A}{B}$ and so we write
$$\bipoint{B}{A} = -\bipoint{A}{B}.$$
\end{rem}
\begin{df}\index{norm!of a bi-point}\index{norm!unit}
Let $A \neq B$. The {\em Euclidean length or norm} of bi-point
$\bipoint{A}{B}$ is simply the distance between $A$ and $B$ and it
is denoted by
$$\norm{\bipoint{A}{B}} = \sqrt{(a_1 - b_1)^2 + (a_2 - b_2)^2}.$$ We define
$$\norm{\bipoint{A}{A}}= \norm{\zeropoint} = 0.$$ A bi-point is
said to have {\em unit length} if it has norm $1$.
\end{df}
\begin{rem}
A bi-point is completely determined by three things: (i) its norm,
(ii) its direction, and (iii) its sense.
\end{rem}


\begin{df}[Chasles' Rule]
Two bi-points are said to be {\em contiguous} if one has as tail
the head of the other. In such case we define the sum of
contiguous bi-points $\bipoint{A}{B}$ and $\bipoint{B}{C}$ by {\em
Chasles' Rule} \index{Chasles' Rule}
$$\bipoint{A}{B} + \bipoint{B}{C} = \bipoint{A}{C}.$$
See figure \ref{fig:addbipoints}.\end{df}
\begin{df}[Scalar Multiplication of Bi-points] Let $\lambda \in \BBR \setminus \{0\}$ and $A \neq B$.
We define $$0\bipoint{A}{B} = \zeropoint$$ and
$$\lambda\bipoint{A}{A} = \zeropoint .$$ We  define
$\lambda\bipoint{A}{B}$ as follows.
\begin{enumerate}
\item $\lambda\bipoint{A}{B}$ has the direction of
$\bipoint{A}{B}$. \item $\lambda\bipoint{A}{B}$ has the sense of
$\bipoint{A}{B}$ if $\lambda > 0$ and sense opposite
$\bipoint{A}{B}$ if $\lambda < 0$. \item $\lambda\bipoint{A}{B}$
has norm $|\lambda|\norm{\bipoint{A}{B}}$ which is a contraction
of $\bipoint{A}{B}$ if $0 < |\lambda| < 1$ or  a dilatation of
$\bipoint{A}{B}$ if $|\lambda| > 1$.
\end{enumerate}  See figure \ref{fig:scalarmul_1} for some
examples.
\end{df}

\vspace{2cm}
\begin{figure}[h]\begin{minipage}{6cm}
$$\psset{unit=.5pc} \rput(-2,0){\psline[linewidth=2pt,
linecolor=red]{->}(-3,3)(3, 5) \psline[linewidth=2pt,
linecolor=blue]{->}(3,5)(9, 2) \uput[l](-3, 3
){A}\uput[u](3,5){B}\uput[r](9, 2 ){C}\psline[linewidth=2pt,
linecolor=green]{->}(-3,3)(9, 2)}
$$\vspace{1cm}\footnotesize\hangcaption{Chasles' Rule.}\label{fig:addbipoints}
\end{minipage}\hfill\begin{minipage}{6cm}$$\psset{unit=.5pc}
\psline[linewidth=2pt, linecolor=red]{->}(-3,3)(3, 5)\uput[r](3,
5.5){\bipoint{A}{B}} \psline[linewidth=2pt,
linecolor=green]{->}(0,0)(3, 1)\uput[d](5,
.5){\frac{1}{2}\bipoint{A}{B}} \psline[linewidth=2pt,
linecolor=blue]{->}(6, 10)(-6,6)\uput[u](6, 10){-2\bipoint{A}{B}}
$$\vspace{1cm}\footnotesize\hangcaption{Scalar multiplication of bi-points.}\label{fig:scalarmul_1}
\end{minipage}\end{figure}

\section{Vectors in $\BBR^2$}

\begin{df}[Midpoint] \index{midpoint}
Let $A, B$ be points in $\BBR^2$. We define the {\em midpoint} of
the bi-point $\bipoint{A}{B}$ as
$$\frac{A + B}{2} = \colpoint{\frac{a_1 + b_1}{2}\\ \frac{a_2 + b_2}{2}}.$$
\end{df}

\begin{df}[Equipollence] \index{equipollence}
Two bi-points $\bipoint{X}{Y}$ and $\bipoint{A}{B}$ are said to be
{\em equipollent} written $\bipoint{X}{Y} \sim \bipoint{A}{B}$ if
the midpoints of the bi-points $\bipoint{X}{B}$ and
$\bipoint{Y}{A}$ coincide, that is,
$$ \bipoint{X}{Y} \sim \bipoint{A}{B} \Leftrightarrow \frac{{X} + B}{2} = \frac{{Y} +
A}{2}.$$ See figure
\ref{fig:equibi-points1}.\label{df:equipollence}\end{df}
 Geometrically, equipollence means
that the quadrilateral ${XYBA}$ is a parallelogram. Thus the
bi-points $\bipoint{X}{Y}$ and $\bipoint{A}{B}$ have the same
norm, sense, and direction. \vspace{2cm}
\begin{figure}[h]
$$\psset{unit=1pc}
\psline[linewidth=2pt, linecolor=red]{->}(-3,3)(3, 5)
\psline[linewidth=2pt, linecolor=blue]{->}(3,0)(9, 2)
\psline[linestyle=dashed](-3,3)(9, 2) \rput(1.5, 2.75){||}\rput(6,
2.25){||} \psline[linestyle=dashed](3,5)(3,0)\rput(3,
3.75){-}\rput(3, 1.25){-} \uput[l](-3, 3){{X}} \uput[u](3,
5){{Y}}\uput[d](3, 0){A}\uput[r](9, 2){B}
$$\caption{Equipollent bi-points.}\label{fig:equibi-points1}
\end{figure}
\begin{lem}\label{lem:condition_for_equipollence}
Two bi-points $\bipoint{X}{Y}$ and $\bipoint{A}{B}$ are
equipollent if and only if $$\colpoint{y_1 - x_1\\ y_2 - x_2} =
\colpoint{b_1 - a_1\\ b_2 - a_2}.$$
\end{lem}
\begin{pf}
This is immediate, since
 $$\begin{array}{lll}
\bipoint{X}{Y} \sim \bipoint{A}{B} & \iff & \colpoint{\frac{a_1 + y_1}{2}\vspace{2mm}\\
\frac{a_2 + y_2}{2}} = \colpoint{\frac{b_1 + x_1}{2}\\ \frac{b_2 +
x_2}{2}} \vspace{2mm}\\
& \iff & \colpoint{y_1 - x_1\\ y_2 - x_2} = \colpoint{b_1 - a_1\\
b_2 - a_2},
\end{array}$$as desired. \end{pf}
\begin{rem}
From Lemma \ref{lem:condition_for_equipollence}, equipollent
bi-points have the same norm, the same direction, and the same
sense.
\end{rem}


\begin{thm} \label{thm:equipollence_equiv_rel}
Equipollence is an equivalence relation.
\end{thm}
\begin{pf}
Write $\bipoint{X}{Y} \sim \bipoint{A}{B}$ if $\bipoint{X}{Y}$ is
equipollent to  $\bipoint{A}{B}$. Now  $\bipoint{X}{Y} \sim
\bipoint{X}{Y}$
since $\colpoint{y_1 - x_1\\ y_2 - x_2} = \colpoint{y_1 - x_1\\
y_2 - x_2}$ and so the relation is reflexive. Also
$$ \begin{array}{lll} \bipoint{X}{Y}
\sim \bipoint{A}{B}  &\iff & \colpoint{y_1 - x_1\\ y_2 - x_2} =
\colpoint{b_1 - a_1\\ b_2 - a_2} \vspace{2mm}\\ & \iff &  \colpoint{b_1 - a_1\\
b_2 - a_2} = \colpoint{y_1 - x_1\\ y_2 - x_2} \\ & \iff  &
 \bipoint{A}{B} \sim \bipoint{X}{Y},  \end{array}$$and the
 relation is symmetric.
 Finally
$$ \begin{array}{lll} \bipoint{X}{Y}
\sim \bipoint{A}{B} \wedge   \bipoint{A}{B} \sim \bipoint{U}{V}
&\iff & \colpoint{y_1 - x_1\\ y_2 - x_2} = \colpoint{b_1 - a_1\\
b_2 - a_2} \vspace{2mm} \\ & & \qquad  \wedge \colpoint{b_1 - a_1\\
b_2 - a_2} =
\colpoint{v_1 - u_1\\ v_2 - u_2} \vspace{2mm}\\ & \iff &  \colpoint{y_1 - x_1\\ y_2 - x_2} = \colpoint{v_1 - u_1\\
v_2 - u_2} \vspace{2mm}  \\ & \iff  &
 \bipoint{X}{Y} \sim \bipoint{U}{V},  \end{array}$$
and the relation is transitive.
\end{pf}


\begin{df}[Vectors on the Plane] \index{vector!on the plane}
The equivalence class in which the bi-point $\bipoint{X}{Y}$ falls
is called the {\em vector} (or {\em free vector}) from ${X}$ to
${Y}$, and is denoted by $\vect{XY}$. Thus we write
$$\bipoint{X}{Y} \in \vect{XY} = \colvec{y_1 - x_1 \\ y_2 - x_2}.  $$If we desire to
talk about a vector without mentioning a bi-point representative,
we write, say, $\v{v}$, thus denoting vectors with boldface
lowercase letters. If it is necessary to mention the coordinates
of $\v{v}$ we will write
$$\v{v} = \colvec{v_1
\\ v_2 }.
$$
 \label{df:disvect}\end{df}

\begin{rem}
For any point $X$ on the plane, we have $\vect{XX} = \v{0}$, the
{\em zero vector}. If $\bipoint{X}{Y}\in \v{v}$ then
$\bipoint{Y}{X}\in -\v{v}$.
\end{rem}
\begin{df}[Position Vector] \index{vector!position}
For any particular point $P = \colpoint{p_1\\ p_2}\in \BBR^2$ we may
form the vector $\vect{OP} = \colvec{p_1 \\ p_2}$. We call
$\vect{OP}$ the {\em position vector} of $P$ and we use boldface
lowercase letters to denote the equality $\vect{OP} = \v{p}$.
\end{df}

\begin{exa}
The vector into which the bi-point with tail at $A =
\colpoint{-1\\
2}$ and
head at $B = \colpoint{3\\ 4}$ falls is $$\vect{AB} = \colvec{3 - (-1)\\
4 - 2} = \colvec{4 \\ 2}.
$$
\end{exa}
\begin{exa}
The bi-points $\bipoint{A}{B}$ and $\bipoint{X}{Y}$ with $$A =
\colpoint{-1\\ 2}, B = \colpoint{3\\ 4},$$
$$X =
\colpoint{3\\ 7}, Y = \colpoint{7\\ 9}$$ represent the same vector
$$\vect{AB} = \colvec{3 - (-1)\\
4 - 2} = \colvec{4 \\ 2} = \colvec{7-3 \\ 9-7} = \vect{XY}.
$$ In fact, if $S = \colpoint{-1 + n\\ 2 + m}, T = \colpoint{3 + n \\ 4 +
m}$ then the infinite number of bi-points $\bipoint{S}{T}$ are
representatives of the vectors $\vect{AB} = \vect{XY} =
\vect{ST}$.
\end{exa}


\index{vector!sum!on the plane}Given two vectors $\v{u}$, $\v{v}$
we define their sum $\v{u} + \v{v}$ as follows. Find a bi-point
representative $\vect{AB}\in\v{u}$ and a contiguous bi-point
representative $\vect{BC}\in\v{v}$. Then by Chasles' Rule
$$ \v{u} + \v{v} = \vect{AB} + \vect{BC} = \vect{AC}.    $$
Again, by  virtue of Chasles' Rule we then have
\begin{equation}
\vect{AB} = \vect{AO} + \vect{OB} = -\vect{OA} + \vect{OB} = \v{b}
- \v{a} \label{eq:difference_pos_vectors}
\end{equation}

\bigskip
\index{scalar multiplication!of a plane vector}Similarly we define
scalar multiplication of a vector by scaling one of its bi-point
representatives.\index{norm!of a vector}We define the norm of a
vector $\v{v}\in\BBR^2$ to be the norm of any of its bi-point
representatives.

\bigskip

Componentwise we may see that given vectors $\v{u} = \colvec{u_1
\\ u_2}$, $\v{v} = \colvec{v_1 \\ v_2}$, and a scalar
$\lambda\in\BBR$ then their sum and scalar multiplication take the
form $$ \v{u} + \v{v}= \colvec{u_1
\\ u_2} + \colvec{v_1
\\ v_2}, \ \ \ \ \lambda\v{u} = \colvec{\lambda u_1
\\ \lambda u_2}.
$$
\vspace{2cm}
\begin{figure}[h]\begin{minipage}{6cm}
$$\psset{unit=.5pc} \rput(-2,0){\psline[linewidth=2pt,
linecolor=red]{->}(-3,3)(3, 5) \psline[linewidth=2pt,
linecolor=blue]{->}(3,5)(9, 2) \uput[l](-3, 3
){A}\uput[u](3,5){B}\uput[r](9, 2 ){C}\uput[u](5.5, 3.5){\v{v}}
\uput[l](0,4.5){\v{u}} \uput[l](3.5,1){\v{u} +
\v{v}}\psline[linewidth=2pt, linecolor=green]{->}(-3,3)(9, 2)}
$$\vspace{1cm}\footnotesize\hangcaption{Addition of Vectors.}\label{fig:addvectors}
\end{minipage}\hfill\begin{minipage}{6cm}$$\psset{unit=.5pc}
\psline[linewidth=2pt, linecolor=red]{->}(-3,3)(3, 5)\uput[r](3,
5.5){\v{u}} \psline[linewidth=2pt, linecolor=green]{->}(0,0)(3,
1)\uput[d](5, .5){\frac{1}{2}\v{u}} \psline[linewidth=2pt,
linecolor=blue]{->}(6, 10)(-6,6)\uput[u](6, 10){-2\v{u}}
$$\vspace{1cm}\footnotesize\hangcaption{Scalar multiplication of vectors.}\label{fig:scalarmultvectors}
\end{minipage}\end{figure}
\begin{exa}
Diagonals are drawn in a rectangle $ABCD$. If $\vect{AB} = \v{x}$
and $\vect{AC} = \v{y}$, then $\vect{BC} = \v{y} -\v{x}$,
$\vect{CD} = -\v{x}$, $\vect{DA} = \v{x} - \v{y}$, and $\vect{BD}
= \v{y} - 2\v{x}$.
\end{exa}
\begin{df}[Parallel Vectors] Two vectors $\v{u}$ and
$\v{v}$ are said to be {\em parallel} if there is a scalar $\lambda$
such that $\v{u}=\lambda\v{v}$. If $\v{u}$ is parallel to $\v{v}$ we
write $\v{u}||\v{v}$. We denote by $\BBR\v{v} = \{\alpha \v{v}:
\alpha \in \BBR \}$, the set of all vectors parallel to $\v{v}$.
\end{df}
\begin{rem}
$\v{0}$ is parallel to every vector.
\end{rem}

\begin{df}If $\v{u} = \colvec{u_1
\\ u_2}$, then we define its {\em norm} as $\norm{\v{u}} = \sqrt{u_1 ^2 + u_2 ^2}$. The distance between two vectors $\v{u}$ and
$\v{v}$ is ${\bf d}\langle \v{u}, \v{v} \rangle = \norm{\v{u}-
\v{v}}$.
\end{df}

\begin{exa}
Let $a\in\BBR$, $a>0$ and let $\v{v} \neq \v{0}$. Find a vector with
norm $a$ and parallel to $\v{v}$.
\end{exa}
\begin{solu}Observe that $\dfrac{\v{v}}{\norm{\v{v}}}$ has norm $1$
as
$$\left|\left|\dfrac{\v{v}}{\norm{\v{v}}}\right|\right| =
\dfrac{\norm{\v{v}}}{\norm{\v{v}}} = 1.$$ Hence the vector
$a\dfrac{\v{v}}{\norm{\v{v}}}$ has norm $a$ and it is in the
direction of $\v{v}$. One may also take
$-a\dfrac{\v{v}}{\norm{\v{v}}}$.
\end{solu}



\begin{exa} If ${M}$ is the midpoint of the bi-point $\bipoint{X}{Y}$
then $\vect{XM}= \vect{MY}$ from where $\vect{XM}=
\frac{1}{2}\vect{XY}$. Moreover, if ${T}$ is any point, by
Chasles' Rule $$\begin{array}{lll}\vect{TX} + \vect{TY} &  = &
\vect{TM} + \vect{MX}  +
\vect{TM}  + \vect{MY}\\
& = & 2\vect{TM} - \vect{XM}+ \vect{MY} \\
&  = &   2\vect{TM}. \\
\end{array} $$
\label{exa:propertyofmidpoint}\end{exa}
\begin{exa} Let $\triangle {ABC}$ be a triangle on the
plane. Prove that  the line joining the midpoints of two sides of
the triangle is parallel to the third side and measures half its
length. \label{exa:medtriag_1}\end{exa} \begin{solu}Let the
midpoints of $\bipoint{A}{B}$ and $\bipoint{A}{C}$ be ${M_C}$ and
${M_B}$, respectively. We shew that $\vect{BC} = 2\vect{M_CM_B}$. We
have $2\vect{AM_C} = \vect{AB}$ and $2\vect{AM_B} = \vect{AC}$. Thus
$$\begin{array}{lll}\vect{BC} & = &
\vect{BA} + \vect{AC}  \\
& = & -\vect{AB} + \vect{AC}  \\
& = &  -2\vect{AM_C} + 2\vect{AM_B} \\
& = & 2\vect{M_CA} + 2\vect{AM_B} \\
& = & 2(\vect{M_CA} + \vect{AM_B}) \\
& = & 2\vect{M_CM_B}, \end{array}$$ as we wanted to shew.
\end{solu}
\begin{exa}
In $\triangle {ABC}$, let ${M_C}$ be the midpoint of side ${AB}$.
Shew that
$$\vect{CM_C} = \frac{1}{2}\left(\vect{CA} + \vect{CB}\right).$$
\label{exa:medtriag_2}\end{exa} \begin{solu}Since $\vect{AM_C} =
\vect{M_CB}$, we have
$$\begin{array}{lll}
\vect{CA} + \vect{CB}  & = & \vect{CM_C} + \vect{M_CA} + \vect{CM_C} + \vect{M_CB} \\
& = & 2\vect{CM_C} - \vect{AM_C} + \vect{M_CB}\\
& = & 2\vect{CM_C},
\end{array}
$$which yields the desired result.
\end{solu}
\begin{thm}[Section Formula] Let
$APB$ be a straight line and $\lambda$ and  $\mu$ be  real numbers
such that
$$\dfrac{\norm{\bipoint{A}{P}}}{\norm{\bipoint{P}{B}}} =
\dfrac{\lambda}{\mu}.
$$With $\v{a} = \vect{OA}$, $\v{b} = \vect{OB}$, and
$\v{p} = \vect{OP}$, then \begin{equation}\v{p}
=\dfrac{\lambda\v{b} + \mu\v{a}}{\lambda +
\mu}.\label{eq:section_formula}\index{section
formula}\end{equation}
\end{thm}
\begin{pf}
Using Chasles' Rule for vectors, $$\vect{AB} = \vect{AO} +
\vect{OB} = -\v{a} + \v{b},
$$
$$\vect{AP} = \vect{AO} + \vect{OP} =
-\v{a} + \v{p}.
$$Also, using Chasles' Rule for bi-points,
$$\bipoint{A}{P}\mu = \lambda (\bipoint{P}{B}) = \lambda (\bipoint{P}{A} +\bipoint{A}{B}) = \lambda (-\bipoint{A}{P} +\bipoint{A}{B}),
$$whence
$$\bipoint{A}{P} = \dfrac{\lambda}{\lambda + \mu}\bipoint{A}{B} \implies \vect{AP} = \dfrac{\lambda}{\lambda + \mu}\vect{AB} \implies \v{p} - \v{a} = \dfrac{\lambda}{\lambda + \mu}(\v{b} -\v{a}).  $$
On combining these formul\ae \ $$(\lambda + \mu)(\v{p} - \v{a}) =
\lambda (\v{b} - \v{a}) \implies (\lambda + \mu)\v{p} =
\lambda\v{b} + \mu\v{a},
$$from where the result follows.  \end{pf}

\vspace{1cm}
\begin{figure}[h]
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pscircle[linewidth=1pt](0,0){1}
\psline[linewidth=1.5pt]{o->}(0,0)(0,1)
\psline[linewidth=1.5pt]{o->}(0,0)(0.866, -0.5)
\psline[linewidth=1.5pt]{o->}(0,0)(-0.866,-0.5) \uput[u](0,1){\v{a}}
\uput[dl](-0.866, -0.5){\v{b}} \uput[dr](0.866, -0.5){\v{c}}
  $$\vspace{1cm}\footnotesize\hangcaption{[A]. Problem \ref{pro:sum_of_vectors1}.} \label{fig:sum_of_vectors1}
\end{minipage}
\hfill
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pscircle[linewidth=1pt](0,0){1}
\psline[linewidth=1.5pt]{<-o}(0,0)(0,1)
\psline[linewidth=1.5pt]{<-o}(0,0)(0.866, -0.5)
\psline[linewidth=1.5pt]{<-o}(0,0)(-0.866, -0.5)
\uput[u](0,1){\v{a}} \uput[dr](0.866, -0.5){\v{b}} \uput[dl](-0.866,
-0.5){\v{c}}
  $$\vspace{1cm}\footnotesize\hangcaption{[B]. Problem \ref{pro:sum_of_vectors1}.} \label{fig:sum_of_vectors2}
\end{minipage}\hfill
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pscircle[linewidth=1pt](0,0){1}
\psline[linewidth=1.5pt]{o->}(0,0)(1,0)
\psline[linewidth=1.5pt]{o->}(0,0)(-1,0)
\psline[linewidth=1.5pt]{o->}(0,0)(0,1)
\psline[linewidth=1.5pt]{o->}(0,0)(0,-1) \uput[u](0,1){\v{a}}
\uput[d](0, -1){\v{b}} \uput[l](-1, 0){\v{c}} \uput[r](1, 0){\v{d}}
  $$\vspace{1cm}\footnotesize\hangcaption{[C]. Problem \ref{pro:sum_of_vectors1}.} \label{fig:sum_of_vectors3}
\end{minipage}
\end{figure}

\vspace{1cm}

\begin{figure}[h]\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pscircle[linewidth=1pt](0,0){1}
\psline[linewidth=1.5pt]{<-o}(0,0)(1,0)
\psline[linewidth=1.5pt]{<-o}(0,0)(-1,0)
\psline[linewidth=1.5pt]{<-o}(0,0)(0,1)
\psline[linewidth=1.5pt]{<-o}(0,0)(0,-1) \uput[u](0,1){\v{a}}
\uput[d](0, -1){\v{b}} \uput[l](-1, 0){\v{c}} \uput[r](1, 0){\v{d}}
  $$\vspace{1cm}\footnotesize\hangcaption{[D]. Problem \ref{pro:sum_of_vectors1}.} \label{fig:sum_of_vectors4}
\end{minipage}\hfill
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pscircle[linewidth=1pt](0,0){1}
\psline[linewidth=1.5pt]{o->}(0,0)(1,0)
\psline[linewidth=1.5pt]{<-o}(0,0)(-1,0)
\psline[linewidth=1.5pt]{o->}(0,0)(0,1)
\psline[linewidth=1.5pt]{o->}(0,0)(0,-1) \uput[u](0,1){\v{a}}
\uput[d](0, -1){\v{b}} \uput[l](-1, 0){\v{c}} \uput[r](1, 0){\v{d}}
  $$\vspace{1cm}\footnotesize\hangcaption{[E]. Problem \ref{pro:sum_of_vectors1}.} \label{fig:sum_of_vectors5}
\end{minipage}\hfill
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pscircle[linewidth=1pt](0,0){1}
\psline[linewidth=1.5pt]{o->}(0,0)(0.5,-0.866)
\psline[linewidth=1.5pt]{<-o}(0,0)(-0.5,-0.866)
\psline[linewidth=1.5pt]{o->}(0,0)(0.5,0.866)
\psline[linewidth=1.5pt]{<-o}(0,0)(-0.5,0.866)
\psline[linewidth=1.5pt]{<-o}(0,0)(1,0)
\psline[linewidth=1.5pt]{o->}(0,0)(-1,0) \uput[l](-1,0){\v{a}}
\uput[r](1, 0){ \v{d}} \uput[ul](-0.5, 0.866){\v{b}} \uput[ur](0.5,
0.866){\v{c}} \uput[dl](-0.5, -0.866){\v{f}} \uput[dr](0.5,
-0.866){\v{e}}
  $$ \vspace{1cm}
\footnotesize\hangcaption{[F]. Problem \ref{pro:sum_of_vectors1}.}
\label{fig:sum_of_vectors6}
\end{minipage}
 \end{figure}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}
Let $a$ be a real number. Find the distance between $\colvec{1\\
a}$ and $\colvec{1 - a
\\ 1}$.
\begin{answer} $\sqrt{2a^2-2a + 1}$ \end{answer}
\end{pro}
\begin{pro}
Find all scalars $\lambda$ for which $\norm{\lambda\v{v}} =
\frac{1}{2}$, where $\v{v} = \colvec{1\\ -1}$.
\begin{answer}
 $\norm{\lambda\v{v}} =
\frac{1}{2} \implies \sqrt{(\lambda)^2 + (-\lambda)^2} =
\frac{1}{2} \implies 2\lambda^2 = \frac{1}{4} \implies \lambda =
\pm \dfrac{1}{\sqrt{8}}$.

\end{answer}
\end{pro}
\begin{pro}
Given a pentagon $ABCDE$, find $\vect{AB} +
\vect{BC}+\vect{CD}+\vect{DE}+\vect{EA}$.
\begin{answer}
$\v{0}$
\end{answer}
\end{pro}
\begin{pro}
For which values of $a$ will the vectors $$\v{a} = \colvec{a + 1\\
a^2 -1}, \ \ \ \ \v{b} = \colvec{2a + 5 \\ a^2 - 4a + 3}$$ be
parallel?
\begin{answer} $a = \pm 1$ or $a = -8$.\end{answer}
\end{pro}
\begin{pro}
In $\triangle ABC$ let the midpoints of $\bipoint{A}{B}$ and
$\bipoint{A}{C}$ be ${M_C}$ and ${M_B}$, respectively. Put
$\vect{M_CB} = \v{x}$, $\vect{M_BC} = \v{y}$, and $\vect{CA} =
\v{z}$. Express [A] $\vect{AB} + \vect{BC} + \vect{M_CM_B}$, [B]
$\vect{AM_C} + \vect{M_CM_B} + \vect{M_BC}$, [C] $\vect{AC} +
\vect{M_CA} - \vect{BM_B}$ in terms of $\v{x}$, $\v{y}$, and
$\v{z}$.
\begin{answer}
[A] $2(\v{x} + \v{y}) - \frac{1}{2}\v{z} $, [B] $\v{x} + \v{y} -
\frac{1}{2}\v{z}$, [C] $-(\v{x}+\v{y} + \v{z})$
\end{answer}
\end{pro}

\begin{pro} \label{pro:sum_of_vectors1}
A circle is divided into three equal, four equal, or six equal parts
(figures \ref{fig:sum_of_vectors1} through
\ref{fig:sum_of_vectors6}). Find the sum of the vectors. Assume
that the divisions start or stop at the centre of the circle, as
suggested in the figures.
  \begin{answer} [A]. $\v{0}$, [B]. $\v{0}$, [C]. $\v{0}$,
 [D]. $\v{0}$, [E]. $2\v{c} (=2\v{d})$, [F]. $\v{0}$
\end{answer}
\end{pro}


\begin{pro}
Diagonals are drawn in a square (figures \ref{fig:sum_of_vectors7}
through \ref{fig:sum_of_vectors9}). Find the vectorial sum $\v{a}
+ \v{b} + \v{c}$. Assume that the diagonals either start, stop, or
pass through the centre of the square, as suggested by the
figures.


\vspace{1cm}
\begin{figure}[htb]
\hfill\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pspolygon[linewidth=.6pt](-1,-1)(1,-1)(1,1)(-1,1)
\psline[linewidth=1.5pt]{o->}(-1,-1)(-1,1) \uput[u](-1,1){\v{a}}
\psline[linewidth=1.5pt]{o->}(-1,-1)(1,1) \uput[ur](1,1){\v{b}}
\psline[linewidth=1.5pt]{o->}(1,1)(1,-1) \uput[d](1,-1){\v{c}}
  $$\vspace{1cm}\footnotesize\hangcaption{[G].} \label{fig:sum_of_vectors7}
\end{minipage}
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pspolygon[linewidth=.6pt](-1,-1)(1,-1)(1,1)(-1,1)
\psline[linewidth=1.5pt]{<-o}(-1,1)(0,0) \uput[l](-1,1){\v{a}}
\psline[linewidth=1.5pt]{o->}(0,0)(1,1) \uput[r](1,1){\v{b}}
\psline[linewidth=1.5pt]{o->}(1,1)(1,-1) \uput[d](1,-1){\v{c}}
  $$\vspace{1cm}\footnotesize\hangcaption{[H].} \label{fig:sum_of_vectors8}
\end{minipage}\hfill
\begin{minipage}{4cm}
$$\psset{unit=2pc}
\pspolygon[linewidth=.6pt](-1,-1)(1,-1)(1,1)(-1,1)
\psline[linewidth=1.5pt]{o->}(-1,1)(1,1) \uput[r](1,1){\v{a}}
\psline[linewidth=1.5pt]{o->}(1,1)(0,0) \uput[l](-1,1){\v{b}}
\psline[linewidth=1.5pt]{o->}(1,-1)(0,0) \uput[d](1,-1){\v{c}}
  $$\vspace{1cm}\footnotesize\hangcaption{[I].} \label{fig:sum_of_vectors9}
\end{minipage}\hfill
\end{figure}
\begin{answer}
[G]. $\v{b}$, [H]. $\v{0}$, [I]. $\v{0}$.
\end{answer}
\end{pro}
\begin{pro}
Prove that the mid-points of the sides of a skew quadrilateral
form the vertices of a parallelogram.
\begin{answer} Let the skew quadrilateral be $ABCD$ and let $P, Q, R,
S$ be the midpoints of $\bipoint{A}{B}$, $\bipoint{B}{C}$,
$\bipoint{C}{D}$, $\bipoint{D}{A}$, respectively. Put $\v{x} =
\vect{OX}$, where $X\in \{A, B, C, D, P, Q, R, S\}$. Using the
Section Formula \ref{eq:section_formula} we have $$\v{p} =
\dfrac{\v{a} + \v{b}}{2}, \ \ \ \v{q} = \dfrac{\v{b} + \v{c}}{2},
\ \ \ \v{r} = \dfrac{\v{c} + \v{d}}{2}, \ \ \ \v{s} = \dfrac{\v{d}
+ \v{a}}{2}.
$$This gives $$\v{p} - \v{q} =
\dfrac{\v{a} - \v{c}}{2},\ \ \  \v{s} - \v{r} = \dfrac{\v{a} -
\v{c}}{2}.       $$This means that $\vect{QP} = \vect{RS}$ and so
$PQRS$ is a parallelogram since one pair of sides are equal and
parallel.
\end{answer}
\end{pro}

\begin{pro}
${ABCD}$ is a parallelogram. ${E}$ is the midpoint of
$\bipoint{B}{C}$ and ${F}$ is the midpoint of $\bipoint{D}{C}$.
Prove that $$\vect{AC} + \vect{BD} = 2\vect{BC}.$$
\begin{answer} We have $2\vect{BC} = \vect{BE} + \vect{EC}$. By
Chasles' Rule $\vect{AC} = \vect{AE} + \vect{EC}$, and $\vect{BD}
= \vect{BE} + \vect{ED}$. We deduce that
$$  \vect{AC} + \vect{BD} =  \vect{AE} + \vect{EC} + \vect{BE} + \vect{ED} =
\vect{AD} + \vect{BC}.$$ But since ${ABCD}$ is a parallelogram,
$\vect{AD} = \vect{BC}$. Hence
$$  \vect{AC} + \vect{BD} = \vect{AD} + \vect{BC} = 2\vect{BC}.$$
\end{answer}
\end{pro}

\begin{pro}
Let $A, B$ be two points on the plane. Construct two points ${I}$
and ${J}$ such that
$$\vect{IA} = -3\vect{IB}, \ \ \ \vect{JA} = -\frac{1}{3}\vect{JB},
$$and then demonstrate that for any arbitrary point ${M}$ on
the plane
$$\vect{MA} + 3\vect{MB} = 4\vect{MI}   $$ and $$3\vect{MA} + \vect{MB} = 4\vect{MJ}.
$$
\begin{answer} We have $\vect{IA} = -3\vect{IB} \iff \vect{IA} =
-3(\vect{IA} + \vect{AB}) = -3\vect{IA} - 3\vect{AB}$. Thus we
deduce $$\begin{array}{lll} \vect{IA} + 3\vect{IA}  =
 -3\vect{AB}  & \iff & 4\vect{IA} = -3\vect{AB} \\
 & \iff & 4\vect{AI} = 3\vect{AB} \\
 & \iff & \vect{AI} = \frac{3}{4}\vect{AB}.
\end{array}
$$Similarly
$$\begin{array}{lll} \vect{JA}  =
 -\frac{1}{3}\vect{JB}  & \iff & 3\vect{JA} = -\vect{JB} \\
 & \iff & 3\vect{JA} = -\vect{JA} - \vect{AB} \\
 & \iff & 4\vect{JA} = -\vect{AB} \\
  & \iff & \vect{AJ} = \frac{1}{4}\vect{AB} \\
 .
\end{array}
$$
Thus we take ${I}$ such that $\vect{AI} = \frac{3}{4}\vect{AB}$
and ${J}$ such that $\vect{AJ} = \frac{1}{4}\vect{AB}$.


\bigskip

Now
$$\begin{array}{lll} \vect{MA} + 3\vect{MB}  & = & \vect{MI} + \vect{IA} + 3\vect{MI} + 3\vect{IB} \\
& = &  4\vect{MI} + \vect{IA} + 3\vect{IB} \\
& = & 4\vect{MI},
 \end{array}     $$
and
$$\begin{array}{lll} 3\vect{MA} + \vect{MB}  & = & 3\vect{MJ} + 3\vect{JA} + \vect{MJ} + \vect{JB} \\
& = &  4\vect{MJ} + 3\vect{JA} + \vect{JB} \\
& = & 4\vect{MJ}.
 \end{array}     $$
\end{answer}
\end{pro}

\begin{pro}
You find an ancient treasure map in your great-grandfather's
sea-chest. The sketch indicates that from the gallows you should
walk to the oak tree, turn right $90^\circ$ and walk a like
distance, putting an $X$ at the point where you stop; then go
back to the gallows, walk to the pine tree, turn left $90^\circ$,
walk the same distance, mark point $Y$. Then you will find the
treasure at the midpoint of the segment $\overline{XY}$. So you
charter a sailing vessel and go to the remote south-seas island.
On arrival, you readily locate the oak and pine trees, but
unfortunately, the gallows was struck by lightning, burned to dust
and dispersed to the winds. No trace of it remains. What do you
do?
\begin{answer}
Let $\v{G}$, $\v{O}$ and $\v{P}$ denote vectors from an arbitrary
origin to the gallows, oak, and pine, respectively. The conditions
of the problem define $\v{X}$ and $\v{Y}$, thought of similarly as
vectors from the origin, by $\v{X} = \v{O} + R(\v{O} -\v{G})$,
$\v{Y} = \v{P} - R(\v{P} -\v{G})$, where $R$ is the $90^\circ$
rotation to the right, a linear transformation on vectors in the
plane; the fact that $-R$  is $90^\circ$ leftward rotation has been
used in writing $Y$. Anyway, then
$$
\dfrac{\v{X} + \v{Y}}{2}  =  \dfrac{\v{O} + \v{P}}{2} +
\dfrac{R(\v{O} - \v{P})}{2}$$is independent of the position of the
gallows. This gives a simple algorithm for treasure-finding: take
$\v{P}$ as the (hitherto) arbitrary origin, then the treasure is
at $\dfrac{\v{O} + R(\v{O})}{2}$.
\end{answer}
\end{pro}


\end{multicols}
\section{Dot Product in \protect$\BBR^2\protect$}
\begin{df} Let $(\v{a}, \v{b})\in (\BBR^2)^2$. The {\em dot
product} $\dotprod{a}{b}$ of $\v{a}$ and $\v{b}$ is defined by
     $$\dotprod{a}{b} =
\begin{bmatrix} a_1  \cr a_2  \cr\end{bmatrix}
\bulletproduct \begin{bmatrix} b_1 \cr b_2 \end{bmatrix} =
 a_1b_1 + a_2b_2.$$\end{df}
The following properties of the dot product are easy to deduce
from the definition.
\begin{enumerate}
\item[DP1]  {\bf Bilinearity} \begin{equation}(\v{x} +
\v{y})\bulletproduct \v{z} = \dotprod{x}{z} + \dotprod{y}{z}, \ \
\ \v{x}\bulletproduct (\v{y} + \v{z}) = \dotprod{x}{y} +
\dotprod{x}{z}\label{dp:bilinearity}\end{equation} \item[DP2] {\bf
Scalar Homogeneity}
\begin{equation}(\alpha\v{ x})\bulletproduct \v{ y} = \v{
x}\bulletproduct (\alpha\v{ y}) = \alpha(\dotprod{\bf x}{\bf y}), \
\alpha \in \BBR. \label{dp:scalar_homogeneity}\end{equation}
\item[DP3] {\bf Commutativity}
\begin{equation}\dotprod{x}{y} = \dotprod{\bf y}{\bf x}\label{dp:commutativity}\end{equation} \item[DP4]
\begin{equation}\dotprod{x}{x} \ \ \geq \ \ 0
\label{dp:dot_itself_is_positive}\end{equation} \item[DP5]
\begin{equation}\dotprod{x}{x} = 0 \Leftrightarrow \v{ x} = \v{{\bf
0}}\label{dp:when_zero_is_zero}\end{equation} \item[DP6]
\begin{equation}\norm{\bf x} =
 \sqrt{\dotprod{x}{x}} \label{dp:dot_and_norm}\end{equation}
\end{enumerate}
\begin{exa}
If we put
$$\v{{i}} = \colvec{ 1 \\ 0}, \
\v{{j}} = \colvec{0 \\ 1},$$then we can write any vector $\v{a} =
\begin{bmatrix} a_1 \cr a_2 \cr
\end{bmatrix}$ as a sum
$$\v{a} = a_1\v{i} + a_2\v{j}.$$
The vectors
$$\v{{i}} = \begin{bmatrix} 1 \cr 0 \cr\end{bmatrix}, \
\v{{j}} = \begin{bmatrix} 0 \cr 1 \cr
\end{bmatrix},$$ satisfy $\v{i}\bulletproduct\v{j} = 0,$ and  $\norm{\v{i}} = \norm{\v{j}}
= 1$.  \label{exa:ij}\end{exa}


\begin{df}\index{vectors!angle between}
Given vectors $\v{a}$ and $\v{b}$, we define the angle between
them, denoted by $\anglebetween{a}{b}$, as the angle between  any
two contiguous  bi-point representatives of $\v{a}$ and $\v{b}$.
\end{df}

 \begin{thm}
$$\v{a}\bulletproduct\v{b} = ||\v{a}||\,||\v{b}||\cos\anglebetween{a}{b}.$$
\label{thm:cosanglebetween}\end{thm}
\begin{pf}
Using Al-Kashi's Law of Cosines on the length of the vectors, we
have
$$\begin{array}{l}||\v{ b} - \v{ a}||^2 = ||\v{ a}||^2 + ||\v{ b}||^2
- 2||\v{ a}||||\v{ b}||\cos\anglebetween{a}{b}\\
\Leftrightarrow (\v{ b} - \v{ a})\bulletproduct (\v{ b} -\v{ a}) =
||\v{ a}||^2 + ||\v{ b}||^2
- 2||\v{ a}||||\v{ b}||\cos\anglebetween{a}{b}\\
\Leftrightarrow \v{ b}\bulletproduct\v{ b} - 2\v{
a}\bulletproduct\v{ b} + \v{ a}\bulletproduct\v{ a} = ||\v{ a}||^2
+ ||\v{ b}||^2 - 2||\v{ a}||||\v{ b}||\cos\anglebetween{a}{b}\\
\Leftrightarrow ||\v{b}||^2 - 2\v{a}\bulletproduct\v{b} + ||\v{a}||^2 = ||\v{ a}||^2 +
||\v{ b}||^2 -
2||\v{ a}||||\v{ b}||\cos\anglebetween{a}{b}\\
\Leftrightarrow \v{ a}\bulletproduct\v{ b} = ||\v{ a}||||\v{
b}||\cos\anglebetween{a}{b} ,
\end{array}$$as we wanted to shew.
\end{pf}





Putting $\anglebetween{a}{b} = \frac{\pi}{2}$ in Theorem
\ref{thm:cosanglebetween} we obtain the following corollary.


\begin{cor}
Two vectors in $\BBR^2$ are perpendicular  if and only if their dot
product is $0$.
\end{cor}



\vspace{2cm}
\begin{figure}[htb]
$$\psset{unit=1pc} \psline[linewidth=2pt,
linecolor=red]{->}(-3,3)(3, 5) \psline[linewidth=2pt,
linecolor=blue]{->}(3,5)(9, 2) \uput[l](-2.5, 4
){\v{a}}\uput[u](6.5,4){\v{b} -\v{a}}\uput[r](4, 1
){\v{b}}\psline[linewidth=2pt, linecolor=green]{->}(-3,3)(9, 2)\
$$\footnotesize\hangcaption{Theorem \ref{thm:cosanglebetween}.}\label{fig:dotproduct}
\end{figure}

\begin{df}
Two vectors are said to be {\em orthogonal} if they are
perpendicular. If $\v{a}$ is orthogonal to $\v{b}$, we write
$\v{a} \perp \v{b}.$ \index{orthogonal}
\end{df}
\begin{df}
If $\v{a} \perp \v{b}$  and $\norm{\v{a}}= \norm{\v{b}} = 1$ we
say that $\v{a}$ and $\v{b}$ are {\em orthonormal}.
\index{orthonormal}
\end{df}

\begin{rem}
It follows that the vector $\v{0}$ is simultaneously parallel and
perpendicular to any vector!
\end{rem}
\begin{df}\index{orthogonal space}
Let $\v{a} \in \BBR^2$ be fixed. Then the {\em orthogonal space} to
$\v{a}$ is defined and denoted by
$$\v{a}^{\perp} = \{\v{x}\in \BBR^2: \v{x} \perp \v{ a}\}.$$
\end{df}
Since $|\cos \theta| \leq 1$ we also have
\begin{cor}[Cauchy-Bunyakovsky-Schwarz Inequality]\index{inequality!Cauchy-Bunyakovsky-Schwarz!in R2}
$$\left|\dotprod{a}{b}\right| \leq \norm{\v{a}}\norm{\v{b}}.$$
\end{cor}

\begin{cor}[Triangle Inequality] \index{inequality!triangle!in R2}
$$\norm{\v{a} + \v{b}} \leq \norm{\v{a}} + \norm{\v{b}}.$$
\end{cor}
\begin{pf}
$$\begin{array}{lll}
||\v{a} + \v{b}||^2 & = & (\v{a} + \v{b})\bulletproduct (\v{a} + \v{b}) \\
& = & \v{a}\bulletproduct\v{a} + 2\v{a}\bulletproduct\v{b} +
\v{b}\bulletproduct\v{b} \\
& \leq & ||\v{a}||^2  + 2||\v{a}||||\v{b}|| +
||\v{b}||^2 \\
& = & (||\v{a}|| + ||\v{b}||)^2,
\end{array}$$from where the desired result follows.  \end{pf}
\begin{cor}[Pythagorean Theorem] If $\v{a} \perp \v{b}$ then
$$||\v{a} + \v{b}||^2 = \norm{\v{a}}^2 + \norm{\v{b}}^2.
$$\label{cor:pythagoras} \index{theorem!Pythagoras'}
\end{cor}
\begin{pf}
Since $\dotprod{a}{b} = 0$, we have
$$\begin{array}{lll}
||\v{a} + \v{b}||^2 & = & (\v{a} + \v{b})\bulletproduct (\v{a} + \v{b}) \\
& = & \v{a}\bulletproduct\v{a} + 2\v{a}\bulletproduct\v{b} +
\v{b}\bulletproduct\v{b} \\
& = & \v{a}\bulletproduct\v{a} + 0 +
\v{b}\bulletproduct\v{b} \\
& = & ||\v{a}||^2 + ||\v{b}||^2,
\end{array}$$from where the desired result follows.  \end{pf}


\begin{df}
 The {\em
projection} of $\v{t}$ onto $\v{v}$ (or the $\v{v}$-component of
$\v{t}$) is the vector $$ \proj{t}{v} = (\cos
\anglebetween{t}{v})\norm{\v{t}}\frac{1}{\norm{\v{v}}}\v{v},$$where
$\anglebetween{t}{v}\in [0; \pi]$ is the convex angle between
$\v{v}$ and $\v{t}$ read in the positive sense.\index{projection
of a vector}
\end{df}

\begin{rem}
Given two vectors $\v{t}$  and  vector $\v{v} \neq \v{0}$, find
bi-point representatives  of them having a common tail and join
them together at their tails. The projection of $\v{t}$ onto
$\v{v}$ is the ``shadow'' of $\v{t}$ in the direction of $\v{v}$.
To obtain $\proj{t}{v}$ we prolong $\v{v}$ if necessary and drop a
perpendicular line to it from the head of $\v{t}$. The projection
is the portion between the common tails of the vectors and the
point where this perpendicular meets $\v{v}$. See figure
\ref{fig:vectorprojections}.
\end{rem}

\vspace{1cm}
\begin{figure}[htb]
\begin{minipage}[t]{6cm}$$ \psset{unit=1pc} \psline[linewidth=2pt,
linecolor=blue]{*->}(0, 0)(4, 0)\psline[linewidth=2pt,
linecolor=red]{*->}(0, 0)(2, 3) \psline[linestyle=dashed](2, 3)(2,
0)\psdots[dotstyle=*, dotscale=2](0,0)
$$
\end{minipage}
\hfill
\begin{minipage}[t]{6cm}$$ \psset{unit=1pc} \psline[linewidth=2pt,
linecolor=blue]{*->}(0, 0)(4, 0)\psline[linewidth=2pt,
linecolor=red]{*->}(0, 0)(-2, 3) \psline[linestyle=dashed](-2,
3)(-2, 0)\psdots[dotstyle=*, dotscale=2](0,0)
\psline[linestyle=dashed]{->}(0,0)(-2,0)
$$
\end{minipage}
\vspace{1cm}\footnotesize\caption{Vector
Projections.}\label{fig:vectorprojections}
\end{figure}








\begin{cor}
Let $\v{a} \neq \v{0}$. Then
$$\proj{x}{a} = \cos\anglebetween{x}{a}\norm{\v{x}}\frac{1}{\norm{\v{a}}} \v{a}
= \frac{\dotprod{x}{a}}{\norm{\v{a}}^2}\v{a}.$$
\end{cor}


\begin{thm}
Let $\v{a}\in \BBR^2 \setminus \{\v{0}\}$. Then any $\v{x}\in
\BBR^2$ can be decomposed as
$$\v{x} = \v{u} + \v{v},$$where
$\v{u} \in \BBR\v{a}$ and  $\v{v} \in \v{a}^\perp$.
\label{thm:orthodecomp}\end{thm}
\begin{pf}
We know that $\proj{x}{a}$ is parallel to $\v{ a}$, so we take
$\v{u} = \proj{x}{a}$. This means that we must then take $\v{v} =
\v{x} - \proj{x}{a}$. We must demonstrate that $\v{v}$ is indeed
perpendicular to $\v{a}$. But this is clear, as
\renewcommand{\arraystretch}{1.7}
$$
\begin{array}{lll}\dotprod{a}{v}  & =  & \dotprod{a}{x} - \v{a}\bulletproduct
\proj{x}{a} \\
& =  & \dotprod{a}{x}  - \v{a}\bulletproduct
\frac{\dotprod{x}{a}}{\norm{\v{a}}^2}\v{a} \\
& = & \dotprod{a}{x} - \dotprod{x}{a}\\
 & =  & 0, \end{array}$$\renewcommand{\arraystretch}{1}completing the proof. \end{pf}
\begin{cor}
Let $\v{v} \perp \v{w}$ be non-zero vectors in $\BBR^2$. Then any
vector $\v{a} \in \BBR^2$ has a unique representation as a linear
combination of $\v{v}$, $\v{w}$:
$$\v{a} = s\v{v} + t\v{w}, \ \ (s, t)\in \BBR^2.$$
\label{cor:orthodecomp_1}\end{cor}
\begin{pf}
By Theorem \ref{thm:orthodecomp}, there exists a decomposition
$$\v{a} = s\v{v} + s'\v{v}',$$where $\v{v}'$ is
orthogonal to $\v{v}$. But then $\v{v}' || \v{w}$ and hence there
exists $\alpha\in \BBR$ with $\v{v}' = \alpha \v{w}$. Taking $t =
s'\alpha$ we achieve the decomposition
$$\v{a} = s\v{v} + t\v{w}.$$ To prove uniqueness,
assume
$$s\v{v} + t\v{w} = \v{a} = p\v{v} + q\v{w}.$$
Then $(s - p)\v{v} = (q - t)\v{w}.$ We must have $s = p$ and $q =
t$ since otherwise $\v{v}$ would be parallel to $\v{w}$. This
completes the proof.
\end{pf}
\begin{cor}
Let $\v{p},\v{q}$ be non-zero, non-parallel vectors in $\BBR^2$.
Then any vector $\v{a} \in \BBR^2$ has a unique representation as a
linear combination of $\v{p}$, $\v{q}$:
$$\v{a} = l\v{p} + m\v{q}, \ \ (l, m)\in \BBR^2.$$
\label{cor:orthodecomp_2}\end{cor}
\begin{pf}
Consider $\v{z} = \v{q}  - \proj{q}{p}$. Clearly $\v{p} \perp \v{z}$
and so by Corollary \ref{cor:orthodecomp_1}, there exists unique
$(s, t)\in \BBR^2$ such that
$$\begin{array}{lll}\v{a} &  =  & s\v{p} + t\v{z} \\
& = & s\v{p} - t\proj{q}{p} + t\v{q} \\
& = &  \left(s -
t\frac{\dotprod{q}{p}}{\norm{\v{p}}^2}\right)\v{p} + t\v{q},
\end{array}
$$establishing the result upon choosing $l = s -
t\frac{\dotprod{q}{p}}{\norm{\v{p}}^2}$ and $m = t$.
\end{pf}
\begin{exa}
Let $\v{p} = \colvec{1 \\ 1}$, $\v{q} = \colvec{1 \\
2}$. Write $\v{p}$ as the sum of two vectors, one parallel to
$\v{q}$ and the other perpendicular to $\v{q}$.
\end{exa}
\begin{solu}We use Theorem \ref{thm:orthodecomp}.  We know that
$\proj{p}{q}$ is parallel to $\v{q}$, and we find
$$\proj{p}{q} = \dfrac{\dotprod{p}{q}}{\norm{\v{q}}^2}
\v{q} = \dfrac{3}{5}\v{q} = \colvec{\frac{3}{5} \\
\frac{6}{5}}.$$ We also compute $$ \v{p} - \proj{p}{q} =
\colvec{1-\frac{3}{5}
\\ 1 - \frac{6}{5}} = \colvec{\frac{2}{5} \\ -\frac{1}{5}}.
$$Observe that $$ \colvec{\frac{3}{5} \\ \frac{6}{5}} \bulletproduct \colvec{\frac{2}{5} \\ -\frac{1}{5}} = \frac{6}{25} - \frac{6}{25} = 0,$$
and the desired decomposition is  $$\colvec{1\\ 1} =  \colvec{\frac{3}{5} \\
\frac{6}{5}} + \colvec{\frac{2}{5} \\ -\frac{1}{5}}.  $$
\end{solu}
\vspace{2cm}

\begin{figure}[h] \centering\begin{minipage}{4cm}\psset{unit=1pc}
\pstTriangle[PosAngleA=-90, PosAngleB=180, PosAngleC=0]
  (4,1){A}(1,3){B}(5,5){C}
\pstCircleABC[CodeFig=true, CodeFigColor=blue,
              linecolor=red]{A}{B}{C}{H}
              \vspace{.3cm}\footnotesize\hangcaption{Orthocentre.}
\end{minipage}
\end{figure}


\begin{exa}\label{exa:orthocentre}
Prove that the altitudes of a triangle $\triangle {ABC}$ on the
plane are concurrent. This point is called the {\em orthocentre}
of the triangle.
\end{exa} \begin{solu}Put $\v{a} = \vect{OA}, \v{b} = \vect{OB}, \v{c} = \vect{OC}$. First observe that for any $\v{x}$,
we have, upon expanding, \begin{equation}(\v{x} -
\v{a})\bulletproduct (\v{b} - \v{c}) + (\v{x} -
\v{b})\bulletproduct(\v{c} - \v{a}) + (\v{x} -
\v{c})\bulletproduct(\v{a} - \v{b}) = 0.
\label{eq:rel_altitudes_1}\end{equation} Let $H$ be the point of
intersection of the altitude from $A$ and the altitude from $B.$
Then
\begin{equation}0 = \vect{AH}\bulletproduct\vect{CB} = (\vect{OH} -
\vect{OA})\bulletproduct (\vect{OB} - \vect{OC}) = (\vect{OH} -
\v{a})\bulletproduct(\v{b} - \v{c}),
\label{eq:rel_altitudes_2}\end{equation}and \begin{equation}0 =
\vect{BH}\bulletproduct\vect{AC} = (\vect{OH} -
\vect{OB})\bulletproduct (\vect{OC} - \vect{OA}) = (\vect{OH} -
\v{b})\bulletproduct(\v{c} - \v{a}).
\label{eq:rel_altitudes_3}\end{equation} Putting $\v{x} =
\vect{OH}$ in (\ref{eq:rel_altitudes_1}) and subtracting from it
(\ref{eq:rel_altitudes_2}) and (\ref{eq:rel_altitudes_3}), we
gather that
$$0 = (\vect{OH} - \v{c})\bulletproduct (\v{a} - \v{b})  = \vect{CH}\bulletproduct\vect{AB}, $$
which gives the result.
\end{solu}
\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900



\begin{pro}
Determine the value of $a$ so that $\colvec{a\\ 1-a}$ be
perpendicular to $ \colvec{1 \\ -1}.$
\begin{answer} $a = \frac{1}{2}$\end{answer}
\end{pro}

\begin{pro}
Demonstrate that $$(\v{b} + \v{c} = \v{0}) \wedge (\norm{\v{a}} =
\norm{\v{b}}) \iff (\v{a} - \v{b})\bulletproduct (\v{a} - \v{c}) =
0.$$
\end{pro}
\begin{pro}
Let $\v{p} = \colvec{4 \\ 5}$, $\v{r} = \colvec{-1 \\ 1}$, $\v{s} = \colvec{2 \\
1}$. Write $\v{p}$ as the sum of two vectors, one parallel to
$\v{r}$ and the other parallel to $\v{s}$.
\begin{answer}
$$\v{p} = \colvec{4\\ 5}  = 2\colvec{-1\\ 1} + 3\colvec{2 \\ 1} = 2\v{r} + 3\v{s}.  $$
\end{answer}
\end{pro}

\begin{pro}
Prove that
$$
\norm{\v{a}}^2 =  (\dotprod{a}{i})^2 + (\dotprod{a}{j})^2.
$$
\begin{answer}
Since $a_1  = \dotprod{a}{i}, a_2 = \dotprod{a}{j},$ we may write
$$
\v{a} = (\dotprod{a}{i})\v{i} + (\dotprod{a}{j})\v{j}
$$from where the assertion follows.
\end{answer}
\end{pro}
\begin{pro}
Let $\v{a} \neq \v{0} \neq \v{b}$ be vectors in $\BBR^2$ such that
$\dotprod{a}{b} = 0$. Prove that
$$\alpha\v{a} + \beta\v{b} = \v{0} \implies \alpha
= \beta = 0.
$$
\begin{answer}
$$\begin{array}{lll} \alpha\v{a} + \beta\v{b} = \v{0} & \implies &  \v{a}\bulletproduct (\alpha\v{a} + \beta\v{b})  = \v{a}\bulletproduct\v{0} \\
 & \implies & \alpha (\v{a}\bulletproduct \v{a}) = 0\\
& \implies & \alpha \norm{\v{a}}^2 = 0.
\end{array}$$Since $\v{a} \neq
\v{0}$, we must have $\norm{\v{a}} \neq 0$ and thus $\alpha = 0.$
But if $\alpha = 0$ then
$$\begin{array}{lll}\alpha\v{a} + \beta\v{b} = \v{0}
& \implies &  \beta\v{b} = \v{0}\\ &  \implies &  \beta
= 0, \\
\end{array}
$$since $\v{b} \neq \v{0}$.

\end{answer}

\end{pro}
\begin{pro}
Let $(\v{x},\ \v{y}) \in (\BBR^2)^2 $ with $||\v{x}|| =
\frac{3}{2}||\v{y}||$. Shew that $2\v{x} + 3\v{y}$ and $2\v{x} -
3\v{y}$ are perpendicular.
\begin{answer} We must shew that $$(2\v{x} +
3\v{y})\bulletproduct( 2\v{x} - 3\v{y}) = 0.$$ But
$$(2\v{x} + 3\v{y})\bulletproduct(2\v{x} - 3\v{y}) = 4||\v{x}||^2 - 9||\v{y}||^2 =
4(\frac{9}{4}||\v{y}||^2) - 9||\v{y}||^2 = 0.$$
\end{answer}
\end{pro}
\begin{pro}
Let $\v{a}, \v{b}$ be fixed vectors in $\BBR^2$. Prove that if
$$\forall \v{v}\in\BBR^2, \v{v}\bulletproduct\v{a} = \v{v}\bulletproduct\v{b},$$then $\v{a} = \v{b}$.
\begin{answer} We have $\forall \v{v}\in\BBR^2, \v{v}\bulletproduct(\v{a} - \v{b}) = 0.$ In particular, choosing
$\v{v} = \v{a} - \v{b}$, we gather
$$(\v{a} - \v{b})\bulletproduct (\v{a} - \v{b}) = ||\v{a} - \v{b}||^2 =
0.$$But the norm of a vector is $0$ if and only if the vector  is
the $\v{0}$ vector. Therefore $\v{a} - \v{b} = \v{0}$, i.e.,
$\v{a} = \v{b}$.
\end{answer}
\end{pro}
\begin{pro}
 Let $(\v{a}, \v{b})\in (\BBR^2)^2$. Prove
that $$   \norm{\v{a} + \v{b}}^2  + \norm{\v{a} - \v{b}}^2 = 2
\norm{\v{a}}^2  + 2  \norm{\v{b}}^2 .
$$
\begin{answer} We have
$$ \begin{array}{lll}
\norm{\v{a} \pm \v{b}}^2   & = & (\v{a} \pm
\v{b}) \bulletproduct (\v{a} \pm \v{b})  \\
& = & \dotprod{a}{a} \pm 2\dotprod{a}{b} + \dotprod{b}{b}
\\ & = & \norm{\v{a}}^2 \pm 2\dotprod{a}{b}+ \norm{\v{b}}^2,\end{array}
$$
whence the result follows.
\end{answer}
\end{pro}
\begin{pro}
Let $\v{u}, \v{v}$ be vectors in $\BBR^2$. Prove the {\em
polarisation identity:}
$$\v{u}\bullet\v{v} = \frac{1}{4}\left(||\v{u} + \v{v}||^2 - ||\v{u} - \v{v}||^2 \right).$$
\begin{answer} We have $$\begin{array}{lll} ||\v{u} + \v{v}||^2 -
||\v{u} - \v{v}||^2 & = & (\v{u} + \v{v})\bulletproduct (\v{u} +
\v{v}) - (\v{u}
- \v{v})\bulletproduct (\v{u} - \v{v}) \\
&=& \dotprod{u}{u} + 2\dotprod{u}{v} + \dotprod{v}{v} -
(\dotprod{u}{u} - 2\dotprod{u}{v} + \dotprod{v}{v}) \\
&=& 4\dotprod{u}{v},
\end{array}$$giving the result.
\end{answer}
\end{pro}
\begin{pro}
Let $\v{x}, \v{a}$ be non-zero vectors in $\BBR^2$. Prove that
$${\rm proj}\begin{array}{c}\proj{a}{x}\\ \v{a}\end{array} = \alpha \v{a},$$with $0 \leq \alpha \leq 1.$
\begin{answer} By definition
\renewcommand{\arraystretch}{2.5}
$${\everymath{\displaystyle}
\begin{array}{lll}
{\rm proj}\begin{array}{c}\proj{a}{x}\\
\v{a}\end{array} & = &
\frac{\proj{a}{x}\bulletproduct \v{a}}{\norm{\v{a}}^2}\v{a}\\
& = & \frac{
\frac{\dotprod{a}{x}}{\norm{\v{x}}^2}\v{x}\bulletproduct \v{ a}
}{\norm{\v{a}}^2}\v{a}
 \\
& = & \frac{(\dotprod{a}{x})^2}{\norm{\bf x}^2\norm{\v{a}}^2} \v{a}, \\
\end{array}}$$
\renewcommand{\arraystretch}{1}
Since $\dis{ 0 \leq
\frac{(\dotprod{a}{x})^2}{\norm{\v{x}}^2\norm{\v{a}}^2} \leq 1}$
by the CBS Inequality, the result follows.
\end{answer}
\end{pro}
\begin{pro}
Let $(\lambda , \v{ a})\in\BBR\times \BBR^2$ be fixed. Solve the
equation $$\dotprod{a}{x} = \lambda
$$for $\v{x}\in\BBR^2$.
\begin{answer} Clearly, if $\v{ a} = \v{{\bf 0}}$ and $\lambda
\neq 0$ then there are no solutions. If both $\v{ a} = \v{{\bf 0}}$
and $\lambda = 0$, then the solution set is the whole space
$\BBR^2$. So assume that $\v{ a}  \neq \v{{\bf 0}}$. By Theorem
\ref{thm:orthodecomp}, we may write $\v{ x} = \v{u} + \v{v}$ with
$\v{u} = \proj{\bf x}{\bf a}$ parallel to $\v{ a}$, and $\v{v}\perp \v{ a}$. Thus
there are infinitely many solutions, each of the form
$$\v{ x} = \v{u} +
\v{v} = \frac{\dotprod{x}{a}}{\norm{\bf a}^2}\v{a} + \v{v} =
\frac{\lambda}{\norm{\bf a}^2}\v{ a} + \v{v},$$where $\v{v}\in
\v{a}^{\perp}$.
\end{answer}
\end{pro}
\end{multicols}
\section{Lines on the Plane}
\begin{df}
Three points $A$, $B$, and $C$ are {\em collinear} if they lie on
the same line.
\end{df}
It is clear that the points $A$, $B$, and $C$ are collinear if and
only if $\vect{AB}$ is parallel to $\vect{AC}$. Thus we have the
following definition.
\begin{df}
The parametric equation with parameter $t\in \BBR$ of the straight
line passing through the point $P = \colpoint{p_1 \\ p_2}$ in the
direction of the vector $\v{v} \neq \v{0}$ is
$$\colvec{x-p_1 \\ y-p_2} = t \v{v}.
$$ If $\v{r} = \colvec{x\\ y}$, then the equation of the line
can be written in the form
\begin{equation}  \v{r} - \v{p} = t\v{v}. \label{eq:equation_line_R2}\end{equation}
The {\em Cartesian equation of a line} is an equation of the form
$ax + by = c$, where $a^2 + b^2 \neq 0$.  We write $(AB)$ for the
line passing through the points $A$ and $B$.
\end{df}

\begin{thm}\label{thm:alternative_eq_line_R2}
Let $\v{v} \neq \v{0}$ and  let $\v{n} \perp \v{v}$. An
alternative form for the equation of the line $\v{r} - \v{p} =
t\v{v}$ is
$$  (\v{r} - \v{p})\bulletproduct \v{n} = 0.   $$ Moreover, the
vector $\colvec{a\\ b}$ is perpendicular to the line with
Cartesian equation $ax + by = c$.
\end{thm}
\begin{pf}
The first part follows at once by observing that $\dotprod{v}{n} =
0$ and taking dot products to both sides of
\ref{eq:equation_line_R2}. For the second part observe that at
least one of $a$ and $b$ is $\neq 0$. First assume that $a \neq
0$. Then we can put $y = t$ and $x = -\frac{b}{a}t + \frac{c}{a}$
and the parametric equation of this line is
$$\colvec{x - \frac{c}{a}\\ y} = t\colvec{-\frac{b}{a} \\ 1},   $$
and we have $$\colvec{-\frac{b}{a} \\ 1} \bulletproduct
\colvec{a\\ b} = -\frac{b}{a}\cdot a + b = 0.
$$Similarly if $b \neq 0$ we can put $x = t$ and $y = -\frac{a}{b}t + \frac{c}{b}$
and the parametric equation of this line is
$$\colvec{x \\ y - \frac{c}{b}} = t\colvec{1 \\ -\frac{a}{b}},   $$
and we have $$\colvec{1\\ -\frac{a}{b}} \bulletproduct \colvec{a\\
b} = a-\frac{a}{b}\cdot b  = 0,
$$proving the theorem in this case.  \end{pf}
\begin{rem}
The vector $\colvec{\frac{a}{\sqrt{a^2 + b^2}} \\
\frac{b}{\sqrt{a^2 + b^2}}}$ has norm $1$ and is orthogonal to the
line $ax + by = c$.
\end{rem}


\begin{exa}
The equation of the line passing through $A = \colpoint{2\\ 3}$
and in the direction of $\v{v} = \colvec{-4 \\ 5}$ is
$$\colvec{x -2 \\ y-3} =   \lambda \colvec{-4 \\ 5}.$$
\end{exa}
\begin{exa}
Find the equation of the line passing through $A = \colpoint{-1\\
1}$ and $B = \colpoint{-2\\ 3}$.
\end{exa}
\begin{solu}The direction of this line is that of
$$\vect{AB} = \colvec{-2 - (-1) \\ 3 - 1} = \colvec{-1 \\
2}.$$The equation is thus
$$\colvec{x+1\\ y-1} = \lambda\colvec{-1 \\ 2}, \ \lambda \in\BBR .$$
\end{solu}
\begin{exa}
Suppose that $(m, b)\in \BBR^2$. Write the Cartesian equation of the
line $y = mx + b$ in parametric form.
\end{exa}\begin{solu}Here is a way. Put $x = t$. Then $y =
mt + b$ and so the desired parametric form is
$$\colvec{x \\ y - b} = t\colvec{1\\  m }.   $$
\end{solu}
\begin{exa}
Let $(m_1, m_2, b_1, b_2)\in\BBR^4, m_1m_2 \neq 0.$ Consider the
lines $L_1: y = m_1x + b_1$ and $L_2: y = m_2x + b_2$. By
translating this problem in the language of vectors in $\BBR^2$,
shew that $L_1 \perp L_2$ if and only if $m_1m_2 = -1.$ \end{exa}
\begin{solu}The parametric equations of the lines are
$$L_1: \  \colvec{x \\ y-b_1}=  s\colvec{1 \\ m_1},\ \ \  L_2: \ \colvec{x \\ y-b_2} = t\colvec{1 \\ m_2}.$$
Put $\v{v} = \colvec{1\\ m_1}$ and $\v{w} = \colvec{1\\
m_2}$. Since the lines are perpendicular we must have
$\dotprod{v}{w} = 0$, which yields
$$0 = \dotprod{v}{w} = 1(1) + m_1(m_2) \implies m_1m_2 = -1.$$
\end{solu}

\begin{thm}[Distance Between a Point and a Line]
\label{thm:distance_point_lineR2}\index{distance!between a point
and a line!on the plane} Let $(\v{r} - \v{a})\bulletproduct\v{n} =
0$ be a line passing through the point $A$ and perpendicular to
vector $\v{n}$. If $B$ is not a point on the line, then the
distance from $B$ to the line is
$$\frac{\left|(\v{a} -
\v{b})\bulletproduct\v{n}\right|}{\norm{\v{n}}}.$$ If the line has
Cartesian equation $ax + by = c$ and $B = \colpoint{b_1 \\ b_2}$, then this distance is
$$ \dfrac{|ab_1 + bb_2 - c|}{\sqrt{a^2 + b^2}}.  $$
\end{thm}
\begin{pf}
Let $R_0$ be the point on the line that is nearest to $B$. Then
$\vect{BR_0} = \v{r_0} - \v{b}$ is orthogonal to the line, and the
distance we seek is
$$ ||\proj{\v{r_0} - \v{b}}{n}|| = \left|\left| \frac{(\v{r_0} - \v{b})\bulletproduct\v{n}}{\norm{\v{n}}^2}\v{n}\right|\right| =
\frac{|(\v{r_0} - \v{b})\bulletproduct\v{n}|}{\norm{\v{n}}}.$$
Since $R_0$ is on the line, $\dotprod{r_0}{n} = \dotprod{a}{n},$
and so
$$ ||\proj{\v{r_0} - \v{b}}{n}|| = \frac{|\dotprod{r_0}{n} -
\dotprod{b}{n}|}{\norm{\v{n}}} = \frac{|\dotprod{a}{n} -
\dotprod{b}{n}|}{\norm{\v{n}}} = \frac{|(\v{a} -
\v{b})\bulletproduct\v{n}|}{\norm{\v{n}}},$$ as we wanted to shew.

\bigskip

If the line has Cartesian equation $ax + by = c$, then at least
one of $a$ and $b$ is $\neq 0$. Let us suppose $a \neq 0$, as the
argument when $a = 0$ and $b\neq 0$ is similar. Then $ax + by = c$
is equivalent to  $$\left(\colvec{x\\ y} - \colvec{\frac{c}{a}
\\ 0}\right)\bulletproduct
\colvec{a\\ b} =0.
$$We use the result obtained above with $\v{a} = \colvec{\frac{c}{a}
\\ 0}$, $\v{n} = \colvec{a
\\ b}$, and $B = \colpoint{b_1 \\ b_2}$. Then $\norm{\v{n}} = \sqrt{a^2 +
b^2}$ and $$|(\v{a} - \v{b})\bulletproduct\v{n}| =
\left|\colvec{\frac{c}{a} - b_1 \\ -b_2} \bulletproduct \colvec{a\\
b}\right| = |c - ab_1 - bb_2| = |ab_1 + bb_2 - c|,$$giving the
result.   \end{pf}


\begin{exa}
Recall that the medians of $\triangle {ABC}$ are lines joining the
vertices of $\triangle {ABC}$ with the midpoints of the side
opposite the vertex. Prove that the medians of a triangle are
concurrent, that is, that they pass through a common point.
\end{exa}
\begin{rem}
This point of concurrency is called, alternatively,  the {\em
isobarycentre}, {\em centroid}, or {\em centre of gravity} of the
triangle.
\end{rem}
\begin{solu}Let ${M_A}$, ${M_B}$, and ${M_C}$ denote the midpoints
of the lines opposite $A$, $B$, and $C$, respectively. The
equation of the line passing through $A$ and in
the direction of $\vect{AM_A}$ is (with $\v{r} = \colvec{x\\
y}$)
$$\v{r} = \vect{OA} + r\vect{AM_A}.$$Similarly, the equation of the line passing through $B$
and in the direction of $\vect{BM_B}$ is
$$\v{r} = \vect{OB} + s\vect{BM_B}.$$These two lines must intersect at
a point ${G}$ inside the triangle. We will shew that $\vect{GC}$
is parallel to $\vect{CM_C}$, which means that the three points
$G, C, M_C$ are collinear.

Now, $\exists (r_0, s_0) \in \BBR^2$ such that
$$\vect{OA} + r_0\vect{AM_A} = \vect{OG} = \vect{OB} + s_0\vect{BM_B},$$
that is
$$r_0\vect{AM_A} - s_0\vect{BM_B} = \vect{OB} - \vect{OA},$$
or
$$r_0(\vect{AB} + \vect{BM_A}) - s_0(\vect{BA}+ \vect{AM_B}) = \vect{AB}.$$
Since ${M_A}$ and ${M_B}$ are the midpoints of $\bipoint{B}{C}$
and $\bipoint{C}{A}$ respectively, we have $2\vect{BM_A} =
\vect{BC}$ and $2\vect{AM_B} = \vect{AC} = \vect{AB} + \vect{BC}$.
The relationship becomes
$$r_0(\vect{AB} + \frac{1}{2}\vect{BC}) - s_0(-\vect{AB} + \frac{1}{2}\vect{AB} + \frac{1}{2}\vect{BC}) =
\vect{AB},$$
$$ (r_0  + \frac{s_0}{2} - 1)\vect{AB}  = (-\frac{r_0}{2} + \frac{s_0}{2})\vect{BC}.$$
We must have
$$r_0  + \frac{s_0}{2} - 1 = 0,$$
$$-\frac{r_0}{2} + \frac{s_0}{2}  = 0,$$since otherwise the
vectors $\vect{AB}$ and $\vect{BC}$ would be parallel, and the
triangle would be degenerate. Solving, we find $s_0 = r_0 =
\frac{2}{3}$. Thus we have $\vect{OA} + \frac{2}{3}\vect{AM_A} =
\vect{OG}$, or $\vect{AG} = \frac{2}{3}\vect{AM_A}$, and
similarly, $\vect{BG} = \frac{2}{3}\vect{BM_B}$.

\bigskip

     From $\vect{AG} = \frac{2}{3}\vect{AM_A}$, we deduce $\vect{AG} =
2\vect{GM_A}$. Since ${M_A}$ is the midpoint of $\bipoint{B}{C}$,
we have $\vect{GB}+ \vect{GC} = 2\vect{GM_A} = \vect{AG}$, which
is equivalent to
$$\vect{GA} + \vect{GB}+ \vect{GC} = \v{0}.$$ As ${M_C}$
is the midpoint of $\bipoint{A}{B}$ we have $\vect{GA} +
\vect{GB}= 2\vect{GM_C}$. Thus
$$\v{0} = \vect{GA} + \vect{GB}+ \vect{GC} = 2\vect{GM_C} + \vect{GC}.$$
This means that $\vect{GC} = -2\vect{GM_C}$, that is, that they
are parallel, and so the points ${G}$, $C$ and ${M_C}$ all lie on
the same line. This achieves the desired result.
\end{solu}
\begin{rem}
The centroid of $\triangle {ABC}$ satisfies thus
$$\vect{GA} + \vect{GB}+ \vect{GC} = \v{0}, $$and
divides the medians on the ratio $2:1$, reckoning from a vertex.
\end{rem}

\section*{\psframebox{Homework}}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
Find the angle between the lines $2x - y = 1$ and $x - 3y = 1$.
\begin{answer}
Since $\v{a} = \colvec{2 \\ -1}$ is normal to $2x - y = 1$ and
$\v{b} = \colvec{1\\ -3}$ is normal to $x - 3y = 1$, the desired
angle can be obtained by finding the angle between the normal
vectors:

$$\anglebetween{a}{b} = \arccos
\dfrac{\dotprod{a}{b}}{\norm{\v{a}}\norm{\v{b}}}= \arccos
\dfrac{5}{\sqrt{5} \cdot \sqrt{10}} = \arccos \dfrac{1}{\sqrt{2}}
= \dfrac{\pi}{4}. $$

\end{answer}
\end{pro}
\begin{pro}
Find the equation of the line passing through $\colpoint{1\\ -1}$
and in a direction perpendicular to $\colvec{2\\ 1}$.
\begin{answer} $2(x-1) + (y + 1) = 0$ or $2x + y = 1$.  \end{answer}
\end{pro}

\begin{pro}
$\triangle {ABC}$ has centroid ${G}$, and $\triangle A'B'C'$
satisfies
$$\vect{AA'} + \vect{BB'} + \vect{CC'} = \v{0}.$$Prove
that ${G}$ is also the centroid of $\triangle A'B'C'$.
\begin{answer} By Chasles' Rule $\vect{AA'}  = \vect{AG}  + \vect{GA'} $,
$\vect{BB'}  = \vect{BG}  + \vect{GB'} $, and $\vect{CC'}  =
\vect{CG}  + \vect{GC'} $. Thus
$$\begin{array}{lll} \v{0} & = & \vect{AA'} + \vect{BB'} + \vect{CC'} \\ &
=&  \vect{AG}  + \vect{GA'} + \vect{BG}  + \vect{GB'} +
\vect{CG}  + \vect{GC'} \\
& = &  -(\vect{GA} + \vect{GB}+ \vect{GC}) + (\vect{GA'}
+ \vect{GB'} + \vect{GC'}) \\
&  = &  \vect{GA'} + \vect{GB'} + \vect{GC'}, \end{array}
$$whence the result.
\end{answer}
\end{pro}


\begin{pro}
Let ${ABCD}$ be a trapezoid, with bases $\bipoint{A}{B}$ and
$\bipoint{C}{D}$. The lines $(AC)$ and $(BD)$ meet at ${E}$ and
the lines $(AD)$ and $(BC)$ meet at $F$. Prove that the line
$({EF})$ passes through the midpoints of $\bipoint{A}{B}$ and
$\bipoint{C}{D}$ by proving the following steps.
\begin{dingautolist}{202}
\item  Let ${I}$ be the midpoint of $\bipoint{A}{B}$ and let ${J}$
be the point of intersection of the lines $({FI})$ and $({DC})$.
Prove that ${J}$ is the midpoint of $\bipoint{C}{D}$. Deduce that
${F}, {I}, {J}$ are collinear. \item Prove that ${E}, {I}, {J}$
are collinear.
\end{dingautolist}
\begin{answer} We have: \begin{dingautolist}{202}\item The points ${F}, A,
D$ are collinear, and so $\vect{FA}$ is parallel to $\vect{FD}$,
meaning that there is $k\in \BBR\setminus \{0\}$ such that
$\vect{FA} = k\vect{FD}$. Since the lines $(AB)$ and $(DC)$ are
parallel, we obtain through Thales' Theorem that $\vect{FI} =
k\vect{FJ}$ and $\vect{FB} = k\vect{FC}$. This gives
$$ \vect{FA} - \vect{FI} = k(\vect{FD} - \vect{FJ}) \implies \vect{IA} = k\vect{JD}.  $$
Similarly
$$ \vect{FB} - \vect{FI} = k(\vect{FC} - \vect{FJ}) \implies \vect{IB} = k\vect{JC}.
$$Since ${I}$ is the midpoint of $\bipoint{A}{B}$,  $\vect{IA} + \vect{IB} =
\v{0}$, and thus $k(\vect{JC} + \vect{JD}) = \v{0}$. Since $k \neq
0,$ we have $\vect{JC} + \vect{JD} = \v{0}$, meaning that ${J}$ is
the midpoint of $\bipoint{C}{D}$. Therefore the midpoints of
$\bipoint{A}{B}$ and $\bipoint{C}{D}$ are aligned with ${F}$.

\item  Let ${J'}$ be the intersection of the lines $({EI})$ and
$({DC})$. Let us prove that ${J'} = {J}$.
\bigskip

Since the points ${E}, A, C$ are collinear, there is $l\neq 0$
such that $\vect{EA}= l\vect{EC}$. Since the lines $(AB)$ and
$({DC})$ are parallel, we obtain via Thales' Theorem that
$\vect{EI} = l\vect{EJ'}$ and $\vect{EB} = l\vect{ED}$. These
equalities give
$$\vect{EA}-\vect{EI} = l(\vect{EC}- \vect{EJ'}) \implies \vect{IA} = l\vect{J'C},$$
$$\vect{EB} -\vect{EI} = l(\vect{ED} - \vect{EJ'}) \implies \vect{IB} = l\vect{J'D}.$$
Since ${I}$ is the midpoint of $\bipoint{A}{B}$, $\vect{IA} +
\vect{IB} = \v{0}$, and thus $l(\vect{J'C} + \vect{J'D}) = \v{0}$.
Since $l \neq 0$, we deduce $\vect{J'C} + \vect{J'D} = \v{0}$,
that is, ${J'}$ is the midpoint of $\bipoint{C}{D}$, and so ${J'}
= {J}$.


\end{dingautolist}
\end{answer}
\end{pro}
\begin{pro}
Let ${ABCD}$ be a parallelogram.\begin{dingautolist}{202} \item
Let ${E}$ and ${F }$ be  points such that
$$\vect{AE} = \frac{1}{4} \vect{AC} \ \ \ \
{\rm and}\ \ \ \vect{AF} = \frac{3}{4}\vect{AC}.
$$Demonstrate that the lines $({BE})$ and $({DF})$ are
parallel. \item  Let ${I}$ be the midpoint of $\bipoint{A}{D}$ and
${J}$ be the midpoint of $\bipoint{B}{C}$.  Demonstrate that the
lines $({AB})$ and $({IJ})$ are parallel. What type of
quadrilateral is ${IEJF}$?
\end{dingautolist}
\begin{answer} We have: \begin{dingautolist}{202} \item By Chasles' Rule
$$\begin{array}{lll}\vect{AE} = \frac{1}{4} \vect{AC}  & \iff
&  \vect{AB} +  \vect{BE} = \frac{1}{4} \vect{AC}
\end{array},$$ and
$$\begin{array}{lll}\vect{AF} =
\frac{3}{4} \vect{AC}  & \iff &  \vect{AD} +  \vect{DF} =
\frac{3}{4} \vect{AC} \end{array}.$$Adding, and observing that
since  ${ABCD}$ is a parallelogram, $ \vect{AB} = \vect{DC}$,
$$\begin{array}{lll}\vect{AB} + \vect{BE} + \vect{AD} +
\vect{DF} = \vect{AC} & \iff & \vect{BE} + \vect{DF} = \vect{AC} -
\vect{AB} - \vect{AD} \\ & \iff &
 \vect{BE} + \vect{DF} =
\vect{AD} + \vect{DC} - \vect{AB} - \vect{AD} \\
& \iff &  \vect{BE} = -\vect
{DF}.
\end{array}
$$
The last equality shews that the lines $({BE})$ and $({DF})$ are
parallel. \item Observe that $\vect{BJ} = \frac{1}{2}\vect{BC} =
\frac{1}{2}\vect{AD} = \vect{AI} = -\vect{IA}$ . Hence
$$\vect{IJ} = \vect{IA} + \vect{AB} + \vect{BJ}  = \vect{AB},    $$
proving that the lines $({AB})$ and $({IJ})$ are parallel.

\bigskip

Observe that $$\vect{IE} = \vect{IA} + \vect{AE} =
\frac{1}{2}\vect{DA} + \frac{1}{4}\vect{AC} = \frac{1}{2}\vect{CB}
+ \vect{FC} = \vect{CJ} + \vect{FC} = \vect{FC} + \vect{CJ} =
\vect{FJ},
$$whence  ${IEJF}$ is a parallelogram.


\end{dingautolist}
\end{answer}
\end{pro}


\begin{pro}
${ABCD}$ is a parallelogram; point ${I}$ is the midpoint of
$\bipoint{A}{B}$. Point $E$ is defined by the relation $\vect{IE}
= \frac{1}{3} \vect{ID}$. Prove that $$ \vect{AE} =
\frac{1}{3}\left(\vect{AB} + \vect{AD}\right)$$ and prove that the
points $A, C, E$ are collinear.
\begin{answer} Since $\vect{IE} = \frac{1}{3} \vect{ID}$ and
$\bipoint{I}{D}$ is a median of $\triangle ABD$, $E$ is the centre
of gravity of $\triangle ABD$. Let ${M}$ be the midpoint of
$\bipoint{B}{D}$, and observe that ${M}$ is the centre of the
parallelogram, and so $2\vect{AM} = \vect{AB} + \vect{AD}$. Thus
$$ \vect{AE} = \frac{2}{3}\vect{AM} = \frac{1}{3}(2\vect{AM}) = \frac{1}{3}(\vect{AB} + \vect{AD}) .
$$To shew that $A, C, E$ are collinear it is
enough to notice  that $\vect{AE} = \frac{1}{3}\vect{AC}$.
\end{answer}
\end{pro}

\begin{pro}\label{pro:barycentre_three_points}
Put $\vect{OA} = \v{a}$, $\vect{OB} = \v{b}$, $\vect{OC} = \v{c}$.
Prove that  $A, B, C$ are collinear if and only if there exist
real numbers $\alpha, \beta, \gamma$, not all zero, such that
$$\alpha\v{a} +  \beta\v{b} + \gamma\v{c} =
\v{0}, \ \ \ \ \alpha + \beta +  \gamma = 0.
$$
\begin{answer}
Suppose $A, B, C$ are collinear and that $
\dfrac{\norm{\bipoint{A}{B}}}{\norm{\bipoint{B}{C}}} =
\dfrac{\lambda}{\mu}$. Then by the Section Formula
\ref{eq:section_formula},
$$ \v{b} = \dfrac{\lambda\v{c} + \mu\v{a}}{\lambda + \mu},   $$
whence $\mu \v{a} - (\lambda + \mu)\v{b} + \lambda \v{c} = \v{0}$
and clearly $\mu -  (\lambda + \mu) + \lambda = 0$. Thus we may
take $\alpha = \mu$, $\beta = \lambda + \mu$, and $\gamma =
\lambda$.
\bigskip
Conversely, suppose that $$\alpha\v{a} +  \beta\v{b} + \gamma\v{c}
= \v{0}, \ \ \ \ \alpha + \beta +  \gamma = 0
$$ for some real numbers $\alpha, \beta, \gamma$, not all zero.
Assume without loss of generality that $\gamma \neq 0$. Otherwise
we simply interchange the roles of $\gamma$ with those of $\alpha$ or $\beta$.
Then $\gamma = -(\alpha + \beta) \neq 0$. Hence
$$\alpha \v{a} + \beta\v{b} = (\alpha +
\beta)\v{c} \implies \v{c} = \dfrac{\alpha \v{a} +
\beta\v{b}}{\alpha + \beta},
$$and thus $\bipoint{O}{C}$ divides $\bipoint{A}{B}$ into the
ratio $\dfrac{\beta}{\alpha}$, and therefore, $A, B, C$ are
collinear.
\end{answer}
 \end{pro}

\begin{pro} Prove Desargues' Theorem:\index{theorem!Desargues} If $\triangle ABC$ and $\triangle
A'B'C'$ (not necessarily in the same plane) are so positioned that
$(AA')$, $(BB')$, $(CC')$ all pass through the same point $V$ and
if $(BC)$ and $(B'C')$ meet at $L$, $(CA)$ and $(C'A')$ meet at
$M$, and  $(AB)$ and $(A'B')$ meet at $N$, then $L, M, N$ are
collinear.
\begin{answer}
Put $\vect{OX} = \v{x}$ for $X\in \{A, A', B, B', C, C', L, M, N,
V\}$. Using problem \ref{pro:barycentre_three_points} we deduce
\begin{equation} \v{v} + \alpha \v{a} + \alpha'\v{a'} =
\v{0}, \ \ \ 1 + \alpha + \alpha' = 0, \label{eq:desargues1}
\end{equation}
\begin{equation} \v{v} + \beta \v{b} + \beta'\v{b'} = \v{0},
\ \ \ 1 + \beta + \beta' = 0, \label{eq:desargues2} \end{equation}
\begin{equation} \v{v} + \gamma \v{c} +
\gamma'\v{c'} = \v{0}, \ \ \ 1 + \gamma + \gamma' = 0.
\label{eq:desargues3}
\end{equation}
From \ref{eq:desargues2}, \ref{eq:desargues3}, and the Section
Formula \ref{eq:section_formula} we find
$$\dfrac{\beta\v{b} - \gamma\v{c}}{\beta - \gamma} = \dfrac{\beta'\v{b'} - \gamma'\v{c'}}{\beta' -
\gamma'}= \v{l},
$$whence $(\beta - \gamma)\v{l} = \beta\v{b} -
\gamma\v{c}$. In a similar fashion, we deduce $$(\gamma -
\alpha)\v{m} = \gamma \v{c} - \alpha\v{a},
$$
$$(\alpha -
\beta)\v{n} = \alpha \v{a} - \beta\v{b}.
$$This gives
$$(\beta - \gamma)\v{l} +  (\gamma -
\alpha)\v{m} +  (\alpha - \beta)\v{n}  = \v{0}, $$
$$ (\beta - \gamma) +  (\gamma -
\alpha) +  (\alpha - \beta)  = 0,  $$and appealing to problem
\ref{pro:barycentre_three_points} once again, we deduce that $L,
M, N$ are collinear.

\end{answer}


\end{pro}

\end{multicols}
\section{Vectors in \protect$\BBR^3\protect$}
\index{vector!in space} We now extend the notions studied for
$\BBR^2$ to $\BBR^3$. The rectangular coordinate form of a vector in
$\BBR^3$ is
$$\v{a} = \colvec{a_1 \\ a_2 \\ a_3}.$$In particular, if
$$\v{{i}} = \colvec{ 1 \\ 0 \\ 0 }, \
\v{{j}} = \colvec{0 \\ 1 \\ 0}, \v{{k}} = \colvec{0 \\ 0
\\ 1}$$then we can write any vector $\v{a} = \colvec{
a_1 \\ a_2 \\ a_3}$ as a sum
$$\v{a} = a_1\v{i} + a_2\v{j} + a_3\v{k}.$$
Given $\v{a} = \colvec{a_1 \\ a_2 \\ a_3}$ and $\v{b} =
\colvec{b_1 \\ b_2 \\ b_3}$, their dot product is
$$\dotprod{a}{b} = a_1b_1 + a_2b_2 + a_3b_3,$$
and
$$\norm{\v{a}} = \sqrt{a_1 ^2 + a_2 ^2 + a_3 ^2}.$$

We also have $$\v{i}\bulletproduct\v{j} = \v{j}\bulletproduct\v{k}
= \v{k}\bulletproduct\v{i} = 0,$$and $$\norm{\v{i}} = \norm{\v{j}}
= \norm{\v{k}} = 1.$$


\begin{df}  A system of unit vectors $\v{i}, \v{j}, \v{k}$ is
{\em right-handed} if the shortest-route rotation which brings
$\v{i}$ to coincide with $\v{j}$ is performed in a
counter-clockwise manner. It is {\em left-handed} if the rotation
is done in a clockwise manner. \index{right-handed}
\index{left-handed}
\end{df}
To study points in space we must first agree on the orientation
that we will give our coordinate system. We will use, unless
otherwise noted, a right-handed orientation, as in figure
\ref{fig:righthanded}.

\vspace{1cm}
\begin{figure}[htb]
\begin{minipage}{6cm}
$$\psset{unit=1pc}
\psline[linewidth=1.5pt, linecolor=red]{->}(0,0)(3,
0)\uput[r](3,0){\v{j}} \psline[linewidth=1.5pt,
linecolor=blue]{->}(0,0)(0, 3)\uput[u](0,3){\v{k}}
\psline[linewidth=1.5pt, linecolor=green]{->}(0,0)(-2.213,
-2.213)\uput[d](-2.213,-2.213){\v{i}}
$$\vspace{1cm}\hangcaption{Right-handed system.} \label{fig:righthanded}\end{minipage}
\hfill
\begin{minipage}{6cm}
$$\psset{unit=1pc}
\psline[linewidth=1.5pt, linecolor=red]{->}(0,0)(-3,
0)\uput[l](-3,0){\v{j}} \psline[linewidth=1.5pt,
linecolor=blue]{->}(0,0)(0, 3)\uput[u](0,3){\v{k}}
\psline[linewidth=1.5pt, linecolor=green]{->}(0,0)(-2.213,
-2.213)\uput[d](-2.213,-2.213){\v{i}}
$$\vspace{1cm}\hangcaption{Left-handed system.}\end{minipage}\end{figure}



\begin{rem}
The analogues of the Cauchy-Bunyakovsky-Schwarz and the Triangle
Inequality also hold in $\BBR^3$.
\end{rem}

\bigskip

We now define the (standard) cross (wedge) product in $\BBR^3$ as a
product satisfying the following properties.

\begin{df} Let $(\v{x}, \v{y}, \v{z}, \alpha) \in
\BBR^3\times\BBR^3\times\BBR^3\times\BBR.$ The wedge product
$\cross: \BBR^3 \times \BBR^3 \rightarrow \BBR^3$ is a closed binary
operation satisfying
\begin{enumerate}
\item[CP1] {\bf Anti-commutativity:} \begin{equation}\v{ x}
\cross\v{ y} = -(\v{ y} \cross\v{
x})\label{cp:anti_commutativity}\end{equation} \item[CP2] {\bf
Bilinearity:}
\begin{equation}(\v{ x} + \v{z}) \cross\v{ y}
= \v{ x} \cross\v{ y} + \v{z} \cross\v{ y}, \ \ \ \v{ x} \cross (
\v{z} + \v{ y}) = \v{ x} \cross\v{z} + \v{ x} \cross\v{
y}\label{cp:bilinearity}\end{equation} \item[CP3] {\bf Scalar
homogeneity:}
\begin{equation}(\alpha\v{ x}) \cross\v{ y} =
\v{ x} \cross (\alpha \v{ y}) = \alpha(\v{ x} \cross\v{
y})\label{cp:scalar_homogeneity}\end{equation} \item[CP4]
\begin{equation}\v{ x} \cross\v{ x} = \v{{\bf
0}}\label{cp:cross_itself_is_0}\end{equation} \item[CP5] {\bf
Right-hand Rule:}
\begin{equation}\crossprod{i}{j} = \v{k}, \ \ \
\crossprod{j}{k} = \v{i}, \ \ \ \crossprod{k}{i} = \v{j}
\label{cp:right_hand_rule}\end{equation}
\end{enumerate}
\end{df}
\begin{thm}
Let $\v{x} = \colvec{x_1 \\ x_2 \\ x_3}$ and $\v{y} = \colvec{y_1
\\ y_2 \\ y_3}$ be vectors in $\BBR^3$. Then
$$\crossprod{x}{y} = (x_2y_3 - x_3y_2 )\v{i}
+ (x_3y_1 - x_1y_3 )\v{j} + (x_1y_2 - x_2y_1 )\v{k}.$$
\end{thm}
\begin{pf}
Since $\crossprod{i}{i} =\crossprod{j}{j} = \crossprod{k}{k} =
\v{0} $ we have
$$\begin{array}{lll} (x_1\v{i} + x_2\v{j} + x_3\v{k}) \cross
(y_1\v{i} + y_2\v{j} + y_3\v{k}) & = & x_1y_2\crossprod{i}{j} +
x_1y_3\crossprod{i}{k} \\
& & \qquad + x_2y_1\crossprod{j}{i} +
x_2y_3\crossprod{j}{k} \\
& & \qquad + x_3y_1\crossprod{k}{i} +
x_3y_2\crossprod{k}{j} \\
& = & x_1y_2\v{k} - x_1y_3\v{j}- x_2y_1\v{k}\\ & & \qquad +
x_2y_3\v{i} +x_3y_1\v{j} - x_3y_2\v{i},
\end{array}$$from where the theorem follows.
\end{pf}
\begin{exa}
Find $$ \colvec{1 \\ 0 \\ -3} \cross \colvec{0 \\ 1 \\ 2}.$$
 \label{exa:wedge1}\end{exa}
\begin{solu}We have
$$\begin{array}{lll}
(\v{i} - 3\v{k}) \cross (\v{j} + 2\v{k}) & = & \crossprod{i}{j} +
2\crossprod{i}{k} - 3\crossprod{k}{j} -
6\crossprod{k}{k} \\
& = & \v{k} - 2 \v{j} + 3 \v{i} - 6\v{0} \\
& = & 3\v{i} - 2\v{j} + \v{k}.
\end{array}$$
Hence $$ \colvec{1 \\ 0 \\ -3} \cross \colvec{0 \\ 1 \\ 2} =
\colvec{3 \\ -2 \\  1}.$$
\end{solu}
\begin{thm} The cross product  vector $\v{x} \cross\v{y}$ is
simultaneously perpendicular to $\v{x}$ and $ \v{y}$.
\end{thm}
\begin{pf}
We will only check the first assertion, the second verification is
analogous.
$$\begin{array}{lll}
\v{ x}\bulletproduct(\crossprod{ x}{\bf y}) & = &
(x_1\v{i} + x_2 \v{j} + x_3\v{k})\bulletproduct ((x_2y_3 - x_3y_2 )\v{i} \\
& & \qquad + (x_3y_1 - x_1y_3 )\v{j} + (x_1y_2 -
x_2y_1 )\v{k} ) \\
& = & x_1x_2y_3 - x_1x_3y_2 + x_2x_3y_1 - x_2x_1y_3 + x_3x_1y_2 -
x_3x_2y_1 \\
 & = & 0,
\end{array}$$
completing the proof.
\end{pf}

\begin{thm}\label{thm:semi_associative_cross_prod}
$\v{ a}\cross (\crossprod{ b}{c}) = (\dotprod{\bf a}{c})\v{ b} -
(\dotprod{\bf a}{\bf b})\v{c}.$
\end{thm}
\begin{pf}
$$\begin{array}{lll}
\v{ a}\cross (\crossprod{ b}{c}) & = &
(a_1\v{i} + a_2 \v{j} + a_3\v{k})\cross ((b_2c_3 - b_3c_2 )\v{i}   \\
& & \qquad  + (b_3c_1 - b_1c_3)\v{j} + (b_1c_2 -
b_2c_1)\v{k} ) \\
& = &   a_1(b_3c_1 - b_1c_3)\v{k} - a_1(b_1c_2 -
b_2c_1)\v{j} \\
&  & \qquad - a_2(b_2c_3 - b_3c_2 )\v{k}+ a_2(b_1c_2 - b_2c_1)\v{i} \\
&  & \qquad + a_3(b_2c_3 - b_3c_2
)\v{j} - a_3(b_3c_1 - b_1c_3)\v{i} \\
& = & (a_1c_1 + a_2c_2 + a_3c_3)(b_1\v{i} + b_2\v{j} +
b_3\v{k})  \\
& & \qquad
+(-a_1b_1 - a_2b_2 - a_3b_3)(c_1\v{i} + c_2\v{j} + c_3\v{k})\\
 & = &  (\dotprod{\bf a}{c})\v{ b} - (\dotprod{\bf a}{\bf b})\v{c}, \\
\end{array}$$
completing the proof.
\end{pf}
\begin{thm}[Jacobi's Identity]
$$\v{a}\cross (\crossprod{b}{c}) + \v{ b}\cross (\crossprod{c}{a}) + \v{c}\cross (\crossprod{ a}{b})  = \v{0}.$$
\end{thm}
\begin{pf}From Theorem \ref{thm:semi_associative_cross_prod} we
have $$\v{ a}\cross (\crossprod{ b}{c}) = (\dotprod{\bf a}{c})\v{
b} - (\dotprod{\bf a}{\bf b})\v{c},     $$
$$\v{ b}\cross (\crossprod{ c}{a}) = (\dotprod{ b}{a})\v{
c} - (\dotprod{ b}{ c})\v{a},     $$
$$\v{ c}\cross (\crossprod{ a}{b}) = (\dotprod{c}{b})\v{
a} - (\dotprod{c}{a})\v{b},     $$ and adding yields the result.
\end{pf}


 \begin{thm}
Let $\anglebetween{x}{y}\in [0;\pi[$ be the convex  angle between
two vectors $\v{ x}$ and $\v{ y}$. Then
$$||\v{ x}\cross\v{ y}|| = ||\v{ x}||||\v{ y}||\sin\anglebetween{x}{y} .$$
\label{thm:sineanglebetweenc}\end{thm}
\begin{pf}We have
$$\begin{array}{lll}
||\v{ x}\cross\v{ y}||^2 & = &    (x_2y_3 - x_3y_2 )^2 + (x_3y_1 -
x_1y_3 )^2 + (x_1y_2 -
x_2y_1 )^2        \\
& = & x_2 ^2 y_3^2  - 2x_2y_3x_3y_2  + x_3 ^2 y_2 ^2 + x_3 ^2 y_1
^2 - 2x_3y_1x_1y_3  \\
& & \qquad + x_1 ^2y_3 ^2 + x_1 ^2 y_2 ^2 - 2x_1y_2x_2y_1
+ x_2 ^2y_1 ^2 \\
& = & (x_1 ^2 + x_2 ^2 + x_3 ^2)(y_1 ^2 + y_2 ^2 + y_3 ^2) -
(x_1y_1 + x_2y_2 + x_3y_3)^2 \\
& = & ||\v{ x}||^2||\v{{\bf y}}||^2 -
(\v{{\bf x}}\bulletproduct \v{{\bf y}})^2 \\
& = & ||\v{ x}||^2||\v{{\bf y}}||^2 -
||\v{ x}||^2||\v{{\bf y}}||^2\cos^2\anglebetween{x}{y}  \\
& = & ||\v{ x}||^2||\v{{\bf y}}||^2\sin^2\anglebetween{x}{y},
\end{array}
$$whence the theorem follows. The Theorem is illustrated in Figure
\ref{fig:sineanglebetweenc}. Geometrically it means that the area
of the parallelogram generated by joining $\v{x}$ and $\v{y}$ at
their heads is $\norm{\crossprod{x}{y}}$.
\end{pf}

\vspace{1cm}
\begin{figure}[htb]
$$\psset{unit=4pc}
\psset{viewpoint=1 1.5 1} \ThreeDput[normal=1 0 0](0,0,0){
%%This draws the vector (1,0,0)
\psline[linewidth=2pt]{->}(0,0)(0,1)
\uput[u](0,1){\crossprod{x}{y}}
%%This draws the vector (0,1,0)
\psline[linewidth=2pt]{->}(0,0)(1,0) \uput[u](1,0){\v{y}} } %%
\ThreeDput[normal=0 1 0](0,0,0){
%%This draws the vector (1,0,0)
\psline[linewidth=2pt]{->}(0,0)(-1,0) \uput[u](-1,0){\v{x}}}
\ThreeDput[normal=0 0 1](0,0,0){
%%This draws the grid
\psframe[fillstyle=solid, fillcolor=green](0,0)(1,1) }
$$ \vspace{1cm} \footnotesize\hangcaption{Theorem
\ref{thm:sineanglebetweenc}.}\label{fig:sineanglebetweenc}

\end{figure}


The following corollaries are now obvious.
\begin{cor}\label{cor:a_cross_parallel_is_0}
Two non-zero vectors $\v{ x}, \v{ y}$ satisfy $\v{ x}\cross \v{ y}
= \v{{\bf 0}}$ if and only if they are parallel.
\end{cor}
\begin{cor}[Lagrange's Identity]
$$||\crossprod{ x}{\bf y}||^2 = \norm{\bf x}^2\norm{\bf y}^2 - (\dotprod{\bf x}{\bf y})^2.$$
\end{cor}
\begin{exa}
Let $\v{ x}\in \BBR^3, \norm{\bf x} = 1$. Find
$$||\crossprod{ x}{i}||^2 + ||\crossprod{ x}{j}||^2 + ||\crossprod{ x}{k}||^2.$$
\end{exa}
\begin{solu}By Lagrange's Identity,
$$||\crossprod{ x}{i}||^2  =  \norm{\v{x}}^2\norm{\v{i}}^2 - (\dotprod{x}{i})^2 = 1 - (\dotprod{\bf x}{i})^2,$$
$$||\crossprod{ x}{j}||^2  =  \norm{\v{x}}^2\norm{\v{j}}^2 - (\dotprod{x}{j})^2 = 1 - (\dotprod{\bf x}{j})^2,$$
$$||\crossprod{ x}{k}||^2  =  \norm{\v{x}}^2\norm{\v{k}}^2 - (\dotprod{x}{k})^2 = 1 -
(\dotprod{x}{k})^2,$$and since $(\dotprod{x}{i})^2 +
(\dotprod{x}{j})^2 + (\dotprod{x}{k})^2 = \norm{\v{x}}^2 = 1$, the
desired sum equals $3 - 1 = 2$.
\end{solu}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
Consider a tetrahedron $ABCS$. [A] Find $\vect{AB} + \vect{BC} +
\vect{CS}$. [B]~Find $\vect{AC} + \vect{CS} + \vect{SA} +
\vect{AB}$.
\begin{answer}
[A] $\vect{AS}$, [B] $\vect{AB}$.
\end{answer}
\end{pro}


\begin{pro}
Find a vector simultaneously
perpendicular to $\colvec{1 \\ 1\\ 1}$ and  $\colvec{1 \\ 1\\
0}$ and having norm $3$.\begin{answer} Put $$\v{a} = \colvec{1 \\ 1\\ 1} \cross \colvec{1 \\ 1\\
0} = (\v{i} + \v{j} + \v{k}) \cross (\v{i} + \v{j}) = \v{j} -
\v{i} = \colvec{-1 \\ 1 \\ 0} . $$ Then either
$$ \frac{3\v{a}}{\norm{\v{a}}} =
\frac{3\v{a}}{\sqrt{2}} = \colvec{-\frac{3}{\sqrt{2}} \\
\frac{3}{\sqrt{2}} \\ 0},
$$ or $$ -\frac{3\v{a}}{\norm{\v{a}}} =
 \colvec{\frac{3}{\sqrt{2}} \\
-\frac{3}{\sqrt{2}} \\ 0}$$will satisfy the requirements.


\end{answer}
\end{pro}
\begin{pro}
Find the area of the triangle whose vertices are at $P =
\colpoint{0\\ 0 \\ 1}$, $Q = \colpoint{0\\ 1 \\ 0}$, and $R =
\colpoint{1\\ 0 \\ 0}$.
\begin{answer}
The desired area is $$\norm{\vect{PQ} \cross \vect{PR}} =
\norm{\colvec{0 \\ 1 \\ -1} \cross \colvec{1 \\ 0 \\ -1}} =
\norm{\colvec{-1\\ -1 \\ -1}} = \sqrt{3}.$$
\end{answer}
\end{pro}
\begin{pro}
Prove or disprove! The cross product  is  associative.
\begin{answer} It is not associative, since $\v{i}\cross (\crossprod{i}{j}) =
\crossprod{i}{k} = -\v{j}$ but $(\crossprod{i}{i})\cross\v{j} =
\crossprod{0}{j} = \v{{\bf 0}}$.
\end{answer}
\end{pro}
\begin{pro}
Prove that $\crossprod{x}{x} = \v{0}$ follows from the
anti-commutativity of the cross product.
\begin{answer}
We have $\crossprod{x}{x} = -\crossprod{x}{x}$ by letting $\v{y} =
\v{x}$ in \ref{cp:anti_commutativity}. Thus $2\crossprod{x}{x} =
\v{0}$ and hence $\crossprod{x}{x} = \v{0}$.
\end{answer}

\end{pro}
\begin{pro}
Expand the product $(\v{a} - \v{b})\cross (\v{a} + \v{b})$.
\begin{answer} $2\crossprod{a}{b}$ \end{answer}
\end{pro}

\begin{pro}
The vectors $\v{a}, \v{b}$ are constant vectors. Solve the
equation $\v{a}\cross (\crossprod{x}{b}) = \v{b} \cross
(\crossprod{x}{a})$.
\begin{answer}
$$\v{a}\cross (\crossprod{x}{b}) = \v{b}
\cross (\crossprod{x}{a}) \iff (\dotprod{a}{b})\v{x} -
(\dotprod{a}{x})\v{b} = (\dotprod{b}{a})\v{x} -
(\dotprod{b}{x})\v{a} \iff \dotprod{a}{x} = \dotprod{b}{x} = 0.
$$The answer is thus $\{\v{x}: \v{x}\in
\BBR\crossprod{a}{b}\}$.
\end{answer}
\end{pro}


\begin{pro}
The vectors $\v{a}, \v{b}, \v{c}$ are constant vectors. Solve the
system of equations
$$2\v{x} + \crossprod{y}{a} = \v{b}, \ \ \ 3\v{y} + \crossprod{x}{a} = \v{c},   $$
\begin{answer} $$\v{x} = \dfrac{(\dotprod{a}{b})\v{a} + 6\v{b} + 2\crossprod{a}{c}}{12 + 2\norm{\v{a}}^2}$$
 $$\v{y} = \dfrac{(\dotprod{a}{c})\v{a} + 6\v{c} + 3\crossprod{a}{b}}{18 + 3\norm{\v{a}}^2}$$    \end{answer}

\end{pro}

\begin{pro}
Prove that there do not exist three unit vectors in $\BBR^3$ such
that the angle between any two of them be $> \dfrac{2\pi}{3}$.
\begin{answer}
Assume contrariwise that $\v{a}$, $\v{b}$, $\v{c}$ are three unit
vectors in $\BBR^3$ such that the angle between any two of them is
$> \dfrac{2\pi}{3}$. Then $\dotprod{a}{b} < -\dfrac{1}{2}$,
$\dotprod{b}{c} < -\dfrac{1}{2}$, and $\dotprod{c}{a} <
-\dfrac{1}{2}$. Thus
$$\begin{array}{lll} \norm{\v{a} + \v{b} + \v{c}}^2 & = &  \norm{\v{a}}^2 + \norm{\v{b}}^2 + \norm{\v{c}}^2\\
& & \qquad + 2\dotprod{a}{b}  + 2\dotprod{b}{c}  + 2\dotprod{c}{a} \\
& < & 1 + 1 + 1 -1-1-1 \\
& = & 0,
\end{array} $$which is impossible, since a norm of vectors is
always $\geq 0$.
\end{answer}
\end{pro}


\begin{pro}
Let $\v{a}\in \BBR^3$ be a fixed vector. Demonstrate that $$ X =
\{\v{x}\in\BBR^3: \crossprod{a}{x} = \v{0}\}$$is a subspace of
$\BBR^3$.
\begin{answer}
Take $(\v{u}, \v{v})\in X^2$ and $\alpha\in \BBR$. Then
$$\v{a}\cross (\v{u}+\alpha \v{v}) = \crossprod{a}{u} + \alpha \crossprod{a}{v} = \v{0} + \alpha\v{0} =\v{0},
$$proving that $X$ is a vector subspace of $\BBR^3$.
\end{answer}
\end{pro}
\begin{pro}
Let $(\v{a}, \v{b})\in (\BBR^3)^2$ and assume that $\dotprod{\bf
a}{\bf b} = 0$ and that $\v{a}$ and $\v{b}$ are linearly
independent. Prove that $\v{a}, \v{b}, \crossprod{a}{b}$ are
linearly independent. \label{exa:3_vectors_in_r3}\begin{answer}
Since $\v{a}, \v{b}$ are linearly independent, none of them is
$\v{0}$. Assume that there are $(\alpha, \beta, \gamma)\in\BBR^3$
such that
\begin{equation}\label{eq:3_vectors_in_r3}\alpha\v{a}  +
\beta\v{b} + \gamma\crossprod{a}{b} = \v{0}.
\end{equation} Since $\v{a}\bulletproduct(\crossprod{a}{b}) = 0$,
taking the dot product of \ref{eq:3_vectors_in_r3} with $\v{a}$
yields $\alpha \norm{\v{a}}^2 = 0$, which means that $\alpha = 0$,
since $\norm{\v{a}} \neq 0$. Similarly, we take the dot product
with $\v{b}$ and $\crossprod{a}{b}$ obtaining respectively, $\beta
= 0$ and $\gamma = 0$. This establishes linear independence.
\end{answer}
\end{pro}
\begin{pro}
Let $(\v{ a}, \v{ b})\in \BBR^3 \times \BBR^3$ be fixed. Solve the
equation
$$\crossprod{ a}{\bf x} = \v{b},$$for $\v{ x}$.
\begin{answer} Since $\v{ a} \perp \crossprod{ a}{\bf x} =
\v{ b}$, there are no solutions if $\dotprod{\bf a}{\bf b} \neq 0.$
Neither are there solutions if $\v{ a} = \v{{\bf 0}}$ and $\v{ b}
\neq \v{{\bf 0}}$. If both $\v{ a} = \v{ b} = \v{{\bf 0}}$, then the
solution set is the whole of $\BBR^3.$ Assume thus that
$\dotprod{\bf a}{\bf b} = 0$ and that $\v{ a}$ and $\v{ b}$ are
linearly independent. Then $\v{a}, \v{b}, \crossprod{a}{b}$ are
linearly independent, and so they constitute a basis for $\BBR^3$.
Any $\v{x} \in \BBR^3$ can be written in the form
$$\v{x} = \alpha \v{a} + \beta\v{b} + \gamma\crossprod{a}{b}.$$
We then have
$$\begin{array}{lll}\v{b} & =  & \crossprod{a}{x} \\
&  =  & \beta\crossprod{a}{b} + \gamma \v{
a}\cross (\crossprod{a}{b}) \\
& = & \beta\crossprod{a}{b} + \gamma
((\dotprod{a}{b})\v{a} - (\dotprod{a}{a})\v{b}). \\
& = & \beta\crossprod{a}{b} - \gamma
(\dotprod{a}{a}\v{b}) \\
& = & \beta\crossprod{a}{b} -\gamma\norm{\v{a}}^2\v{b},
\end{array}$$
from where $$\beta\crossprod{ a}{\bf b} + (-\gamma\norm{\v{a}}^2 -
1) \v{b} = \v{0},$$ which means that $\beta = 0$ and $\gamma =
-\frac{1}{\norm{\v{a}}^2}$, since $\v{a}, \v{b}, \crossprod{a}{b}$
are linearly independent. Thus
$$\v{x} = \alpha\v{a} -
\frac{1}{\norm{\v{a}}^2}\crossprod{a}{b}$$in this last case.
\end{answer}
\end{pro}

\begin{pro}
Let $\v{h}, \v{k}$ be fixed vectors in $\BBR^3$. Prove that
$$\fun{L}{(\v{x}, \v{y})}{\crossprod{x}{k} +
\crossprod{h}{y}}{\BBR^3\times \BBR^3}{\BBR^3}$$is a linear
transformation. \begin{answer} Let $\v{x}, \v{y}, \v{x}', \v{y}'$ be
vectors in $\BBR^3$ and let $\alpha\in\BBR$ be a scalar. Then
$$\begin{array}{lll} L((\v{x}, \v{y}) + \alpha
(\v{x}',\v{y}')) & = & L(\v{x} + \alpha \v{x}', \v{y} +
\alpha\v{y}')
\\ & = & (\v{x} +
\alpha \v{x}')\cross \v{k}  +  \v{h}\cross (\v{y} +  \alpha\v{y}') \\
& = & \crossprod{x}{k} + \alpha\v{x}'\cross \v{k} +
\crossprod{h}{y} +
\v{h} \cross \alpha\v{y}' \\
& = & L(\v{x}, \v{y}) + \alpha L(\v{x}', \v{y}')
\end{array}$$
\end{answer}
\end{pro}
\end{multicols}

\section{Planes and Lines in $\BBR^3$}

\begin{df}
If bi-point representatives of a family of vectors in $\BBR^3$ lie
on the same plane, we will say that the vectors are {\em coplanar}
or parallel to the plane.
\end{df}

\begin{lem}
Let $\v{v}, \v{w}$ in $\BBR^3$ be non-parallel vectors. Then every
vector $\v{u}$ of the form  $$\v{u} = a\v{v} + b\v{w},$$ ($(a, b)
\in \BBR^2$ arbitrary) is coplanar with both $\v{v}$ and $\v{w}$.
Conversely, any vector $\v{t}$ coplanar with both $\v{v}$ and
$\v{w}$  can be uniquely expressed in the form
$$\v{t} = p\v{v} + q\v{w}.$$
\label{lem:coplanarvectors}\end{lem}
\begin{pf}
This follows at once from Corollary \ref{cor:orthodecomp_2}, since
the operations occur on a plane, which can be identified with
$\BBR^2$.
\end{pf}

\bigskip

A plane is determined by three non-collinear points. Suppose that
$A$, $B$, and $C$ are non-collinear points on the same plane and
that $R = \colpoint{x\\ y\\ z}$ is another arbitrary point on this
plane. Since $A$, $B$, and $C$ are non-collinear, $\vect{AB}$ and
$\vect{AC}$, which are coplanar, are non-parallel. Since
$\vect{AR}$ also lies on the plane, we have by Lemma
\ref{lem:coplanarvectors}, that there exist real numbers $p, q$
with
$$\vect{AR} = p\vect{AB} + q\vect{AC}.$$By Chasles' Rule,
$$\vect{OR} = \vect{OA}  + p(\vect{OB} - \vect{OA}) +
q(\vect{OC} - \vect{OA}),$$is the equation of a plane containing
the three non-collinear points $A$, $B$, and $C$. By letting
$\v{r} = \vect{OR}$, $\v{a} = \vect{OA}$, etc., we deduce that $$
\v{r}- \v{a} = p(\v{b} - \v{a}) + q(\v{c} - \v{a}).$$ Thus we have
the following definition.
\begin{df}
The {\em parametric equation} of a plane containing the point $A$,
and parallel to the vectors $\v{u}$ and  $\v{v}$ is given by
$$
\v{r} -\v{a} = p\v{u} + q\v{v}.$$ Componentwise this takes the
form
$$\begin{array}{c}x - a_1 = pu_1  + qv_1, \\ y - a_2 = pu_2  + qv_2, \\ z - a_3 = pu_3  + qv_3. \end{array} $$
The {\em Cartesian} equation of a plane is an equation of the form
$ax + by + cz = d$ with $(a, b, c, d) \in \BBR^4$ and $a^2 + b^2 +
c^2 \neq 0$.
\end{df}
\begin{exa}\label{exa:eqn_of_plane1}
Find both the parametric equation and the Cartesian equation of
the plane parallel to the vectors $\colvec{1
\\ 1 \\ 1}$ and $\colvec{1 \\ 1\\ 0}$ and passing through the
point $\colpoint{0 \\ -1\\ 2}$.
\end{exa}
\begin{solu}The desired parametric equation is
$$ \colvec{x \\ y + 1 \\ z-2} = s\colvec{1\\ 1\\ 1} + t\colvec{1\\ 1\\ 0}.    $$
This gives $s = z-2$, $t = y+1 - s = y + 1 -z+2 = y-z + 3$ and $x
= s + t = z-2 + y-z+3 = y+1$. Hence the Cartesian equation is $x-y
=1$.
\end{solu}
\begin{thm}\label{thm:alternative_eq_planeR3} Let $\v{u}$ and $\v{v}$ be non-parallel vectors
and let $\v{r} - \v{a} = p\v{u} + q\v{v}$ be the equation of the
plane containing $A$ and parallel to the vectors $\v{u}$ and
$\v{v}$. If $\v{n}$ is simultaneously perpendicular to $\v{u}$ and
$\v{v}$ then $$(\v{r} - \v{a})\bulletproduct \v{n} = 0.   $$
Moreover, the vector $\colvec{a\\ b \\ c}$ is normal to the plane
with Cartesian equation $ax + by + cz = d$.
\end{thm}
\begin{pf}
The first part is clear, as $\dotprod{u}{n} = 0 = \dotprod{v}{n}$.
For the second part, recall that at least one of $a, b, c$ is
non-zero. Let us assume $a \neq 0$. The argument is similar if one
of the other letters is non-zero and $a = 0$. In this case we can
see that $$ x =\frac{d}{a} -\frac{b}{a}y - \frac{c}{a}z.  $$ Put
$y = s$ and $z = t$. Then
$$\colvec{x - \frac{d}{a}\\ y \\ z} = s\colvec{-\frac{b}{a} \\ 1 \\ 0} + t\colvec{-\frac{c}{a} \\ 0 \\ 1}   $$
is a parametric equation for the plane.   \end{pf}
\begin{exa}
Find once again, by appealing to Theorem
\ref{thm:alternative_eq_planeR3}, the Cartesian equation of the
plane parallel to the vectors $\colvec{1
\\ 1 \\ 1}$ and $\colvec{1 \\ 1\\ 0}$ and passing through the
point $\colpoint{0 \\ -1\\ 2}$.
\end{exa}
\begin{solu}The vector $\colvec{1
\\ 1 \\ 1}\cross \colvec{1 \\ 1\\ 0} = \colvec{-1\\ 1 \\ 0}$ is normal to the
plane. The plane has thus equation $$ \colvec{x \\
y + 1 \\ z-2}\bulletproduct \colvec{-1\\ 1 \\ 0} = 0 \implies -x +
y + 1 = 0 \implies x-y= 1,
$$as obtained before.
\end{solu}
\begin{thm}[Distance Between a Point and a Plane]
\label{thm:distance_point_planeR3}\index{distance!between a point
and a plane} Let $(\v{r} - \v{a})\bulletproduct\v{n} = 0$ be a
plane passing through the point $A$ and perpendicular to vector
$\v{n}$. If $B$ is not a point on the plane, then the distance
from $B$ to the plane is
$$\frac{\left|(\v{a} -
\v{b})\bulletproduct\v{n}\right|}{\norm{\v{n}}}.$$
\end{thm}
\begin{pf}
Let $R_0$ be the point on the plane that is nearest to $B$. Then
$\vect{BR_0} = \v{r_0} - \v{b}$ is orthogonal to the plane, and
the distance we seek is
$$ ||\proj{\v{r_0} - \v{b}}{n}|| = \left|\left| \frac{(\v{r_0} - \v{b})\bulletproduct\v{n}}{\norm{\v{n}}^2}\v{n}\right|\right| =
\frac{|(\v{r_0} - \v{b})\bulletproduct\v{n}|}{\norm{\v{n}}}.$$
Since $R_0$ is on the plane, $\dotprod{r_0}{n} = \dotprod{a}{n},$
and so
$$ ||\proj{\v{r_0} - \v{b}}{n}|| = \frac{|\dotprod{r_0}{n} -
\dotprod{b}{n}|}{\norm{\v{n}}} = \frac{|\dotprod{a}{n} -
\dotprod{b}{n}|}{\norm{\v{n}}} = \frac{|(\v{a} -
\v{b})\bulletproduct\v{n}|}{\norm{\v{n}}},$$ as we wanted to shew.
\end{pf}





\begin{rem}
Given three planes in space, they may (i) be parallel (which
allows for some of them to coincide), (ii) two may be parallel and
the third intersect each of the other two at a line, (iii)
intersect at a line, (iv) intersect at a point.
\end{rem}

\begin{df}The equation of a line passing through $A\in\BBR^3$ in the
direction of $\v{v} \neq \v{0}$ is given by $$ \v{r} - \v{a} =
t\v{v}, \ \ t\in\BBR.  $$
\end{df}

\begin{thm}Put $\vect{OA} = \v{a}$, $\vect{OB} = \v{b}$, and  $\vect{OC} =
\v{c}$. Points $(A, B, C)\in (\BBR^3)^3$ are collinear if and only
if
$$\crossprod{a}{b} + \crossprod{b}{c} + \crossprod{c}{a} = \v{0}.$$
\label{thm:coll_in_r3}\end{thm}
\begin{pf}
If the points $A, B, C$ are collinear, then $\vect{AB}$ is
parallel to $\vect{AC}$ and by Corollary
\ref{cor:a_cross_parallel_is_0}, we must have
$$(\v{c} - \v{a})\cross (\v{b} - \v{a}) = \v{0}.$$
Rearranging, gives
$$\crossprod{c}{b} - \crossprod{c}{a} - \crossprod{a}{b} =
\v{0}.$$Further rearranging completes the proof.
\end{pf}
\begin{thm}[Distance Between a Point and a Line]
\label{thm:distance_point_lineR3}\index{distance!between a point
and a line!in space} Let $L: \v{r} = \v{a} + \lambda \v{v}, \ \
\v{v} \neq \v{0},$ be a line and let $B$ be a point not on $L$.
Then the distance from $B$ to $L$ is given by
$$ \frac{||(\v{a} - \v{b})\cross\v{v}||}{\norm{\v{v}}}.$$
\end{thm}
\begin{pf}
If $R_0$---with position vector $\v{r_0}$---is the point on $L$
that is at shortest distance from $B$ then $\vect{BR_0}$ is
perpendicular to the line, and so
$$||\vect{BR_0}\cross \v{v}|| =
||\vect{BR_0}||\norm{\v{v}}\sin\frac{\pi}{2} =
||\vect{BR_0}||\norm{\v{v}}.$$ The distance we must compute is
$\norm{\vect{BR_0}} = ||\v{r_0} - \v{b}||$, which is then given by
$$||\v{r_0} - \v{b}|| = \frac{||\vect{BR_0}\cross \v{v}||}{\norm{\v{v}}} =
\frac{||(\v{r_0} - \v{b}) \cross \v{v}||}{\norm{\v{v}}}.$$ Now,
since $R_0$ is on the line $\exists t_0\in\BBR$ such that $\v{r_0} =
\v{a} + t_0\v{v}$. Hence
$$(\v{r_0} - \v{b}) \cross
\v{v} = (\v{a} - \v{b})\cross\v{v},$$ giving
$$||\v{r_0} - \v{b}|| = \frac{||(\v{a} - \v{b})\cross\v{v}||}{\norm{\v{v}}},$$
proving the theorem.   \end{pf}
\begin{rem}
Given two lines in space, one of the following three situations
might arise: (i) the lines intersect at a point, (ii) the lines
are parallel, (iii) the lines are skew (one over the other,
without intersecting).
\end{rem}




\section*{\psframebox{Homework}}

\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900


\begin{pro}
Find the equation of the plane passing through the points $(a, 0,
a)$, $(-a, 1, 0)$, and $(0, 1, 2a)$ in $\BBR^3$.
\begin{answer} The vectors $$\colvec{a - (-a) \\ 0 - 1 \\ a - 0} = \colvec{2a \\
-1 \\ a}
$$ and $$\colvec{0 - (-a) \\ 1 - 1 \\ 2a - 0} = \colvec{a \\
0 \\ 2a}
$$are coplanar. A vector normal to the plane is
$$ \colvec{2a \\
-1 \\ a} \cross   \colvec{a \\
0 \\ 2a} =  \colvec{-2a\\
-3a^2 \\ a}. $$ The equation of the plane is thus given by
$$ \colvec{-2a\\
-3a^2 \\ a}\bulletproduct \colvec{x - a\\
y - 0 \\ z - a} = 0,        $$ that is,
$$2ax +3a^2y - az   = a^2.   $$
\end{answer}
\end{pro}
\begin{pro}
Find the equation of the plane containing the point $(1, 1, 1)$ and
perpendicular to the line $x = 1 + t, y = -2t, z = 1 - t$.
\begin{answer} The vectorial form of the equation of the line is
$$\v{r} = \colvec{1 \\ 0 \\ 1} + t\colvec{1 \\ -2 \\ -1}.$$
Since the line follows the direction of $\colvec{1 \\ -2 \\ -1}$,
this means that $\colvec{1 \\ -2 \\ -1}$ is normal to the plane,
and thus the equation of the desired plane is
$$(x - 1) - 2(y - 1) - (z - 1) = 0.$$
\end{answer}
\end{pro}
\begin{pro}
Find the equation of the plane containing the point $(1, -1, -1)$ and
containing the line $x = 2y = 3z$. \begin{answer} Observe that $(0,
0, 0)$ (as $0 = 2(0) = 3(0)$) is on the line, and hence on the
plane. Thus the vector
$$\colvec{1 - 0 \\ -1 - 0 \\ -1 - 0} = \colvec{1 \\ -1 \\
-1}$$lies on the plane. Now, if $x = 2y = 3z = t$, then $x = t, y
= t/2, z = t/3$. Hence, the vectorial form of the equation of the
line is
$$\v{r} = \colvec{0 \\ 0 \\ 0} + t\colvec{1 \\ 1/2 \\ 1/3} = t\colvec{1 \\ 1/2 \\ 1/3}.$$
This means that $\colvec{1 \\ 1/2 \\ 1/3}$ also lies on the plane,
and thus
$$\colvec{1 \\ -1 \\
-1} \cross \colvec{1 \\ 1/2 \\ 1/3} =  \colvec{1/6\\ -4/3\\
3/2}$$is normal to the plane. The desired equation is thus
$$\frac{1}{6}x  - \frac{4}{3}y + \frac{3}{2}z = 0.$$
\end{answer}
\end{pro}
\begin{pro}
Find the equation of the plane perpendicular to
 the line $ax = by = cz,\ \ \ abc \neq 0$ and passing through the
 point $(1, 1, 1)$ in $\BBR^3$.
\begin{answer}  Put  $ax = by = cz = t$, so $x = t/a; y = t/b; z =
t/c$. The parametric equation of the line is $$\colvec{x \\ y \\
z} = t\colvec{1/a \\ 1/b \\
1/c}, \ \ \ t\in \BBR.
$$ Thus the vector $\colvec{1/a \\ 1/b \\
1/c}$ is perpendicular to the plane. Therefore, the equation of
the plane is $$\colvec{1/a \\ 1/b \\
1/c}\bulletproduct \colvec{x - 1 \\ y - 1 \\
z - 1}  = 0,  $$ or $$ \frac{x}{a} + \frac{y}{b} + \frac{z}{c} =
\frac{1}{a} + \frac{1}{b} + \frac{1}{c}.  $$ We may also write
this as $$bcx + cay + abz = ab + bc + ca.   $$
\end{answer}
\end{pro}


\begin{pro}
Find the equation of the line perpendicular to the plane $ax +
a^2y + a^3z = 0,\ \
 a \neq 0$ and passing through the point $(0, 0, 1)$.
\begin{answer} A vector normal to the plane is $ \colvec{a \\ a^2 \\
 a^3}$. The line sought has the same direction as this vector,
 thus the equation of the line is $$ \colvec{x \\ y \\
 z} = \colvec{0\\ 0 \\
 1} + t\colvec{a \\ a^2 \\
 a^3}, \ \ \ t\in\BBR. $$
\end{answer}
\end{pro}
\begin{pro}
The two planes $$ x - y - z = 1,\ \ \ \ \ \ x - z = -1,
$$intersect at a line. Write the equation of this line in the form
$$\colvec{x\\ y \\ z} = \v{a} + t\v{v}, \ \ t\in\BBR .
$$
\begin{answer} We have $$ x - z - y = 1 \implies -1 - y = 1 \implies y =
-2.
$$
Hence if $z = t$, $$ \colvec{x\\ y \\ z } = \colvec{t - 1\\ -2 \\
t} = \colvec{-1 \\ -2 \\ 0} + t\colvec{1 \\ 0 \\ 1}.
$$
\end{answer}
\end{pro}
\begin{pro}
Find the equation of the plane passing through the points
$\colpoint{1\\ 0\\ -1}$, \ \  $\colpoint{2\\ 1\\ 1}$ and parallel
to the line $ \colvec{x\\ y \\ z }  = \colvec{-1 \\ -2 \\ 0} +
t\colvec{1 \\ 0 \\ 1}
$. \begin{answer} The vector $$\colvec{2-1\\ 1 - 0 \\ 1 - (-1)} = \colvec{1\\
1 \\ 2}
$$lies on the plane. The vector $$\colvec{1 \\ 0 \\ 1} \cross \colvec{1 \\ 1\\ 2}  =  \colvec{-1\\ -1 \\ 1} $$
is normal to the plane. Hence the equation of the plane is
$$\colvec{-1 \\ -1 \\ 1}\bulletproduct \colvec{x -1 \\ y \\ z + 1} = 0  \implies x+y-z=2.  $$
\end{answer}
\end{pro}
\begin{pro}
Points ${\bf a, b, c}$ in $\BBR^3$ are collinear and it is known
that $\crossprod{a}{c} = \v{i} - 2\v{j}$ and $\crossprod{a}{b} =
2\v{k} - 3\v{i}$. Find $\crossprod{b}{c}$. \begin{answer} We have
$\crossprod{c}{a} = -\v{i} + 2\v{j}$ and $\crossprod{a}{b} = 2\v{k}
- 3\v{i}$. By Theorem \ref{thm:coll_in_r3}, we have
$$\crossprod{b}{c} = -\crossprod{a}{b} - \crossprod{c}{a} =  -2\v{k} + 3\v{i} +\v{i} - 2\v{j}
= 4\v{i} - 2\v{j} - 2\v{k}.$$
\end{answer}
\end{pro}
\begin{pro}
Find the equation of the plane which is equidistant of the points
$\colpoint{3\\ 2\\ 1}$ and $\colpoint{1\\ -1\\ 1}$.
\begin{answer}  $4x + 6y =11 $ \end{answer}
\end{pro}
\begin{pro}[{\red\bf Putnam Exam, 1980}]
Let $S$ be the solid in three-dimensional space consisting of all
points $(x, y, z)$ satisfying the following system of six
conditions: $$x \geq 0, \ \ \ y \geq 0, \ \ \ z \geq 0,$$ $$x + y
+ z \leq 11,$$ $$2x + 4y + 3z \leq 36,$$ $$2x + 3z \leq
24.$$Determine the number of vertices and the number of edges of
$S$. \begin{answer} There are $7$ vertices ($V_0 = (0, 0, 0), V_1
= (11, 0, 0)$, $V_2 = (0, 9, 0), V_3 = (0, 0, 8)$, $V_4 = (0, 3,
8)$, $V_5 = (9, 0, 2)$, $V_6 = (4, 7, 0)$) and $11$ edges
($V_0V_1$, $V_0V_2$, $V_0V_3$, $V_1V_5$, $V_1V_6$, $V_2V_4$,
$V_2V_6$, $V_3V_4$, $V_3V_5$, $V_4V_5$, and $V_4V_6$).
\end{answer}
\end{pro}

\end{multicols}






\section{$\BBR^n$} \index{vector!n-dimensional} As
a generalisation of $\BBR^2$ and $\BBR^3$ we define $\BBR^n$ as
the set of $n$-tuples $$\left\{ \colvec{x_1\\x_2 \\ \vdots \\
x_n}: x_i\in\BBR \right\}.$$ The dot product of two vectors in
$\BBR^n$ is defined as
$$\dotprod{x}{y} = \colvec{x_1\\x_2 \\ \vdots \\
x_n}\bulletproduct \colvec{y_1\\y_2 \\ \vdots \\
y_n} = x_1y_1 + x_2y_2 + \cdots + x_ny_n.$$ The norm of a vector in
$\BBR^n$ is given by  $$ \norm{\v{x}} = \sqrt{\dotprod{x}{x}}.
$$





As in the case of $\BBR^2$ and $\BBR^3$ we have
\begin{thm}[Cauchy-Bunyakovsky-Schwarz Inequality] \index{inequality!Cauchy-Bunyakovsky-Schwarz!in Rn}Given $(\v{x}, \v{y})\in
(\BBR^n)^2$ the following inequality holds
$$ |\dotprod{x}{y}| \leq \norm{\v{x}}\norm{\v{y}}. $$

\end{thm}
\begin{pf}
Put $\dis{a = \sum _{k = 1} ^n x_k ^2}$, $\dis{b = \sum _{k = 1}
^n x_ky_k }$, and $\dis{c = \sum _{k = 1} ^n y_k ^2}$. Consider
$$f(t) = \sum _{k = 1} ^n (tx_k - y_k)^2 = t^2\sum _{k = 1} ^n x_k
^2 - 2t \sum _{k = 1} ^n x_ky_k +  \sum _{k = 1} ^n y_k ^2 = at^2
- 2bt + c.
$$ This is a quadratic polynomial which is non-negative for all
real $t$, so it cannot have two distinct real roots. Its discriminant $4b^2 -
4ac$ must be non-positive, from where we gather
$$4\left(\sum _{k = 1} ^n x_ky_k\right)^2 \leq 4\left(\sum _{k = 1} ^n x_k
^2 \right)\left(\sum _{k = 1} ^n y_k ^2 \right).   $$ This gives
$$ |\dotprod{x}{y}|^2 \leq \norm{\v{x}}^2\norm{\v{y}}^2
$$from where we deduce the result.
\end{pf}
\begin{exa}
Assume that $a_k, b_k, c_k, k = 1, \ldots, n$, are positive real
numbers. Shew that
$$\left(\sum _{k = 1} ^n a_kb_kc_k\right)^{4}
\leq \left(\sum _{k = 1} ^n a_k ^4\right)\left(\sum _{k = 1} ^n
b_k ^4\right) \left(\sum _{k = 1} ^n c_k ^2\right)^{2}.$$\end{exa}
\begin{solu}Using CBS on $\sum _{k = 1} ^n (a_kb_k)c_k$ once we
obtain
$$\sum _{k = 1} ^n a_kb_kc_k
\leq \left(\sum _{k = 1} ^n a_k ^2b_k ^2\right)^{1/2} \left(\sum
_{k = 1} ^n c_k ^2\right)^{1/2}.
$$Using CBS again on $\left(\sum _{k = 1} ^n a_k ^2b_k ^2\right)^{1/2}$ we obtain
$$
\begin{array}{lll}
\sum _{k = 1} ^n a_kb_kc_k  & \leq &
 \left(\sum _{k = 1} ^n a_k ^2 b_k ^2\right)^{1/2}
\left(\sum _{k = 1} ^n c_k ^2\right)^{1/2} \\
  & \leq & \left(\sum _{k = 1} ^n a_k ^4\right)^{1/4}
\left(\sum _{k = 1} ^n b_k ^4\right)^{1/4}
\left(\sum _{k = 1} ^n c_k ^2\right)^{1/2}, \\
\end{array}
$$which gives the required inequality.
\end{solu}

\begin{thm}[Triangle Inequality] \index{inequality!triangle!in Rn}Given $(\v{x}, \v{y})\in
(\BBR^n)^2$ the following inequality holds
$$ \norm{\v{x} + \v{y}}\leq \norm{\v{x}} + \norm{\v{y}}. $$

\end{thm}
\begin{pf}We have
$$\begin{array}{lll}
||\v{a} + \v{b}||^2 & = & (\v{a} + \v{b})\bulletproduct (\v{a} + \v{b}) \\
& = & \v{a}\bulletproduct\v{a} + 2\v{a}\bulletproduct\v{b} +
\v{b}\bulletproduct\v{b} \\
& \leq & ||\v{a}||^2  + 2||\v{a}||||\v{b}|| +
||\v{b}||^2 \\
& = & (||\v{a}|| + ||\v{b}||)^2,
\end{array}$$from where the desired result follows.

\end{pf}
We now consider a generalisation of the Euclidean norm. Given $p
> 1$ and $\v{x}\in \BBR^n$ we put
\begin{equation}  \norm{\v{x}}_p  = \left(\sum _{k = 1} ^n |x_k| ^p\right)^{1/p} \label{eq:p_norm}\end{equation}
Clearly \begin{equation} \norm{\v{x}}_p \geq 0 \end{equation}
\begin{equation} \norm{\v{x}}_p = 0 \Leftrightarrow \v{x} = \v{0}  \end{equation}
\begin{equation}  \norm{\alpha\v{x}}_p  = |\alpha|\norm{\v{x}}_p, \ \  \alpha \in
\BBR\end{equation}We now prove analogues of the
Cauchy-Bunyakovsky-Schwarz and the Triangle Inequality for
$\norm{\cdot}_p$. For this we need the following lemma.
\begin{lem}[Young's Inequality] Let $p > 1$ and put $\dfrac{1}{p} + \dfrac{1}{q} = 1$. Then for $(a, b)\in ([0;+\infty[)^2$ we have
$$ ab \leq \frac{a^p}{p} + \frac{b^q}{q}. $$      \end{lem}
\begin{pf} Let $0 < k < 1$, and consider the function
 $$\fun{f}{x}{x^k - k(x - 1)}{[0;+\infty[}{\BBR}. $$ Then $0 =
 f'(x) = kx^{k - 1} - k \Leftrightarrow x = 1$. Since $f''(x) = k(k - 1)x^{k - 2} <
 0$ for $0 < k < 1, x \geq 0$, $x = 1$ is a maximum point. Hence
 $f(x) \leq f(1)$ for $x \geq 0$, that is $x^k \leq 1 + k(x - 1)$.
 Letting $k = \dfrac{1}{p}$ and $x = \dfrac{a^p}{b^q}$ we deduce
 $$ \frac{a}{b^{q/p}} \leq 1  + \frac{1}{p}\left(\frac{a^p}{b^q} - 1\right).  $$
Rearranging gives
$$ ab \leq b^{1 + q/p} + \frac{a^pb^{1 + q/p - q}}{p} - \frac{b^{1 + q/p}}{p}  $$
from where we obtain the inequality. \end{pf} The promised
generalisation of the Cauchy-Bunyakovsky-Schwarz Inequality is
given in the following theorem.

\begin{thm}[H\"{o}lder Inequality] \index{inequality!H\"{o}lder} Given $(\v{x}, \v{y})\in
(\BBR^n)^2$ the following inequality holds
$$ |\dotprod{x}{y}| \leq \norm{\v{x}}_p\norm{\v{y}}_q. $$

\end{thm}
\begin{pf}
If $\norm{\v{x}}_p = 0$ or $\norm{\v{y}}_q = 0$ there is nothing to
prove, so assume otherwise. From the Young Inequality we have
$$ \frac{|x_k|}{\norm{\v{x}}_p}\frac{|y_k|}{{\norm{\v{y}}_q}}  \leq   \frac{|x_k| ^p}{{\norm{\v{x}}_p}^pp}
+ \frac{|y_k| ^q}{{\norm{\v{y}}_q}^qq}.
$$Adding, we deduce

$$ \begin{array}{lll}\sum _{k = 1} ^n  \dfrac{|x_k|}{\norm{\v{x}}_p}\dfrac{|y_k|}{{\norm{\v{y}}_q}} &
\leq&  \dfrac{1}{{\norm{\v{x}}_p}^pp}\sum _{k = 1} ^n |x_k| ^p
+ \dfrac{1}{{\norm{\v{y}}_q}^qq}\sum _{k = 1} ^n |y_k| ^q \\
& =  & \dfrac{{\norm{\v{x}}_p}^p}{{\norm{\v{x}}_p}^pp} +
\dfrac{{\norm{\v{y}}_q}^q}{{\norm{\v{y}}_q}^qq}\\ &  = &
\dfrac{1}{p} + \dfrac{1}{q}\\  & =  & 1. \end{array}
$$
This gives $$ \sum _{k = 1} ^n |x_ky_k| \leq
\norm{\v{x}}_p\norm{\v{y}}_q.
$$The result follows by observing that
$$ \left| \sum _{k = 1} ^n x_ky_k \right|\leq\sum _{k = 1} ^n
|x_ky_k| \leq \norm{\v{x}}_p\norm{\v{y}}_q.
$$

\end{pf}




 As a generalisation of the Triangle
Inequality we have
\begin{thm}[Minkowski Inequality] Let $p \in ]1; +\infty[$. Given $(\v{x}, \v{y})\in
(\BBR^n)^2$ the following inequality holds
$$ \norm{\v{x} + \v{y}}_p \leq \norm{\v{x}}_p + \norm{\v{y}}_p. $$
\label{thm:minkowski_inequality}\index{inequality!Minkowski}\end{thm}
\begin{pf}From the triangle inequality for real numbers \ref{ineq:triangle_real_numbers} $$|x_k + y_k|^p
= |x_k + y_k||x_k + y_k|^{p - 1} \leq \left(|x_k| + |y_k|\right)|x_k
+ y_k|^{p - 1}.$$ Adding \begin{equation}  \sum _{k = 1} ^n |x_k +
y_k|^p \leq \sum _{k = 1} ^n |x_k||x_k + y_k|^{p - 1} + \sum _{k =
1} ^n |y_k||x_k + y_k|^{p - 1}.\label{eq:minkowski_1}\end{equation}
By the H\"{o}lder Inequality \begin{equation}\begin{array}{lll} \sum
_{k = 1} ^n |x_k||x_k + y_k|^{p - 1}  & \leq & \left(\sum _{k = 1}
^n |x_k|^p\right)^{1/p}\left(\sum _{k = 1} ^n|x_k + y_k|^{(p -
1)q}\right)^{1/q} \\ & = &  \left(\sum _{k = 1} ^n
|x_k|^p\right)^{1/p}\left(\sum _{k = 1} ^n|x_k +
y_k|^{p}\right)^{1/q}\\ & = & \norm{\v{x}}_p\norm{\v{x} + \v{y}}_p
^{p/q}
\end{array}\label{eq:minkowski_2}\end{equation}
In the same manner we deduce \begin{equation}\sum _{k = 1} ^n
|y_k||x_k + y_k|^{p - 1} \leq \norm{\v{y}}_p\norm{\v{x} + \v{y}}_p
^{p/q}\label{eq:minkowski_3}.\end{equation} Hence
(\ref{eq:minkowski_1}) gives
$$ \norm{\v{x}
+ \v{y}}_p ^p = \sum _{k = 1} ^n |x_k + y_k|^p \leq
\norm{\v{x}}_p\norm{\v{x} + \v{y}}_p ^{p/q} +
\norm{\v{y}}_p\norm{\v{x} + \v{y}}_p ^{p/q}, $$from where we deduce
the result.
\end{pf}

\section*{\psframebox{Homework}}
\begin{multicols}{2}\columnseprule 1pt \columnsep 25pt\multicoltolerance=900

\begin{pro}Prove Lagrange's identity:
     $$ \begin{array}{lll}\left(\sum_{1\leq j\leq n} a_jb_j \right)^2
      & = &
      \left(\sum_{1\leq j\leq n}a_j^2\right)
      \left(\sum_{1\leq j\leq n}b_j^2\right)
   \\ & &\qquad   -
      \sum_{1\leq k < j\leq n}(a_kb_j-a_jb_k)^2
     \end{array}$$
    and then deduce the CBS Inequality in $\BBR^n$.
\end{pro}


\begin{pro}Let $\v{a}_i\in\BBR^n$ for $1 \leq i \leq n$ be unit vectors with $\sum _{i=1} ^n \v{a_i} = \v{0}$. Prove
that $\sum_{1\leq i<j\leq n} \dotprod{a_i}{a_j} = -\dfrac{n}{2}$.
\begin{answer} Expand $\norm{\sum _{i=1} ^n \v{a_i}}^2 = 0$. \end{answer}
\end{pro}
\begin{pro}
Let $a_k > 0$. Use the CBS Inequality to shew that
$$ \left(\sum _{k = 1} ^n a_k ^2\right)\left(\sum _{k = 1} ^n \frac{1}{a_k ^2}\right) \geq n^2.  $$
\begin{answer} Observe that $\sum _{k = 1} ^n 1 = n$. Then we have $$ n^2 =
\left(\sum _{k = 1} ^n 1\right)^2 =\left(\sum _{k = 1} ^n
(a_k)\left(\frac{1}{a_k}\right)\right)^2  \leq \left(\sum _{k = 1}
^n a_k ^2\right)\left(\sum _{k = 1} ^n \frac{1}{a_k ^2}\right),$$
giving the result.
\end{answer}
\end{pro}
\begin{pro}
Let $\v{a}\in \BBR^n$ be a fixed vector. Demonstrate that $$ X =
\{\v{x}\in\BBR^n: \dotprod{a}{x} = 0\}$$is a subspace of $\BBR^n$.
\begin{answer}
Take $(\v{u}, \v{v})\in X^2$ and $\alpha\in \BBR$. Then
$$\v{a}\bulletproduct (\v{u}+\alpha \v{v}) = \dotprod{a}{u} + \alpha \dotprod{a}{v} = 0 + 0 = 0,
$$proving that $X$ is a vector subspace of $\BBR^n$.
\end{answer}
\end{pro}

\begin{pro}
Let $\v{a_i}\in\BBR^n$, $1 \leq i \leq k$ ($k \leq n$) be $k$
non-zero vectors such that $\dotprod{a_i}{a_j} = 0$ for $i\neq j$.
Prove that these $k$ vectors are linearly independent.
\begin{answer}
Assume that $$ \lambda_1\v{a_1} + \cdots + \lambda_k\v{a_k} =
\v{0}.
$$Taking the dot product with $\v{a_j}$ and using the fact
that $\dotprod{a_i}{a_j} = 0$ for $i\neq j$  we obtain
$$  0 = \dotprod{0}{a_j} = \lambda_j\v{a_j}\bulletproduct \v{a_j} = \lambda_j \norm{a_j}^2.  $$
Since $\v{a_j} \neq \v{0} \implies \norm{a_j}^2 \neq 0$, we must
have $\lambda_j = 0$. Thus the only linear combination giving the
zero vector is the trivial linear combination, which proves that
the vectors are linearly independent.
\end{answer}
\end{pro}
\begin{pro}
Let $a_k \geq 0, 1 \leq k \leq n$ be arbitrary. Prove that
  $$ \left(\sum _{k = 1} ^n a_k\right)^2\leq \frac{n(n + 1)(2n + 1)}{6}\sum _{k = 1} ^n \frac{{a_k ^2}}{k^2}.$$
\begin{answer}  This follows at once from the CBS
  Inequality by putting $$\v{v} = \colvec{\frac{a_1}{1}\\
  \frac{a_2}{2}\\
  \vdots \\ \frac{a_n}{n}}, \ \ \v{u} = \colvec{1\\  2\\ \vdots\\
  n}
  $$and noticing that
  $$\sum _{k = 1} ^n k^2 = \frac{n(n + 1)(2n + 1)}{6}.$$
\end{answer}
\end{pro}
\end{multicols}






\Closesolutionfile{linearans}

\appendix

\renewcommand{\chaptername}{Appendix}
\chapter{Answers and Hints}\addcontentsline{toc}{section}{Answers and Hints}
\markright{Answers and Hints} {\small\input{linearansC1}}




\begin{thebibliography}{9}
\bibitem[Cul]{Cul} CULLEN, C., {\bf Matrices and Linear Transformations},  2nd ed., New York: Dover Publications, 1990.
\bibitem[Del]{Del} DELODE, C., {\bf G\'{e}om\'{e}trie Affine et Euclidienne}, Paris: Dunod, 2000.


\bibitem[Fad]{Fad} FADD\'{E}EV, D.,  SOMINSKY, I., {\bf Recueil d'Exercises d'Alg\`{e}bre Sup\'{e}rieure},
7th ed., Paris: Ellipses, 1994.
\bibitem[Hau]{Hau} HAUSNER, M., {\bf A Vector Space Approach to Geometry},  New York: Dover, 1998.

\bibitem[Lan]{Lan} LANG, S., {\bf Introduction to Linear Algebra}, 2nd
ed., New York: Springer-Verlag, 1986.
\bibitem[Pro]{Pro} PROSKURYAKOV, I. V., {\bf Problems in Linear Algebra}, Moscow: Mir Publishers, 1978.
\bibitem[Riv]{Riv} RIVAUD, J., {\bf Ejercicios de \'{a}lgebra}, Madrid: Aguilar, 1968.
\bibitem[Tru]{Tru} TRUFFAULT, B., {\bf G\'{e}om\'{e}trie \'{E}l\'{e}mentaire: Cours et exercises},  Paris: Ellipses, 2001.

\end{thebibliography}

\lhead{}\chead{}\rhead{}

{\tiny\chapter*{\rlap{GNU Free Documentation License}}
\phantomsection  % so hyperref creates bookmarks
\addcontentsline{toc}{chapter}{GNU Free Documentation License}
%\label{label_fdl}

 \begin{center}

       Version 1.2, November 2002


 Copyright \copyright{} 2000,2001,2002  Free Software Foundation, Inc.

 \bigskip

     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

 \bigskip

 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.
\end{center}


\begin{center}
{\bf\large Preamble}
\end{center}

The purpose of this License is to make a manual, textbook, or other
functional and useful document ``free'' in the sense of freedom: to
assure everyone the effective freedom to copy and redistribute it,
with or without modifying it, either commercially or
noncommercially. Secondarily, this License preserves for the author
and publisher a way to get credit for their work, while not being
considered responsible for modifications made by others.

This License is a kind of ``copyleft'', which means that derivative
works of the document must themselves be free in the same sense.  It
complements the GNU General Public License, which is a copyleft
license designed for free software.

We have designed this License in order to use it for manuals for
free software, because free software needs free documentation: a
free program should come with manuals providing the same freedoms
that the software does.  But this License is not limited to software
manuals; it can be used for any textual work, regardless of subject
matter or whether it is published as a printed book.  We recommend
this License principally for works whose purpose is instruction or
reference.


\begin{center}
{\Large\bf 1. APPLICABILITY AND DEFINITIONS\par} \phantomsection
\addcontentsline{toc}{section}{1. APPLICABILITY AND DEFINITIONS}
\end{center}

This License applies to any manual or other work, in any medium,
that contains a notice placed by the copyright holder saying it can
be distributed under the terms of this License.  Such a notice
grants a world-wide, royalty-free license, unlimited in duration, to
use that work under the conditions stated herein.  The
``\textbf{Document}'', below, refers to any such manual or work. Any
member of the public is a licensee, and is addressed as
``\textbf{you}''.  You accept the license if you copy, modify or
distribute the work in a way requiring permission under copyright
law.

A ``\textbf{Modified Version}'' of the Document means any work
containing the Document or a portion of it, either copied verbatim,
or with modifications and/or translated into another language.

A ``\textbf{Secondary Section}'' is a named appendix or a
front-matter section of the Document that deals exclusively with the
relationship of the publishers or authors of the Document to the
Document's overall subject (or to related matters) and contains
nothing that could fall directly within that overall subject. (Thus,
if the Document is in part a textbook of mathematics, a Secondary
Section may not explain any mathematics.)  The relationship could be
a matter of historical connection with the subject or with related
matters, or of legal, commercial, philosophical, ethical or
political position regarding them.

The ``\textbf{Invariant Sections}'' are certain Secondary Sections
whose titles are designated, as being those of Invariant Sections,
in the notice that says that the Document is released under this
License.  If a section does not fit the above definition of
Secondary then it is not allowed to be designated as Invariant.  The
Document may contain zero Invariant Sections.  If the Document does
not identify any Invariant Sections then there are none.

The ``\textbf{Cover Texts}'' are certain short passages of text that
are listed, as Front-Cover Texts or Back-Cover Texts, in the notice
that says that the Document is released under this License.  A
Front-Cover Text may be at most 5 words, and a Back-Cover Text may
be at most 25 words.

A ``\textbf{Transparent}'' copy of the Document means a
machine-readable copy, represented in a format whose specification
is available to the general public, that is suitable for revising
the document straightforwardly with generic text editors or (for
images composed of pixels) generic paint programs or (for drawings)
some widely available drawing editor, and that is suitable for input
to text formatters or for automatic translation to a variety of
formats suitable for input to text formatters.  A copy made in an
otherwise Transparent file format whose markup, or absence of
markup, has been arranged to thwart or discourage subsequent
modification by readers is not Transparent. An image format is not
Transparent if used for any substantial amount of text.  A copy that
is not ``Transparent'' is called ``\textbf{Opaque}''.

Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format, SGML
or XML using a publicly available DTD, and standard-conforming
simple HTML, PostScript or PDF designed for human modification.
Examples of transparent image formats include PNG, XCF and JPG.
Opaque formats include proprietary formats that can be read and
edited only by proprietary word processors, SGML or XML for which
the DTD and/or processing tools are not generally available, and the
machine-generated HTML, PostScript or PDF produced by some word
processors for output purposes only.

The ``\textbf{Title Page}'' means, for a printed book, the title
page itself, plus such following pages as are needed to hold,
legibly, the material this License requires to appear in the title
page.  For works in formats which do not have any title page as
such, ``Title Page'' means the text near the most prominent
appearance of the work's title, preceding the beginning of the body
of the text.

A section ``\textbf{Entitled XYZ}'' means a named subunit of the
Document whose title either is precisely XYZ or contains XYZ in
parentheses following text that translates XYZ in another language.
(Here XYZ stands for a specific section name mentioned below, such
as ``\textbf{Acknowledgements}'', ``\textbf{Dedications}'',
``\textbf{Endorsements}'', or ``\textbf{History}''.) To
``\textbf{Preserve the Title}'' of such a section when you modify
the Document means that it remains a section ``Entitled XYZ''
according to this definition.

The Document may include Warranty Disclaimers next to the notice
which states that this License applies to the Document.  These
Warranty Disclaimers are considered to be included by reference in
this License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and has
no effect on the meaning of this License.


\begin{center}
{\Large\bfseries 2. VERBATIM COPYING\par} \phantomsection
\addcontentsline{toc}{section}{2. VERBATIM COPYING}
\end{center}

You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
copyright notices, and the license notice saying this License
applies to the Document are reproduced in all copies, and that you
add no other conditions whatsoever to those of this License.  You
may not use technical measures to obstruct or control the reading or
further copying of the copies you make or distribute.  However, you
may accept compensation in exchange for copies.  If you distribute a
large enough number of copies you must also follow the conditions in
section~3.

You may also lend copies, under the same conditions stated above,
and you may publicly display copies.


\begin{center}
{\Large\bfseries 3. COPYING IN QUANTITY\par} \phantomsection
\addcontentsline{toc}{section}{3. COPYING IN QUANTITY}
\end{center}


If you publish printed copies (or copies in media that commonly have
printed covers) of the Document, numbering more than 100, and the
Document's license notice requires Cover Texts, you must enclose the
copies in covers that carry, clearly and legibly, all these Cover
Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
the back cover.  Both covers must also clearly and legibly identify
you as the publisher of these copies.  The front cover must present
the full title with all words of the title equally prominent and
visible.  You may add other material on the covers in addition.
Copying with changes limited to the covers, as long as they preserve
the title of the Document and satisfy these conditions, can be
treated as verbatim copying in other respects.

If the required texts for either cover are too voluminous to fit
legibly, you should put the first ones listed (as many as fit
reasonably) on the actual cover, and continue the rest onto adjacent
pages.

If you publish or distribute Opaque copies of the Document numbering
more than 100, you must either include a machine-readable
Transparent copy along with each Opaque copy, or state in or with
each Opaque copy a computer-network location from which the general
network-using public has access to download using public-standard
network protocols a complete Transparent copy of the Document, free
of added material. If you use the latter option, you must take
reasonably prudent steps, when you begin distribution of Opaque
copies in quantity, to ensure that this Transparent copy will remain
thus accessible at the stated location until at least one year after
the last time you distribute an Opaque copy (directly or through
your agents or retailers) of that edition to the public.

It is requested, but not required, that you contact the authors of
the Document well before redistributing any large number of copies,
to give them a chance to provide you with an updated version of the
Document.


\begin{center}
{\Large\bfseries 4. MODIFICATIONS\par} \phantomsection
\addcontentsline{toc}{section}{4. MODIFICATIONS}
\end{center}

You may copy and distribute a Modified Version of the Document under
the conditions of sections 2 and 3 above, provided that you release
the Modified Version under precisely this License, with the Modified
Version filling the role of the Document, thus licensing
distribution and modification of the Modified Version to whoever
possesses a copy of it.  In addition, you must do these things in
the Modified Version:

\begin{itemize}
\item[A.]
   Use in the Title Page (and on the covers, if any) a title distinct
   from that of the Document, and from those of previous versions
   (which should, if there were any, be listed in the History section
   of the Document).  You may use the same title as a previous version
   if the original publisher of that version gives permission.

\item[B.]
   List on the Title Page, as authors, one or more persons or entities
   responsible for authorship of the modifications in the Modified
   Version, together with at least five of the principal authors of the
   Document (all of its principal authors, if it has fewer than five),
   unless they release you from this requirement.

\item[C.]
   State on the Title page the name of the publisher of the
   Modified Version, as the publisher.

\item[D.]
   Preserve all the copyright notices of the Document.

\item[E.]
   Add an appropriate copyright notice for your modifications
   adjacent to the other copyright notices.

\item[F.]
   Include, immediately after the copyright notices, a license notice
   giving the public permission to use the Modified Version under the
   terms of this License, in the form shown in the Addendum below.

\item[G.]
   Preserve in that license notice the full lists of Invariant Sections
   and required Cover Texts given in the Document's license notice.

\item[H.]
   Include an unaltered copy of this License.

\item[I.]
   Preserve the section Entitled ``History'', Preserve its Title, and add
   to it an item stating at least the title, year, new authors, and
   publisher of the Modified Version as given on the Title Page.  If
   there is no section Entitled ``History'' in the Document, create one
   stating the title, year, authors, and publisher of the Document as
   given on its Title Page, then add an item describing the Modified
   Version as stated in the previous sentence.

\item[J.]
   Preserve the network location, if any, given in the Document for
   public access to a Transparent copy of the Document, and likewise
   the network locations given in the Document for previous versions
   it was based on.  These may be placed in the ``History'' section.
   You may omit a network location for a work that was published at
   least four years before the Document itself, or if the original
   publisher of the version it refers to gives permission.

\item[K.]
   For any section Entitled ``Acknowledgements'' or ``Dedications'',
   Preserve the Title of the section, and preserve in the section all
   the substance and tone of each of the contributor acknowledgements
   and/or dedications given therein.

\item[L.]
   Preserve all the Invariant Sections of the Document,
   unaltered in their text and in their titles.  Section numbers
   or the equivalent are not considered part of the section titles.

\item[M.]
   Delete any section Entitled ``Endorsements''.  Such a section
   may not be included in the Modified Version.

\item[N.]
   Do not retitle any existing section to be Entitled ``Endorsements''
   or to conflict in title with any Invariant Section.

\item[O.]
   Preserve any Warranty Disclaimers.
\end{itemize}

If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no
material copied from the Document, you may at your option designate
some or all of these sections as invariant.  To do this, add their
titles to the list of Invariant Sections in the Modified Version's
license notice. These titles must be distinct from any other section
titles.

You may add a section Entitled ``Endorsements'', provided it
contains nothing but endorsements of your Modified Version by
various parties---for example, statements of peer review or that the
text has been approved by an organization as the authoritative
definition of a standard.

You may add a passage of up to five words as a Front-Cover Text, and
a passage of up to 25 words as a Back-Cover Text, to the end of the
list of Cover Texts in the Modified Version.  Only one passage of
Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity.  If the Document
already includes a cover text for the same cover, previously added
by you or by arrangement made by the same entity you are acting on
behalf of, you may not add another; but you may replace the old one,
on explicit permission from the previous publisher that added the
old one.

The author(s) and publisher(s) of the Document do not by this
License give permission to use their names for publicity for or to
assert or imply endorsement of any Modified Version.


\begin{center}
{\Large\bfseries 5. COMBINING DOCUMENTS\par} \phantomsection
\addcontentsline{toc}{section}{5. COMBINING DOCUMENTS}
\end{center}


You may combine the Document with other documents released under
this License, under the terms defined in section~4 above for
modified versions, provided that you include in the combination all
of the Invariant Sections of all of the original documents,
unmodified, and list them all as Invariant Sections of your combined
work in its license notice, and that you preserve all their Warranty
Disclaimers.

The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy.  If there are multiple Invariant Sections with the same name
but different contents, make the title of each such section unique
by adding at the end of it, in parentheses, the name of the original
author or publisher of that section if known, or else a unique
number. Make the same adjustment to the section titles in the list
of Invariant Sections in the license notice of the combined work.

In the combination, you must combine any sections Entitled
``History'' in the various original documents, forming one section
Entitled ``History''; likewise combine any sections Entitled
``Acknowledgements'', and any sections Entitled ``Dedications''. You
must delete all sections Entitled ``Endorsements''.

\begin{center}
{\Large\bfseries 6. COLLECTIONS OF DOCUMENTS\par} \phantomsection
\addcontentsline{toc}{section}{6. COLLECTIONS OF DOCUMENTS}
\end{center}

You may make a collection consisting of the Document and other
documents released under this License, and replace the individual
copies of this License in the various documents with a single copy
that is included in the collection, provided that you follow the
rules of this License for verbatim copying of each of the documents
in all other respects.

You may extract a single document from such a collection, and
distribute it individually under this License, provided you insert a
copy of this License into the extracted document, and follow this
License in all other respects regarding verbatim copying of that
document.


\begin{center}
{\Large\bfseries 7. AGGREGATION WITH INDEPENDENT WORKS\par}
\phantomsection \addcontentsline{toc}{section}{7. AGGREGATION WITH
INDEPENDENT WORKS}
\end{center}


A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage
or distribution medium, is called an ``aggregate'' if the copyright
resulting from the compilation is not used to limit the legal rights
of the compilation's users beyond what the individual works permit.
When the Document is included in an aggregate, this License does not
apply to the other works in the aggregate which are not themselves
derivative works of the Document.

If the Cover Text requirement of section~3 is applicable to these
copies of the Document, then if the Document is less than one half
of the entire aggregate, the Document's Cover Texts may be placed on
covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic
form. Otherwise they must appear on printed covers that bracket the
whole aggregate.


\begin{center}
{\Large\bfseries 8. TRANSLATION\par} \phantomsection
\addcontentsline{toc}{section}{8. TRANSLATION}
\end{center}


Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of
section~4. Replacing Invariant Sections with translations requires
special permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections.  You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also
include the original English version of this License and the
original versions of those notices and disclaimers.  In case of a
disagreement between the translation and the original version of
this License or a notice or disclaimer, the original version will
prevail.

If a section in the Document is Entitled ``Acknowledgements'',
``Dedications'', or ``History'', the requirement (section~4) to
Preserve its Title (section~1) will typically require changing the
actual title.


\begin{center}
{\Large\bfseries 9. TERMINATION\par} \phantomsection
\addcontentsline{toc}{section}{9. TERMINATION}
\end{center}


You may not copy, modify, sublicense, or distribute the Document
except as expressly provided for under this License.  Any other
attempt to copy, modify, sublicense or distribute the Document is
void, and will automatically terminate your rights under this
License.  However, parties who have received copies, or rights, from
you under this License will not have their licenses terminated so
long as such parties remain in full compliance.


\begin{center}
{\Large\bfseries 10. FUTURE REVISIONS OF THIS LICENSE\par} \phantomsection
\addcontentsline{toc}{section}{10. FUTURE REVISIONS OF THIS LICENSE}
\end{center}


The Free Software Foundation may publish new, revised versions of
the GNU Free Documentation License from time to time.  Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.  See
\url{http://www.gnu.org/copyleft/}.

Each version of the License is given a distinguishing version
number. If the Document specifies that a particular numbered version
of this License ``or any later version'' applies to it, you have the
option of following the terms and conditions either of that
specified version or of any later version that has been published
(not as a draft) by the Free Software Foundation.  If the Document
does not specify a version number of this License, you may choose
any version ever published (not as a draft) by the Free Software
Foundation. }




 \QUOTEME{Que a quien robe este libro, o lo tome prestado y no lo
devuelva, se le convierta en una serpiente en las manos y lo venza.
Que sea golpeado por la par\'{a}lisis y todos sus miembros
arruinados. Que languidezca de dolor gritando por piedad, y que no
haya coto a su agon\'{\i}a hasta la \'{u}ltima disoluci\'{o}n. Que
las polillas roan sus entra\~{n}as y, cuando llegue al final de su
castigo, que arda en las llamas del Infierno para siempre.}
{Maldici\'{o}n an\'{o}nima contra los ladrones de libros en el
monasterio de San Pedro, Barcelona.}


\renewcommand{\chaptername}{Index}


\printindex

\end{document}
