\documentclass[fullpage,10pt, openany]{book}
\usepackage{amsmath,etex}
\usepackage[usenames]{color}

%%%%%                   Postscript drawing packages
\usepackage{psboxit}
\usepackage{pstricks}
\usepackage{pstricks-add}
\usepackage{fancybox}
%%%%Note: Eventually I might convert all the pstricks drawings into metafont. Who knows.

\usepackage{hangcaption}
\usepackage[amsmath,thmmarks,standard,thref]{ntheorem}
\usepackage[colorlinks=true,linkcolor=blue, pdftitle={David A. Santos Calculus Once Again Analysis Notes}, pdfauthor={david santos}, bookmarksopen, dvips]{hyperref}
\usepackage{pdflscape}
\usepackage{thumbpdf}
%\usepackage[dvips]{graphicx}

%%%%%                  FONTS
\usepackage[latin1]{inputenc}
% Fourier for math | Utopia (scaled) for rm | Helvetica for ss | Latin Modern for tt
\usepackage{fourier} % math & rm
\usepackage[scaled=0.875]{helvet} % ss
\renewcommand{\ttdefault}{lmtt} %tt
%\usepackage{marvosym}
\usepackage{pifont}
\usepackage{mathrsfs}
\usepackage{stmaryrd}
\usepackage{amssymb}
\usepackage[tiling]{pst-fill}      % PSTricks package for filling/tiling
\usepackage{pst-text}              % PSTricks package for character path
\usepackage{pst-grad}              % PSTricks package for gradient filling
\usepackage{multicol}
\everymath{\displaystyle} \mathversion{bold}

%% \WriteBig{text}{big size}{small size}{grad-begin}{grad-end}
%% Typesets #1 as a large Helvetica (phv) outline whose interior is filled
%% with a tiling of #1 in a small Times (ptm) font, over a #4 -> #5 gradient.
%% Used by \title below.
\newcommand{\WriteBig}[5]{%
  \DeclareFixedFont{\bigsf}{T1}{phv}{b}{n}{#2}%
  % Fixed: the series argument of \DeclareFixedFont must be a bare NFSS
  % series code ('m'), not '(m)'.
  \DeclareFixedFont{\smallrm}{T1}{ptm}{m}{n}{#3}%
  \psboxfill{\smallrm #1}%
      \centerline{%
         \pscharpath[fillstyle=gradient,%
        gradangle=-45,%
        gradmidpoint=0.5,%
        addfillstyle=boxfill,%
        gradbegin=#4,%
        gradend=#5,%
        fillangle=45,%
        fillsep=0.7mm]{\rput[b](0,0){\bigsf#1}}}%
}


%%%%%FLOAT PLACEMENT
%% Relax LaTeX's float-placement thresholds so figures/tables are placed
%% more eagerly (less text required per page, more float area allowed).
\renewcommand{\textfraction}{0.05}
\renewcommand{\topfraction}{0.95}
\renewcommand{\bottomfraction}{0.95}
\renewcommand{\floatpagefraction}{0.35}
\setcounter{totalnumber}{5}

%%%%%%%%%PRINTED AREA
%% Manual page geometry (predates the geometry package): widen and heighten
%% the text block for letter paper.
\topmargin -.6in \textheight 9.4in \oddsidemargin -.3in
\evensidemargin -.3in \textwidth 7in


%%%%%                    THEOREM-LIKE ENVIRONMENTS
%% End-of-proof marker: Zapf Dingbats glyph 113 (via pifont's \Pisymbol).
\newcommand{\proofsymbol}{\Pisymbol{pzd}{113}}
%% Problems are numbered per section, independently of the thm counter.
\newtheorem{pro}{Problem}[section]

%% ntheorem configuration: vertical space around theorem bodies, the
%% "change" style (number before name), bold sans headers, upright bodies.
\theorempreskipamount .5cm \theorempostskipamount .5cm
\theoremstyle{change} \theoremheaderfont{\sffamily\bfseries}
\theorembodyfont{\normalfont}
%% Colored headers use pstricks' color switches (\red, \magenta, ...).
%% All of the following share the thm counter.
\newtheorem{thm}{\textbf{\textsc{\red Theorem}}}
\newtheorem{prop}[thm]{\textbf{\textsc{\magenta Definition-Proposition}}}
 \newtheorem{cor}[thm]{\textbf{\textsc{\blue Corollary}}}
\newtheorem{df}[thm]{Definition}
\newtheorem{axi}[thm]{Axiom}
\newtheorem{exa}[thm]{Example}
\newtheorem{lem}[thm]{\textbf{\textsc{\green Lemma}}}
%% Proof environments: italic quote block opened with a bold label and
%% closed with \proofsymbol.  One variant per ordinal for multi-proof
%% theorems (first through sixth).
\newenvironment{pf}[0]{\itshape\begin{quote}{\bf Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{f-pf}[0]{\itshape\begin{quote}{\bf First Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{s-pf}[0]{\itshape\begin{quote}{\bf Second Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{t-pf}[0]{\itshape\begin{quote}{\bf Third Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{fo-pf}[0]{\itshape\begin{quote}{\bf Fourth Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{fi-pf}[0]{\itshape\begin{quote}{\bf Fifth Proof: \ }}{\proofsymbol\end{quote}}
\newenvironment{si-pf}[0]{\itshape\begin{quote}{\bf Sixth Proof: \ }}{\proofsymbol\end{quote}}

%% Solutions: like proofs but without the closing symbol.
\newenvironment{solu}[0]{\begin{quote}{\bf Solution: \ } \itshape }{\end{quote}}
%% Remarks: small italic quote introduced by a large red dingbat (glyph 43).
\newenvironment{rem}[0]{\begin{quote}{\huge\textcolor{red}{\Pisymbol{pzd}{43}}}\small\itshape }{\end{quote}}
%% \QUOTEME{quotation}{attribution}: epigraph-style quote with the
%% attribution pushed flush right in blue.
\newcommand{\QUOTEME}[2]{\begin{quote}{\it\textbf{#1}}\nolinebreak[1] \blue\hspace*{\fill} \mbox{-\textsl{#2}} \hspace*{\fill}\end{quote}}

%% The answers package collects "answer" environments into the auxiliary
%% file "calculillo" (opened later with \Opensolutionfile).
\usepackage{answers}
\Newassociation{answer}{Answer}{calculillo}




%%Note: I had to comment-out marvosym.sty on the line \def\Rightarrow{{\mvchr58}}
%%because it wasn't giving the standard \implies command. Anyone knowing a better way
%%of dealing with this problem, please email me.

%%%%%                Non-standard commands and symbols
%% Blackboard-bold number sets.
\newcommand{\BBZ}{\mathbb{Z}}
\newcommand{\BBR}{\mathbb{R}}
\newcommand{\BBN}{\mathbb{N}}
\newcommand{\BBC}{\mathbb{C}}
\newcommand{\BBQ}{\mathbb{Q}}
% NOTE(review): this overwrites amsmath's \binom with the plain-TeX
% \choose form; amsmath warns about \choose -- confirm this is intended.
\def\binom#1#2{{#1\choose#2}}
% NOTE(review): \wideparen is not provided by any package loaded above
% (it comes from yhmath/mathabx) -- verify it is defined elsewhere.
\def\arc#1{{\wideparen{#1}}}
\newcommand{\dis}{\displaystyle}
%% Limit shorthand: n -> +infinity.
\newcommand{\ngrows}{n\rightarrow +\infty}
%% Uniform-convergence arrow.
\newcommand{\unif}{\stackrel{\mathrm{\tiny unif}}{\longrightarrow} }
%% \fun{f}{x}{f(x)}{A}{B}: displays a function as a two-row array
%%   f : A -> B, x |-> f(x).
\def\fun#1#2#3#4#5{\everymath{\displaystyle}{{#1} : \vspace{1cm}
\begin{array}{ccc}{#4} & \rightarrow &
{#5}\\
{#2} &  \mapsto & {#3} \\
\end{array}}}
\def\sgn#1{{\mathrm{signum}}\left(#1\right)}
% NOTE(review): this clobbers LaTeX's standard \d (under-dot accent) --
% any text use of \d elsewhere in the document will break.
\def\d#1{\mathbf{d}#1}
%% Accumulation (limit) points of a set.
\def\acc#1{\mathbf{Acc}\left(#1\right)}
\def\card#1{\mathrm{card}\left(#1\right)}
%% Floor/ceiling with stmaryrd's double brackets.
\def\floor#1{\llfloor #1 \rrfloor}
\def\ceil#1{\llceil #1 \rrceil}
\def\norm#1{\Big|\Big| #1 \Big|\Big|}
\def\absval#1{\left| #1 \right|}
%% Neighborhood of a point (script N with subscript).
\def\N#1{\mathscr{N}_{#1}}
\def\closure#1{\overline{#1}}
\def\bdy#1{\operatorname{Bdy}\left(#1\right)}
% NOTE(review): \widering is not provided by any package loaded above
% (it comes from yhmath) -- verify it is defined elsewhere.
\def\interior#1{\widering{#1}}
\def\interiorone#1{\mathring{#1}}
\def\signum#1{\operatorname{signum}\left( #1 \right)}
% NOTE(review): duplicate definition -- \card was already defined above
% with \mathrm; this \operatorname version silently wins.
\def\card#1{\operatorname{card}\left( #1 \right)}
%% Fractional part {x}.
\def\fracpart#1{\left\{#1\right\}}
%% General linear group GL_n(F).
\def\gl#1#2{{\bf GL}_{#1}(#2)}
\def\magma#1#2{\langle #1,#2\rangle}
%% Open ball of radius #1 centered at #2.
\def\ball#1#2{\mathscr{B}_{#1}\left(#2\right)}
\def\field#1#2#3{\langle #1,#2, #3\rangle}
%% Sequence {#1} with lower index #2 and upper index #3.
\def\seq#1#2#3{\left\{#1\right\} _{#2} ^{#3}}
\newcommand{\dint}{\displaystyle\int }
\newcommand{\dsum}{\displaystyle\sum }
%% Power set.
\newcommand{\curlyP}{\mathscr{P} }
\def\dom#1{{\mathbf{Dom}}\left(#1\right)}
\def\target#1{{\mathbf{Target}}\left(#1\right)}
\def\im#1{{\mathbf{Im}}\left(#1\right)}
\newcommand{\idefun}{{\bf Id\ }}
\def\supp#1{{\mathbf{supp}}\left(#1\right)}
%%%%%%%%%%%%%%%%%INTERVALS
%%%%%%%% lo= left open, rc = right closed, etc.
%% French-style interval notation with reversed brackets for open ends,
%% e.g. \loro{a}{b} prints ]a ; b[.
\def\lcrc#1#2{{\bf\Big[ }#1 \ ; #2 {\bf\Big] }}
\def\loro#1#2{{\bf\Big] }#1 \ ; #2 {\bf \Big[}}
\def\lcro#1#2{{\bf \Big[} #1 \ ; #2 {\bf \Big[} }
\def\lorc#1#2{\Big]#1 \ ; #2 \Big]}
%% Raised dot for the dot product.
\newcommand{\dott}{{\scriptscriptstyle \stackrel{\bullet}{{}}}}
\def\dotprod#1#2{\vec{#1} \dott \vec{#2}}
% NOTE(review): \ngroes duplicates \ngrows above.
\newcommand{\ngroes}{n\rightarrow +\infty}
\newcommand{\sequ}{\left\{a_n\right\} _{n=1} ^{+\infty}}

%% Landau/measure notation.  little-o:
\def\soo#1{\mathit{o}\left(#1\right)}
\def\meas#1{\mu\left(#1\right)}
\def\outermeas#1{\overline{\mu}\left(#1\right)}
\def\boo#1{\mathit{O}\left(#1\right)}
% NOTE(review): \substack expects a braced argument; "\substack #1"
% grabs only one token -- confirm call sites always brace the limit.
\def\smallo#1#2{\mathit{o}_{\substack #1}\left(#2\right)}
\def\asympto#1{\sim_{\substack #1}}
\def\bigo#1#2{\mathit{O}_{\substack #1}\left(#2\right)}
% NOTE(review): this clobbers the standard \ll relation ("much less
% than"); any ordinary use of \ll in math will now take two arguments.
\def\ll#1#2{<<_{\substack #1}\left(#2\right)}
%% Extended real line (double closure of R).
\newcommand{\closR}{\closure{\closure{\BBR}}}
%%%%Title Page

\makeatletter
%% Heavy horizontal rule used by title-page layouts.
\def\thickhrulefill{\leavevmode \leaders \hrule height 1pt\hfill \kern \z@}
%% Custom title page: title flush left over a thick rule, author flush
%% right, and the dated version line centered lower on the page.
\renewcommand{\maketitle}{\begin{titlepage}%
    \let\footnotesize\small
    \let\footnoterule\relax
    \parindent \z@
    \reset@font
    \null\vfil
    \begin{flushleft}
     \@title
    \end{flushleft}
    \par
    \hrule height 4pt
    \par
    \begin{flushright}
    \@author \par
    \end{flushright}
  \vskip 60\p@
  \vspace*{\stretch{2}}
    % NOTE(review): the \vskip/\vspace pair below duplicates the two
    % lines above -- presumably intentional extra drop; confirm.
    \vskip 60\p@
    \vspace*{\stretch{2}}
    \begin{center}
\Large\textsf{\today\ Version}
    \end{center}
  \end{titlepage}%
  \setcounter{footnote}{0}%
}


\makeatother

\title{\WriteBig{Calculus, once again}{1.5cm}{1mm}{green}{red}}
\author{\textcolor{blue}{David A. SANTOS} \\
\href{mailto:dsantos@ccp.edu}{dsantos@ccp.edu}}
%%%%%%%



%%%%%%%%%%%%%%%


%%%%%%%%%


%%%%%%%%%%%%%%







\usepackage{fancyhdr} %%%%for page headers and footers

%%%%%%%%%%%%%%%%%%

%%%%%%                  And voilà the document !!!

\begin{document}
\pagenumbering{roman}

\maketitle \clearpage



\begin{quote}
    Copyright \copyright{}  2007  David Anthony SANTOS.
    Permission is granted to copy, distribute and/or modify this document
    under the terms of the GNU Free Documentation License, Version 1.2
    or any later version published by the Free Software Foundation;
    with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
    A copy of the license is included in the section entitled ``GNU
    Free Documentation License''.
\end{quote}

\clearpage







{\tiny\chapter*{\rlap{GNU Free Documentation License}}
\phantomsection  % so hyperref creates bookmarks
\addcontentsline{toc}{chapter}{GNU Free Documentation License}
%\label{label_fdl}

 \begin{center}

       Version 1.2, November 2002


 Copyright \copyright{} 2000,2001,2002  Free Software Foundation, Inc.

 \bigskip

     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

 \bigskip

 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.
\end{center}


\begin{center}
{\bf\large Preamble}
\end{center}

The purpose of this License is to make a manual, textbook, or other
functional and useful document ``free'' in the sense of freedom: to
assure everyone the effective freedom to copy and redistribute it,
with or without modifying it, either commercially or
noncommercially. Secondarily, this License preserves for the author
and publisher a way to get credit for their work, while not being
considered responsible for modifications made by others.

This License is a kind of ``copyleft'', which means that derivative
works of the document must themselves be free in the same sense.  It
complements the GNU General Public License, which is a copyleft
license designed for free software.

We have designed this License in order to use it for manuals for
free software, because free software needs free documentation: a
free program should come with manuals providing the same freedoms
that the software does.  But this License is not limited to software
manuals; it can be used for any textual work, regardless of subject
matter or whether it is published as a printed book.  We recommend
this License principally for works whose purpose is instruction or
reference.


\begin{center}
{\Large\bf 1. APPLICABILITY AND DEFINITIONS\par} \phantomsection
\addcontentsline{toc}{section}{1. APPLICABILITY AND DEFINITIONS}
\end{center}

This License applies to any manual or other work, in any medium,
that contains a notice placed by the copyright holder saying it can
be distributed under the terms of this License.  Such a notice
grants a world-wide, royalty-free license, unlimited in duration, to
use that work under the conditions stated herein.  The
``\textbf{Document}'', below, refers to any such manual or work. Any
member of the public is a licensee, and is addressed as
``\textbf{you}''.  You accept the license if you copy, modify or
distribute the work in a way requiring permission under copyright
law.

A ``\textbf{Modified Version}'' of the Document means any work
containing the Document or a portion of it, either copied verbatim,
or with modifications and/or translated into another language.

A ``\textbf{Secondary Section}'' is a named appendix or a
front-matter section of the Document that deals exclusively with the
relationship of the publishers or authors of the Document to the
Document's overall subject (or to related matters) and contains
nothing that could fall directly within that overall subject. (Thus,
if the Document is in part a textbook of mathematics, a Secondary
Section may not explain any mathematics.)  The relationship could be
a matter of historical connection with the subject or with related
matters, or of legal, commercial, philosophical, ethical or
political position regarding them.

The ``\textbf{Invariant Sections}'' are certain Secondary Sections
whose titles are designated, as being those of Invariant Sections,
in the notice that says that the Document is released under this
License.  If a section does not fit the above definition of
Secondary then it is not allowed to be designated as Invariant.  The
Document may contain zero Invariant Sections.  If the Document does
not identify any Invariant Sections then there are none.

The ``\textbf{Cover Texts}'' are certain short passages of text that
are listed, as Front-Cover Texts or Back-Cover Texts, in the notice
that says that the Document is released under this License.  A
Front-Cover Text may be at most 5 words, and a Back-Cover Text may
be at most 25 words.

A ``\textbf{Transparent}'' copy of the Document means a
machine-readable copy, represented in a format whose specification
is available to the general public, that is suitable for revising
the document straightforwardly with generic text editors or (for
images composed of pixels) generic paint programs or (for drawings)
some widely available drawing editor, and that is suitable for input
to text formatters or for automatic translation to a variety of
formats suitable for input to text formatters.  A copy made in an
otherwise Transparent file format whose markup, or absence of
markup, has been arranged to thwart or discourage subsequent
modification by readers is not Transparent. An image format is not
Transparent if used for any substantial amount of text.  A copy that
is not ``Transparent'' is called ``\textbf{Opaque}''.

Examples of suitable formats for Transparent copies include plain
ASCII without markup, Texinfo input format, LaTeX input format, SGML
or XML using a publicly available DTD, and standard-conforming
simple HTML, PostScript or PDF designed for human modification.
Examples of transparent image formats include PNG, XCF and JPG.
Opaque formats include proprietary formats that can be read and
edited only by proprietary word processors, SGML or XML for which
the DTD and/or processing tools are not generally available, and the
machine-generated HTML, PostScript or PDF produced by some word
processors for output purposes only.

The ``\textbf{Title Page}'' means, for a printed book, the title
page itself, plus such following pages as are needed to hold,
legibly, the material this License requires to appear in the title
page.  For works in formats which do not have any title page as
such, ``Title Page'' means the text near the most prominent
appearance of the work's title, preceding the beginning of the body
of the text.

A section ``\textbf{Entitled XYZ}'' means a named subunit of the
Document whose title either is precisely XYZ or contains XYZ in
parentheses following text that translates XYZ in another language.
(Here XYZ stands for a specific section name mentioned below, such
as ``\textbf{Acknowledgements}'', ``\textbf{Dedications}'',
``\textbf{Endorsements}'', or ``\textbf{History}''.) To
``\textbf{Preserve the Title}'' of such a section when you modify
the Document means that it remains a section ``Entitled XYZ''
according to this definition.

The Document may include Warranty Disclaimers next to the notice
which states that this License applies to the Document.  These
Warranty Disclaimers are considered to be included by reference in
this License, but only as regards disclaiming warranties: any other
implication that these Warranty Disclaimers may have is void and has
no effect on the meaning of this License.


\begin{center}
{\Large\bf 2. VERBATIM COPYING\par} \phantomsection
\addcontentsline{toc}{section}{2. VERBATIM COPYING}
\end{center}

You may copy and distribute the Document in any medium, either
commercially or noncommercially, provided that this License, the
copyright notices, and the license notice saying this License
applies to the Document are reproduced in all copies, and that you
add no other conditions whatsoever to those of this License.  You
may not use technical measures to obstruct or control the reading or
further copying of the copies you make or distribute.  However, you
may accept compensation in exchange for copies.  If you distribute a
large enough number of copies you must also follow the conditions in
section~3.

You may also lend copies, under the same conditions stated above,
and you may publicly display copies.


\begin{center}
{\Large\bf 3. COPYING IN QUANTITY\par} \phantomsection
\addcontentsline{toc}{section}{3. COPYING IN QUANTITY}
\end{center}


If you publish printed copies (or copies in media that commonly have
printed covers) of the Document, numbering more than 100, and the
Document's license notice requires Cover Texts, you must enclose the
copies in covers that carry, clearly and legibly, all these Cover
Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
the back cover.  Both covers must also clearly and legibly identify
you as the publisher of these copies.  The front cover must present
the full title with all words of the title equally prominent and
visible.  You may add other material on the covers in addition.
Copying with changes limited to the covers, as long as they preserve
the title of the Document and satisfy these conditions, can be
treated as verbatim copying in other respects.

If the required texts for either cover are too voluminous to fit
legibly, you should put the first ones listed (as many as fit
reasonably) on the actual cover, and continue the rest onto adjacent
pages.

If you publish or distribute Opaque copies of the Document numbering
more than 100, you must either include a machine-readable
Transparent copy along with each Opaque copy, or state in or with
each Opaque copy a computer-network location from which the general
network-using public has access to download using public-standard
network protocols a complete Transparent copy of the Document, free
of added material. If you use the latter option, you must take
reasonably prudent steps, when you begin distribution of Opaque
copies in quantity, to ensure that this Transparent copy will remain
thus accessible at the stated location until at least one year after
the last time you distribute an Opaque copy (directly or through
your agents or retailers) of that edition to the public.

It is requested, but not required, that you contact the authors of
the Document well before redistributing any large number of copies,
to give them a chance to provide you with an updated version of the
Document.


\begin{center}
{\Large\bf 4. MODIFICATIONS\par} \phantomsection
\addcontentsline{toc}{section}{4. MODIFICATIONS}
\end{center}

You may copy and distribute a Modified Version of the Document under
the conditions of sections 2 and 3 above, provided that you release
the Modified Version under precisely this License, with the Modified
Version filling the role of the Document, thus licensing
distribution and modification of the Modified Version to whoever
possesses a copy of it.  In addition, you must do these things in
the Modified Version:

\begin{itemize}
\item[A.]
   Use in the Title Page (and on the covers, if any) a title distinct
   from that of the Document, and from those of previous versions
   (which should, if there were any, be listed in the History section
   of the Document).  You may use the same title as a previous version
   if the original publisher of that version gives permission.

\item[B.]
   List on the Title Page, as authors, one or more persons or entities
   responsible for authorship of the modifications in the Modified
   Version, together with at least five of the principal authors of the
   Document (all of its principal authors, if it has fewer than five),
   unless they release you from this requirement.

\item[C.]
   State on the Title page the name of the publisher of the
   Modified Version, as the publisher.

\item[D.]
   Preserve all the copyright notices of the Document.

\item[E.]
   Add an appropriate copyright notice for your modifications
   adjacent to the other copyright notices.

\item[F.]
   Include, immediately after the copyright notices, a license notice
   giving the public permission to use the Modified Version under the
   terms of this License, in the form shown in the Addendum below.

\item[G.]
   Preserve in that license notice the full lists of Invariant Sections
   and required Cover Texts given in the Document's license notice.

\item[H.]
   Include an unaltered copy of this License.

\item[I.]
   Preserve the section Entitled ``History'', Preserve its Title, and add
   to it an item stating at least the title, year, new authors, and
   publisher of the Modified Version as given on the Title Page.  If
   there is no section Entitled ``History'' in the Document, create one
   stating the title, year, authors, and publisher of the Document as
   given on its Title Page, then add an item describing the Modified
   Version as stated in the previous sentence.

\item[J.]
   Preserve the network location, if any, given in the Document for
   public access to a Transparent copy of the Document, and likewise
   the network locations given in the Document for previous versions
   it was based on.  These may be placed in the ``History'' section.
   You may omit a network location for a work that was published at
   least four years before the Document itself, or if the original
   publisher of the version it refers to gives permission.

\item[K.]
   For any section Entitled ``Acknowledgements'' or ``Dedications'',
   Preserve the Title of the section, and preserve in the section all
   the substance and tone of each of the contributor acknowledgements
   and/or dedications given therein.

\item[L.]
   Preserve all the Invariant Sections of the Document,
   unaltered in their text and in their titles.  Section numbers
   or the equivalent are not considered part of the section titles.

\item[M.]
   Delete any section Entitled ``Endorsements''.  Such a section
   may not be included in the Modified Version.

\item[N.]
   Do not retitle any existing section to be Entitled ``Endorsements''
   or to conflict in title with any Invariant Section.

\item[O.]
   Preserve any Warranty Disclaimers.
\end{itemize}

If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no
material copied from the Document, you may at your option designate
some or all of these sections as invariant.  To do this, add their
titles to the list of Invariant Sections in the Modified Version's
license notice. These titles must be distinct from any other section
titles.

You may add a section Entitled ``Endorsements'', provided it
contains nothing but endorsements of your Modified Version by
various parties--for example, statements of peer review or that the
text has been approved by an organization as the authoritative
definition of a standard.

You may add a passage of up to five words as a Front-Cover Text, and
a passage of up to 25 words as a Back-Cover Text, to the end of the
list of Cover Texts in the Modified Version.  Only one passage of
Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity.  If the Document
already includes a cover text for the same cover, previously added
by you or by arrangement made by the same entity you are acting on
behalf of, you may not add another; but you may replace the old one,
on explicit permission from the previous publisher that added the
old one.

The author(s) and publisher(s) of the Document do not by this
License give permission to use their names for publicity for or to
assert or imply endorsement of any Modified Version.


\begin{center}
{\Large\bf 5. COMBINING DOCUMENTS\par} \phantomsection
\addcontentsline{toc}{section}{5. COMBINING DOCUMENTS}
\end{center}


You may combine the Document with other documents released under
this License, under the terms defined in section~4 above for
modified versions, provided that you include in the combination all
of the Invariant Sections of all of the original documents,
unmodified, and list them all as Invariant Sections of your combined
work in its license notice, and that you preserve all their Warranty
Disclaimers.

The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy.  If there are multiple Invariant Sections with the same name
but different contents, make the title of each such section unique
by adding at the end of it, in parentheses, the name of the original
author or publisher of that section if known, or else a unique
number. Make the same adjustment to the section titles in the list
of Invariant Sections in the license notice of the combined work.

In the combination, you must combine any sections Entitled
``History'' in the various original documents, forming one section
Entitled ``History''; likewise combine any sections Entitled
``Acknowledgements'', and any sections Entitled ``Dedications''. You
must delete all sections Entitled ``Endorsements''.

\begin{center}
{\Large\bf 6. COLLECTIONS OF DOCUMENTS\par} \phantomsection
\addcontentsline{toc}{section}{6. COLLECTIONS OF DOCUMENTS}
\end{center}

You may make a collection consisting of the Document and other
documents released under this License, and replace the individual
copies of this License in the various documents with a single copy
that is included in the collection, provided that you follow the
rules of this License for verbatim copying of each of the documents
in all other respects.

You may extract a single document from such a collection, and
distribute it individually under this License, provided you insert a
copy of this License into the extracted document, and follow this
License in all other respects regarding verbatim copying of that
document.


\begin{center}
{\Large\bf 7. AGGREGATION WITH INDEPENDENT WORKS\par}
\phantomsection \addcontentsline{toc}{section}{7. AGGREGATION WITH
INDEPENDENT WORKS}
\end{center}


A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage
or distribution medium, is called an ``aggregate'' if the copyright
resulting from the compilation is not used to limit the legal rights
of the compilation's users beyond what the individual works permit.
When the Document is included in an aggregate, this License does not
apply to the other works in the aggregate which are not themselves
derivative works of the Document.

If the Cover Text requirement of section~3 is applicable to these
copies of the Document, then if the Document is less than one half
of the entire aggregate, the Document's Cover Texts may be placed on
covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic
form. Otherwise they must appear on printed covers that bracket the
whole aggregate.


\begin{center}
{\Large\bf 8. TRANSLATION\par} \phantomsection
\addcontentsline{toc}{section}{8. TRANSLATION}
\end{center}


Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of
section~4. Replacing Invariant Sections with translations requires
special permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections.  You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also
include the original English version of this License and the
original versions of those notices and disclaimers.  In case of a
disagreement between the translation and the original version of
this License or a notice or disclaimer, the original version will
prevail.

If a section in the Document is Entitled ``Acknowledgements'',
``Dedications'', or ``History'', the requirement (section~4) to
Preserve its Title (section~1) will typically require changing the
actual title.


\begin{center}
{\Large\bf 9. TERMINATION\par} \phantomsection
\addcontentsline{toc}{section}{9. TERMINATION}
\end{center}


You may not copy, modify, sublicense, or distribute the Document
except as expressly provided for under this License.  Any other
attempt to copy, modify, sublicense or distribute the Document is
void, and will automatically terminate your rights under this
License.  However, parties who have received copies, or rights, from
you under this License will not have their licenses terminated so
long as such parties remain in full compliance.


\begin{center}
{\Large\bf 10. FUTURE REVISIONS OF THIS LICENSE\par} \phantomsection
\addcontentsline{toc}{section}{10. FUTURE REVISIONS OF THIS LICENSE}
\end{center}


The Free Software Foundation may publish new, revised versions of
the GNU Free Documentation License from time to time.  Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.  See
http://www.gnu.org/copyleft/.

Each version of the License is given a distinguishing version
number. If the Document specifies that a particular numbered version
of this License ``or any later version'' applies to it, you have the
option of following the terms and conditions either of that
specified version or of any later version that has been published
(not as a draft) by the Free Software Foundation.  If the Document
does not specify a version number of this License, you may choose
any version ever published (not as a draft) by the Free Software
Foundation. }




 \QUOTEME{Que a quien robe este libro, o lo tome prestado y no lo
devuelva, se le convierta en una serpiente en las manos y lo venza.
Que sea golpeado por la par\'{a}lisis y todos sus miembros
arruinados. Que languidezca de dolor gritando por piedad, y que no
haya coto a su agon\'{\i}a hasta la \'{u}ltima disoluci\'{o}n. Que
las polillas roan sus entra\~{n}as y, cuando llegue al final de su
castigo, que arda en las llamas del Infierno para siempre.}
{Maldici\'{o}n an\'{o}nima contra los ladrones de libros en el
monasterio de San Pedro, Barcelona.}

\clearpage






\tableofcontents
%% fancyhdr rules and marks for the front matter: page number on the
%% outer edge, running section/chapter mark opposite, empty center foot.
\renewcommand{\headrulewidth}{1pt}
\renewcommand{\footrulewidth}{1pt}
\lhead[\rm\thepage]{\it \rightmark} \rhead[\it
\leftmark]{\rm\thepage} \cfoot{}
%% Removed a dead \renewcommand{\sectionmark} that referenced the
%% undefined macro \sectionname; it was immediately overwritten by the
%% definition below and would have broken if it ever ran.
\renewcommand{\sectionmark}{\markright}

\chapter*{Preface} \markboth{}{}
\addcontentsline{toc}{chapter}{Preface} \markright{Preface}For many
years I have been lucky enough to have students ask for more: more
challenging problems, more illuminating proofs to different
theorems, a deeper look at various topics, etc. To those students I
normally recommend the books in the bibliography. Some of the same
students have complained of not finding the books or wanting to buy
them, but being impecunious, not being able to afford to buy them.
Hence I have decided to make this compilation.

\bigskip

Here we take a semi-rigorous tour through Calculus. We don't
construct the real numbers, but we examine closer the real number
axioms and some of the basic theorems of Calculus. We also consider
some Olympiad-level problems whose solution can be obtained through
Calculus.

\bigskip

The reader is assumed to be familiar with proofs using mathematical
induction, proofs by contradiction, and the mechanics of
differentiation and integration.


 \hfill{David A.
SANTOS}

\hfill{\href{mailto:dsantos@ccp.edu}{dsantos@ccp.edu}}




\cfoot{}\rfoot{\psovalbox{\thepage}} \pagestyle{fancy} \fancyhead{}
\renewcommand{\chaptermark}{\markboth{\chaptername\ \thechapter}}
\renewcommand{\sectionmark}{\markright}
\fancyhead[RO]{\itshape\leftmark} \fancyhead[RE]{\itshape\rightmark}

\chapter{Preliminaries}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} We will use the language of set
theory throughout these notes. There are various elementary results
that pop up in later proofs, among them, the  De Morgan Laws and the
Monotonicity Reversing of Complementation Rule.

\medskip The concept of a {\em function} lies at the core of
mathematics. We will give a brief overview here
    of some basic properties of functions.


\end{minipage}}
    \end{center}



 \pagenumbering{arabic} \setcounter{page}{1}




\Opensolutionfile{calculillo}[calculillo1]




\section{Sets}


This section contains some of the set notation to be used throughout
these notes. The one-directional arrow $\implies$ reads ``implies''
and the two-directional arrow $\iff$ reads ``if and only if.''
\begin{df}
We will accept the notion of {\em set} as a primitive notion, that
is, a notion that cannot be defined in terms of more elementary
notions. By a  {\em set} we will understand a well-defined
collection of objects, which we will call the {\em elements} of the
set. If the element $x$ belongs to the set $S$ we will write $x\in
S$, and in the contrary case we will write $x\not\in S$.\footnote{
Georg Cantor(1845-1918), the creator of set theory, said ``{\em A
set is any collection into a whole of definite, distinguishable
objects, called {\bf elements}, of our intuition or thought.''}} The
{\em cardinality} of a set is the number of elements the set has. It
can either be finite or infinite. We will denote the cardinality of
the set $S$ by $\card{S}$.
\end{df}
\begin{rem}
Some sets are used so often that merit special notation. We will
denote by $$ \BBN = \{0,1,2,3,\ldots \} $$ the set of natural
numbers, by $$\BBZ = \{\ldots, -3,-2,-1,0,1,2,3,\ldots
\}\footnote{$\BBZ$ for the German word {\em Zahlen} meaning
``numbers.''}
$$the set of integers, by $\BBQ$ the set of rational numbers\footnote{$\BBQ$ for ``quotients.''},  by $\BBR$ the real numbers, and by $\BBC$ the set of complex
numbers. We will occasionally also use $\alpha \BBZ =
\{\ldots,-3\alpha,-2\alpha,-\alpha,0,\alpha, 2\alpha, 3\alpha,
\ldots\}$, etc.

\bigskip

We will also denote the empty set, that is, the set having no
elements by $\varnothing$.
\end{rem}

\vspace{1cm}
\begin{figure}[h]
\begin{minipage}{5cm}$$\psset{unit=1pc} \pscircle[fillstyle=hlines,
fillcolor=red](-1,0){2}\pscircle[fillstyle=hlines,
fillcolor=red](1,0){2} \uput[d](-1,-2){A}\uput[d](1,-2){B}
$$\vspace{1cm}\footnotesize\hangcaption{$A\cup B$} \label{fig:a_union_b}
\end{minipage} \hfill \begin{minipage}{5cm}$$ \psset{unit=1pc} \pscircle(-1,0){2}\pscircle(1,0){2}
\uput[d](-1,-2){A}\uput[d](1,-2){B}
\pscustom[fillstyle=solid,fillcolor=green]{\psarc(1,0){2}{120}{240}\psarc(-1,0){2}{270}{60}}
$$\vspace{1cm}\footnotesize\hangcaption{$A\cap B$} \label{fig:a_intersection_b}
\end{minipage}
\hfill \begin{minipage}{5cm}$$\psset{unit=1pc}
\pscircle[fillstyle=solid,fillcolor=blue](-1,0){2}\pscircle(1,0){2}
\uput[d](-1,-2){A}\uput[d](1,-2){B}
\pscustom[fillstyle=solid,fillcolor=white]{\psarc(1,0){2}{120}{240}\psarc(-1,0){2}{270}{60}}
$$\vspace{1cm}\footnotesize\hangcaption{$A\setminus B$} \label{fig:a_minus_b}
 \end{minipage}
\end{figure}
\begin{df}
The {\em union} of two sets $A$ and $B$ is the set
$$A\cup B = \{x:(x\in A)\ \mathrm{or}\ (x\in B)\}.$$
This is read ``$A$ union $B$.'' See figure \ref{fig:a_union_b}. The
{\em intersection} of two sets $A$ and $B$ is
$$A\cap B = \{x:(x\in A)\ \mathrm{and} \ (x\in B)\}.$$
This is read ``$A$ intersection $B$.'' See figure
\ref{fig:a_intersection_b}. The {\em set difference} of two sets $A$
and $B$ is
$$A\setminus B = \{x:(x\in A)\ \mathrm{and} (x\not\in B)\}.$$
This is read ``$A$ set minus $B$.'' See figure \ref{fig:a_minus_b}.
\end{df}

\begin{df}
Two sets $A$ and $B$ are {\em disjoint} if $A\cap B = \varnothing$.
\end{df}
\begin{exa}
Write $A\cup B$ as the disjoint union of three sets.
\end{exa}
\begin{solu}
Observe that $$ A\cup B = (A\setminus B) \cup (A\cap B) \cup
(B\setminus A),
$$and that the sets on the dextral side are disjoint.
\end{solu}
\begin{df}
A {\em subset} $B$ of a set $A$ is a subcollection of $A$, and we
denote this by $B \subseteqq A$. \footnote{There seems not to be an
agreement here by authors. Some use the notation $\subset $ or
$\subseteq$ instead of $ \subseteqq$. Some see in the notation
$\subset $ the exclusion of equality. In these notes, we will always
use the notation $\subseteqq $, and if we wished to exclude equality
we will write $\subsetneqq$.} This means that $x\in B \implies x\in
A$.
\end{df}
\begin{rem}
$\varnothing$ and $A$ are always subsets of any set $A$.
\end{rem}
Observe that $$A= B \iff (A \subseteqq B)\quad \mathrm{and}\quad
(B\subseteqq A).$$We use this observation in the next theorem.


\begin{thm}[De Morgan Laws] \label{thm:DeMorgan}Let $A, B, C$ be sets. Then $$A
\setminus (B\cap C) = (A\setminus B) \cup (A\setminus C), \qquad A
\setminus (B\cup C) = (A\setminus B) \cap (A\setminus C).
$$
\end{thm}
\begin{pf}
 We have $$\begin{array}{lll} x \in A\setminus(B \cup C) & \iff & x
\in A \quad \mathrm{and}\quad  x\not \in (B \quad \mathrm{or} \quad
C)
\\  & \iff & (x \in A) \ \ \  \mathrm{and} \ \ \     ( (x \not \in B) \ \ \  \mathrm{and}\ \ \     (x \not \in C)) \\
& \iff & (x  \in A \ \ \  \mathrm{and} \ \ \     x \not \in B) \ \ \
\mathrm{and} \ \ \     (x  \in A \ \ \  \mathrm{and} \ \ \ x \not
\in C)
\\ & \iff & (x \in A\setminus B) \ \ \  \mathrm{and}\ \ \ (x \in A
\setminus C) \\ & \iff & x \in (A\setminus B) \cap(A\setminus C).
 \end{array}
$$
Also, $$\begin{array}{lll} x \in A\setminus(B \cap C) & \iff & x \in
A \quad \mathrm{and}\quad  x\not \in (B \quad \mathrm{and}\quad  C)
\\  & \iff & (x \in A) \ \ \  \mathrm{and} \ \ \     ( (x \not \in B) \ \ \  \mathrm{or}\ \ \     (x \not \in C)) \\
& \iff & (x \in A \ \ \  \mathrm{and} \ \ \     x \not \in B) \ \ \
\mathrm{or} \ \ \     (x \in A \ \ \  \mathrm{and} \ \ \ x \not \in
C)
\\ & \iff & (x \in A\setminus B) \ \ \  \mathrm{or}\ \ \ (x \in A
\setminus C) \\ & \iff & x \in (A\setminus B) \cup(A\setminus C)
 \end{array}
$$
\end{pf}

\begin{thm}[Monotonicity Reversing of
Complementation]\label{thm:mono-reversing}
Let $A, B, X$ be sets. Then
$$ A\subseteqq B \iff X\setminus B \subseteqq X\setminus A. $$
\end{thm}
\begin{pf}
We have
$$ \begin{array}{lll} A\subseteqq B  & \iff & (x\in A) \implies (x\in B) \\
& \iff & (x\not \in B)\implies (x\not\in A)\\
& \iff & (x\in X\quad \mathrm{and}\quad x\not \in B)\implies (x\in X\quad \mathrm{and}\quad x\not\in A)\\
& \iff & X\setminus B \subseteqq X\setminus A.
\end{array}$$
\end{pf}
\begin{df}
Let $A_1, A_2, \ldots, A_n$, be sets. The {\em Cartesian Product} of
these $n$ sets is defined and denoted by \index{sets!Cartesian
Product}
$$A_1\times A_2\times \cdots \times A_n =
\{(a_1, a_2, \ldots , a_n): a_k \in A_k\},$$ that is, the set of all
ordered $n$-tuples whose elements belong to the given sets.
\end{df}
\begin{rem}
In the particular case when all the $A_k$ are equal to a set $A$, we
write
$$A_1\times A_2\times \cdots \times A_n = A^n.$$If $a\in A$ and $b\in A$
we write $(a,b)\in A^2.$\end{rem}

\begin{exa}
The Cartesian product is not necessarily commutative. For example,
$(\sqrt{2}, 1)\in\BBR \times \BBZ$ but $(\sqrt{2}, 1)\not\in\BBZ
\times \BBR$. Since $\BBR \times \BBZ$ has an element that $\BBZ
\times \BBR$ does not, $\BBR \times \BBZ \neq \BBZ \times \BBR$.
\end{exa}
\begin{exa}
Prove that if $X\times X = Y\times Y$ then $X=Y$.
\end{exa}
\begin{solu}
Let $x\in X$. Then $(x,x)\in X\times X$, which gives $(x,x)\in
Y\times Y$, so $x\in Y$. Hence $X\subseteq Y$.

\bigskip

Similarly, if $y\in Y$ then $(y,y)\in Y\times Y$, which gives
$(y,y)\in X\times X$, so $y\in X$. Hence $Y\subseteq X$.

\bigskip
Thus $X\subseteq Y$ and $Y \subseteq X$ gives $X=Y$.
\end{solu}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}For a fixed $n\in\BBN$ put  $A_n =\{nk:k\in\BBN\} $.
\begin{enumerate}
\item Find $A_2\cap A_3$.
\item Find $\bigcap _{n=1} ^\infty A_n$.
\item Find $\bigcup _{n=1} ^\infty A_n$.
\end{enumerate}
\begin{answer}Observe that $A_n = \{0, n, 2n, 3n, \ldots \}$.
\begin{enumerate}
\item  $A_6$.
\item  $\{0\}$.
\item $\BBN $.
\end{enumerate}
\end{answer}
\end{pro}

\begin{pro}
Prove the following properties of the empty set: $$ A\cap
\varnothing = \varnothing ,\quad A\cup \varnothing =  A.
$$
\end{pro}
\begin{pro}
Prove the following commutative laws: $$ A\cap B = B \cap A,\quad
A\cup B = B \cup A.
$$
\end{pro}

\begin{pro}
Prove by means of set inclusion the following distributive law: $$(A
\cup B) \cap C = (A \cap C) \cup (B \cap C).$$ \begin{answer} We
have,
$$\begin{array}{lll} x \in (A \cup B) \cap C & \iff &
x\in (A \cup B) \quad \mathrm{and} \quad x\in C \\
& \iff & (x\in A \quad \mathrm{or} \quad x\in B) \quad \mathrm{and} \quad x\in C \\
& \iff & (x\in A \quad \mathrm{and} \quad x \in C) \quad \mathrm{or} \quad (x\in B \quad \mathrm{and} \quad x\in C) \\
& \iff & (x\in A\cap C) \quad \mathrm{or} \quad (x\in B \cap C)\\
& \iff & x \in (A \cap C) \cup (B \cap C),
\end{array}$$which establishes the equality.
\end{answer}
\end{pro}
\begin{pro}
Prove the following associative laws: $$ A\cap (B \cap C) = (A\cap
B)\cap C, \quad A \cup (B\cup C) = (A\cup B)\cup C.
$$
\end{pro}
\begin{pro}
Prove that $$ A\cap B = A \iff A \subseteq B.
$$
\end{pro}
\begin{pro}
Prove that $$ A\cup B = A \iff B \subseteq A.
$$
\end{pro}
\begin{pro}
Prove that $$ A\subseteq B \implies  A \cap C \subseteq B\cap C.
$$
\end{pro}
\begin{pro}
Prove that $$ A\subseteq B \quad \mathrm{and}\quad C \subseteq B
\implies A \cup C \subseteq B.
$$
\end{pro}
\begin{pro}
Prove the following distributive laws: $$ A\cap (B\cup C) = (A\cap
B)\cup (A\cap C), \qquad A\cup (B\cap C) = (A\cup B)\cap (A\cup C).
$$
\end{pro}
\begin{pro}
Is there any difference between the sets $\varnothing$,
$\{\varnothing\}$ and $\{\{\varnothing\}\}$? Explain.
\end{pro}
\begin{pro}
Is the Cartesian product associative? Explain.
\end{pro}
\begin{pro}
Let $A,B$, and $C$ be sets. Shew that
$$A\times (B\setminus C) = (A\times B)\setminus (A\times C).$$

\begin{answer}We check the two statements
$$x\in A\times (B\setminus C) \Longleftrightarrow x\in (A\times B)\setminus (A\times C).$$
Let us prove first $\Longrightarrow$. By definition of $\times$, $x
= (a,b)$, where $a\in A, b\in B, b\notin C$. Thus $x\in A\times B$
but $x\notin A\times C$. By definition of $\setminus$ we are done.
Now we  prove the assertion $\Longleftarrow$. By definition of
$\times$ and $\setminus$, $x=(a,b)$ where $a\in A, b\in B$. Since
$x\notin A\times C$, we observe that $b\notin C$. Thus $a\in A, b\in
B\setminus C$, and we gather that $x\in A\times (B\setminus C)$.
\end{answer}
\end{pro}
\begin{pro}
Prove that a set with $N\in\BBN$ elements has exactly $2^N$ subsets.
\begin{answer}
Attach a binary code to each element of the subset, $1$ if the
element is in the subset and $0$ if the element is not in the
subset.  The total number of subsets is the total number of such
binary codes, and there are $2^N$ in number.
\end{answer}
\end{pro}


\end{multicols}
\section{Numerical Functions}


\begin{df}
By a {\em (numerical) function}\index{function}
$f:\dom{f}\rightarrow\target{f}$ we mean the collection of the
following ingredients:
\begin{dingautolist}{202} \item a {\em name} for the function.
Usually we use the letter $f$.
\item  a set of real number inputs called the {\em domain} of the
function. The domain of $f$ is denoted by $\dom{f}\subseteqq \BBR$.
\item an {\em input parameter }, also called {\em independent
variable} or {\em dummy variable}. We usually denote a typical input
by the letter $x$.\index{function!domain}
\item a set of possible real number outputs of the function, called the {\em
target set} of the function. The target set of $f$ is denoted by
$\target{f}\subseteqq \BBR$.\index{function!target set}
\item an {\em assignment rule} or {\em formula}, assigning to {\bf
every  input} a {\bf unique} output. This assignment rule for $f$ is
usually denoted by $x\mapsto f(x)$. The output of $x$ under $f$ is
also referred to as the {\em image of $x$ under $f$},
\index{function!image} and is denoted by $f(x)$.
\index{function!assingment rule}
\end{dingautolist}
\end{df}

The notation\footnote{Notice the difference in the arrows. The
straight arrow $\longrightarrow$ is used to mean that a certain set
is associated with another set, whereas the arrow $\mapsto$ (read
``maps to'') is used to denote that an input becomes a certain
output.}
$$\fun{f}{x}{f(x)}{\dom{f}}{\target{f}}
$$ read ``the function $f$, with domain $\dom{f}$, target set
$\target{f}$, and assignment rule $f$ mapping $x$ to $f(x)$''
conveys all the above ingredients.


\begin{rem}
Oftentimes we will only need to mention the assignment rule of a
function, without mentioning its domain or target set. In such
instances we will sloppily say  ``the function $f$'' or more
commonly, ``the function $x\mapsto f(x)$'', e.g., the square
function $x\mapsto x^2$.\footnote{This corresponds to the even
sloppier American usage ``the function $f(x)=x^2$.''}
\end{rem}



\begin{df}
The {\em image} $\im{f}$ of a function $f$ is its set of actual
outputs. In other words, $$\im{f} = \{f(a): a\in\dom{f}\}.$$ Observe
that we always have $\im{f} \subseteq \target{f}$.  For a set $A$,
we also define
$$ f(A)=\{f(a): a\in A\}. $$

\end{df}

\begin{thm}
Let $f: X \rightarrow Y$ be a function and let $A\subseteqq X$,
$A'\subseteqq X$. Then
\begin{enumerate}
\item $A \subseteqq A' \implies f(A) \subseteqq f(A')$
\item $f(A\cup A') =f(A)\cup f(A')$
\item $f(A\cap A') \subseteqq f(A)\cap f(A') $
\item $ f(A)\setminus f(A')  \subseteqq  f(A\setminus A') $

\end{enumerate}
\end{thm}
\begin{pf}

\begin{enumerate}
\item $x\in A \implies x\in A' $ and hence $f(x) \in f(A)\implies f(x)\in f(A') \implies f(A)\subseteqq f(A')$
\item Since $A \subseteqq A\cup A'$ and $A' \subseteqq A\cup A'$, we
have $f(A) \subseteqq f(A\cup A')$ and $f(A') \subseteqq f(A\cup
A')$, by part (1) and thus $f(A) \cup f(A') \subseteqq f(A\cup
A')$. Moreover, if $y\in f(A\cup A')$, then $\exists x\in A\cup A'$
such that $y=f(x)$. Then either $x\in A$ and so $f(x)\in f(A)$ or
$x\in A'$ and so $f(x)\in f(A')$. Either way, $f(x)\in f(A)\cup
f(A')$ and $$y\in f(A\cup A')\implies y\in f(A)\cup f(A')\implies
f(A \cup A')\subseteqq f(A)\cup f(A').$$Hence $$f(A \cup
A')=f(A)\cup f(A'). $$
\item Let $y\in f(A\cap A')$.  Then $\exists x\in A\cap A'$ such that
$f(x)=y$. Thus we have both $x\in A \implies f(x)\in f(A)$ and $x\in
A' \implies f(x)\in f(A')$. Therefore $f(x)\in f(A)\cap f(A')$ and
we conclude that $f(A\cap A') \subseteqq f(A)\cap f(A') $.
\item Let $ y\in f(A)\setminus f(A')$. Then $y\in f(A)$ and $y\notin f(A')$. Thus $\exists x\in A$ such that
$f(x)=y$. Since $y\notin f(A')$, then $x\notin A'$. Therefore $x\in
A\setminus A'$ and finally, $y\in f(A\setminus A')$. This means that
$ f(A)\setminus f(A')  \subseteqq  f(A\setminus A') $ as claimed.
\end{enumerate}

\end{pf}


\subsection{Injective and Surjective Functions}

\begin{df}
A function is {\em injective} or {\em one-to-one} whenever two
different values of its domain generate two different values in its
image. A function is {\em surjective} or {\em onto} if every element
of its target set is hit, that is, the target set is the same as the
image of the function. A function is {\em bijective} if it is both
injective and surjective.
\end{df}

\begin{exa}
The function
$$\fun{a}{x}{x^2}{\BBR}{\BBR}
$$is neither injective nor surjective.

\bigskip

The function $$\fun{b}{x}{x^2}{\BBR}{\lcro{0}{+\infty}}
$$is surjective but not injective.

\bigskip

The function $$\fun{c}{x}{x^2}{\lcro{0}{+\infty}}{\BBR}
$$is injective but not surjective.

\bigskip

The function $$\fun{d}{x}{x^2}{\lcro{0}{+\infty}}{\lcro{0}{+\infty}}
$$is a bijection.
\end{exa}


A bijection between two sets essentially tells us that the two sets
have the same size. We will make this statement more precise now for
finite sets.
\begin{thm}\label{thm:size_domain_image_injections_surjections}
Let $f:A\rightarrow B$ be a function, and let $A$ and $B$ be finite.
If $f$ is injective, then $\card{A} \leq \card{B}$. If $f$ is
surjective then $\card{B}\leq \card{A}$. If $f$ is bijective, then
$\card{A} = \card{B}$.
\end{thm}
\begin{pf}
Put $n = \card{A}$, $A = \{x_1,x_2, \ldots ,x_n\}$ and $m =
\card{B}$, $B = \{y_1,y_2, \ldots ,y_m\}$.

\bigskip
If $f$ were injective then $f(x_1), f(x_2), \ldots , f(x_n)$ are all
distinct, and among the $y_k$. Hence $n \leq m$.

\bigskip
If $f$ were surjective then each $y_k$ is hit, and for each, there
is an $x_i$ with $f(x_i) = y_k$. Since distinct $y_k$ must have
distinct preimages, there are at least $m$ different elements among
the $x_i$, and so $n \geq m$.
\end{pf}

\subsection{Algebra of Functions}

\begin{df}
Let $f: \dom{f}\rightarrow \target{f}$ and $g: \dom{g}\rightarrow
\target{g}$. Then $\dom{f\pm g} = \dom{f}\cap \dom{g}$ and the sum
(respectively, difference) function $f + g$ (respectively, $f - g$)
is given by
$$\fun{f \pm g}{x}{f(x) \pm g(x)}{\dom{f}\cap \dom{g}}{\target{f\pm g}}.
$$
In other words, if $x$ belongs both to the domain of $f$ and $g$,
then $$(f \pm g)(x) = f(x) \pm g(x).$$
\end{df}

\begin{df}
Let $f: \dom{f}\rightarrow \target{f}$ and $g: \dom{g}\rightarrow
\target{g}$. Then $\dom{fg} = \dom{f}\cap \dom{g}$ and  the product
function $fg$ is given by
$$\fun{fg}{x}{f(x)\cdot g(x)}{\dom{f}\cap \dom{g}}{\target{fg}}.
$$
In other words, if $x$ belongs both to the domain of $f$ and $g$,
then $$(fg)(x) = f(x)\cdot g(x).$$
\end{df}

\begin{df}
Let $g:\dom{g} \rightarrow \target{g}$ be a function. The {\em
support} of $g$, denoted by $\supp{g}$ is the set of elements in
$\dom{g}$ where $g$ does not vanish, that is $$\supp{g} =
\{x\in\dom{g}: g(x) \neq 0\}.$$
\end{df}

\begin{df}
Let $f: \dom{f}\rightarrow \target{f}$ and $g: \dom{g}\rightarrow
\target{f}$. Then $\dom{\dfrac{f}{g}} = \dom{f}\cap \supp{g}$ and
the quotient function $\dfrac{f}{g}$ is given by
$$\fun{\dfrac{f}{g}}{x}{\dfrac{f(x)}{g(x)}}{\dom{f}\cap \supp{g}}{\target{f/g}}.
$$
In other words, if $x$ belongs both to the domain of $f$ and $g$ and
$g(x) \neq 0$, then $\dfrac{f}{g}(x) = \dfrac{f(x)}{g(x)}.$
\end{df}

\begin{df}
Let $f: \dom{f} \rightarrow \target{f}$, $g: \dom{g} \rightarrow
\target{g}$ and let $U = \{x\in\dom{g}: g(x) \in \dom{f}\}$. We
define the {\em composition} function of $f$ and $g$ as

\begin{equation}
\fun{f\circ g}{x}{f(g(x))}{U}{\target{f\circ g}} .\end{equation}We
read $f\circ g$ as ``$f$ {\em composed with} $g$.''
\end{df}



\subsection{Inverse Image}

\begin{df}Let $X$ and $Y$ be subsets of $\BBR$ and let $f:X\rightarrow Y$ be a
function. Let $B\subseteqq Y$. The {\em inverse image of $B$ by $f$}
is the set $$f^{-1}(B) =\{x\in X: f(x)\in B\}.$$ If $B=\{b\}$
consists of only one element, we write, abusing notation,
$f^{-1}(\{b\})=f^{-1}(b)$. It is clear that $f^{-1}(Y)=X$ and
$f^{-1}(\varnothing)=\varnothing$.
\end{df}

\begin{exa} Let $$\fun{f}{x}{x^2}{\{-2,-1,0,1,3\}}{\{0,1,4,5,9\}}.
$$Then $f^{-1}(\{0,1\})=\{0,-1,1\}$,  $f^{-1}(1)=\{-1,1\}$,
$f^{-1}(5)=\varnothing$, $f^{-1}(4)=-2$, $f^{-1}(0)=0$, etc. Notice
that we have abused notation in all but the first example.
\end{exa}
\begin{thm}
Let $f: X \rightarrow Y$ be a function and let $B\subseteqq Y$,
$B'\subseteqq Y$. Then
\begin{enumerate}
\item $B \subseteqq B' \implies f^{-1}(B) \subseteqq f^{-1}(B')$
\item $f^{-1}(B\cup B') =f^{-1}(B)\cup f^{-1}(B')$
\item $f^{-1}(B\cap B') = f^{-1}(B)\cap f^{-1}(B') $
\item $ f^{-1}(B)\setminus f^{-1}(B') = f^{-1}(B\setminus B') $

\end{enumerate}
\end{thm}

\begin{pf}

\begin{enumerate}
\item Assume  $x\in f^{-1}(B)$. Then there is $y\in B\subseteqq B'$
such that $f(x)=y$. But $y$ is also in $B'$ so  $ x\in f^{-1}(B')$.
Thus $f^{-1}(B) \subseteqq f^{-1}(B')$.
\item Since $B\subseteqq B\cup B'$ and $B'\subseteqq B\cup B'$, we have
$f^{-1}(B)\subseteqq f^{-1}(B\cup B')$ and $f^{-1}(B')\subseteqq
f^{-1}(B\cup B')$, by part (1). Thus $f^{-1}(B)\cup
f^{-1}(B')\subseteqq f^{-1}(B\cup B')$. Now, let $x\in f^{-1}(B\cup
B')$. There is $y\in B\cup B'$ such that $f(x)=y$. Either $y\in B$
and so $y\in B \implies x\in f^{-1}(B)$ or $y\in B'$ and so $y\in B
\implies x\in f^{-1}(B')$. Either way,  $x\in f^{-1}(B) \cup
f^{-1}(B')$. Thus $f^{-1}(B\cup B')\subseteqq f^{-1}(B) \cup
f^{-1}(B')$. We conclude that $f^{-1}(B\cup B')= f^{-1}(B) \cup
f^{-1}(B')$.

\item Let $x\in f^{-1}(B\cap B')$.  Then $\exists y\in B\cap B'$ such that
$f(x)=y$. Thus we have both $y\in B \implies x\in f^{-1}(B)$ and
$y\in B' \implies x\in f^{-1}(B')$. Therefore $x\in f^{-1}(B)\cap
f^{-1}(B')$ and we conclude that $f^{-1}(B\cap B') \subseteqq
f^{-1}(B)\cap f^{-1}(B') $. Now, let $x\in f^{-1}(B)\cap f^{-1}(B')
$. Then $x\in f^{-1}(B)$ and $x\in f^{-1}(B')$. Then $f(x)\in B$ and
$f(x)\in B'$. Thus $f(x)\in B\cap B'$ and so $x\in f^{-1}(B\cap
B')$. Hence $f^{-1}(B)\cap f^{-1}(B')\subseteqq f^{-1}(B\cap B')$
also, and we conclude that $f^{-1}(B)\cap f^{-1}(B')= f^{-1}(B\cap
B')$.
\item Let $ x\in f^{-1}(B)\setminus f^{-1}(B')$. Then $x\in f^{-1}(B)$ and $x\notin f^{-1}(B')$. Thus $f(x)\in
B$ and  $f(x)\notin B'$. Thus $f(x)\in B\setminus B'$ and therefore
$x\in f^{-1}(B\setminus B')$, giving $f^{-1}(B)\setminus f^{-1}(B')
\subseteqq f^{-1}(B\setminus B')$. Now, let $x\in f^{-1}(B\setminus
B')$. Then $f(x)\in B\setminus B'$, which means that $f(x)\in B$ but
$f(x)\notin B'$. Thus $x\in f^{-1}(B)$ but $x\notin f^{-1}(B')$,
which gives $x\in f^{-1}(B)\setminus f^{-1}(B')$ and so $
f^{-1}(B\setminus B')\subseteqq f^{-1}(B)\setminus f^{-1}(B')$. This
establishes the desired equality.
\end{enumerate}

\end{pf}


\begin{thm}
Let $f:X\rightarrow Y$ be a function. Let $A\times B\subseteqq
X\times Y$. Then
\begin{enumerate}
\item $A \subseteqq (f^{-1}\circ f)(A)$
\item $(f\circ f^{-1})(B)\subseteqq B$
\end{enumerate}
\end{thm}
\begin{pf}
We have
\begin{enumerate}
\item Let $x\in A$. Then $\exists y\in Y$ such that $y=f(x)$. Thus
$y\in f(A)$. Therefore $x\in f^{-1}(f(A))$.
\item Let $y\in (f\circ f^{-1})(B)$. Then $\exists x\in f^{-1}(B)$ such that $f(x)
=y$. Thus $x\in f^{-1}(y)$. Hence $f(x)\in B$. Therefore $y\in B$.
\end{enumerate}
\end{pf}

\subsection{Inverse Function}

\begin{df}
Let $A\times B \subseteqq \BBR^2$. A function $F: A \rightarrow B$
is said to be {\em invertible} if there exists a function $F^{-1}$
(called the {\em inverse} of $F$) such that $F\circ F^{-1} = \idefun
_B$ and $F^{-1}\circ F = \idefun _A$. Here $\idefun _S$ is the
identity on the set $S$ function with rule $\idefun _S(x) = x.$
\end{df}
The central question is now: given a function $F: A \rightarrow B$,
when is $F^{-1}: B \rightarrow A$ a function? The answer is given in
the next theorem.
\begin{thm}\label{thm:invertible<->bijective}
Let $A\times B \subseteqq \BBR^2$. A function $f: A \rightarrow B$
is invertible if and only if it is a bijection. That is, $f^{-1}: B
\rightarrow A$ is a function if and only if $f$ is bijective.
\end{thm}
\begin{pf} Assume first that $f$ is invertible. Then there is a
function $f^{-1}: B \rightarrow A$ such that
\begin{equation}
f\circ f^{-1} = \idefun _B \ \ {\rm and} \ \ f^{-1}\circ f = \idefun
_A.\end{equation} Let us prove that $f$ is injective and surjective.
Let $s, t$ be in the domain of $f$ and such that $f(s) = f(t)$.
Applying $f^{-1}$ to both sides of this equality we get
$(f^{-1}\circ f)(s) = (f^{-1}\circ f)(t)$. By the definition of
inverse function, $(f^{-1}\circ f)(s) = s$ and $(f^{-1}\circ f)(t) =
t$. Thus $s = t.$ Hence $f(s) = f(t) \implies s = t$ implying that
$f$ is injective. To prove that $f$ is surjective we must shew that
for every $b \in f(A) \ \exists a \in A$ such that $f(a) = b.$ We
take $a = f^{-1}(b)$ (observe that $f^{-1}(b) \in A$). Then $f(a) =
f(f^{-1}(b)) = (f\circ f^{-1})(b) = b$ by definition of inverse
function. This shews that $f$ is surjective. We conclude that if $f$
is invertible then it is also a bijection.

\bigskip


   Assume now that $f$ is a bijection. For every $b\in B$ there
exists a unique $a$ such that $f(a) = b$. This makes the rule $g: B
\rightarrow A$ given by $g(b) = a$  a function. It is clear that
$g\circ f = \idefun _A$ and $f\circ g = \idefun _B. $  We may thus
take $f^{-1} = g.$ This concludes the proof. \end{pf}




\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}Find all functions with domain $\{a, b\}$ and target set $\{c, d\}$. \begin{answer} There are $2^2 = 4$  such functions, namely:
\begin{dingautolist}{202}
\item $f_1$ given by $f_1(a) = f_1(b) = c.$  Observe that
$\im{f_1} = \{c\}$. \item $f_2$ given by $f_2(a) = f_2(b) = d.$
Observe that $\im{f_2} = \{d\}$. \item $f_3$ given by $f_3(a) = c,
f_3(b) = d.$ Observe that $\im{f_3} = \{c, d\}$.  \item $f_4$ given
by $f_4(a) = d, f_4(b) = c.$ Observe that $\im{f_4} = \{c, d\}$.
\end{dingautolist}
\end{answer}
\end{pro}

\begin{pro} Let $A$, $B$ be finite sets with $\card{A} = n$ and $\card{B} =
m$. Prove that \begin{itemize}
\item The number of functions from $A$ to $B$ is $m^n$.
\item If $n \leq m$, the number of injective functions from $A$ to $B$
is $m(m-1)(m-2)\cdots (m-n+1)$. If $n>m$ there are no injective
functions from $A$ to $B$.
\end{itemize}
\begin{answer}
Each of the $n$ elements of $A$ must be assigned an element of $B$,
and hence there are $\underbrace{m\cdot m \cdots m}_{n\
\mathrm{factors}} = m^n$ possibilities, and thus $m^n$ functions. If
a function from $A$ to $B$ is injective then we must have $n \leq m$
in view of Theorem
\ref{thm:size_domain_image_injections_surjections}. If to different
inputs we must assign different outputs then to the first element of
$A$ we may assign any of the $m$ elements of $B$, to the second any
of the $m-1$ remaining ones, to the third any of the $m-2$ remaining
ones, etc., and so we have $m(m-1)\cdots (m-n+1)$ injective
functions.
\end{answer}
\end{pro}

\begin{pro}
Let $A$ and $B$ be two finite sets with $\card{A} = n$ and $\card{B}
= m$. If $n< m$ prove that  there are no surjections from $A$ to
$B$. If $n \geq m$ prove that the number of surjective functions
from $A$ to $B$ is
$$m^n - \binom{m}{1}(m-1)^n + \binom{m}{2}(m-2)^n - \binom{m}{3}(m-3)^n + \cdots + (-1)^{m-1}\binom{m}{m-1}(1)^{n}.  $$
\end{pro}

\begin{pro}
Let $h:\BBR \rightarrow \BBR$ be given by $h(1 - x) = 2x$. Find
$h(3x)$. \begin{answer} Rename the independent variable,  say $h(1 -
s) = 2s.$ Now, if $1 - s = 3x$ then $s = 1 - 3x.$ Hence
$$h(3x) = h(1 - s) = 2s = 2(1 - 3x) = 2 - 6x.$$
\end{answer}
\end{pro}
\begin{pro}
Consider the polynomial
$$(1 - x^2 + x^4)^{2003} = a_0 + a_1x+a_2x^2 + \cdots +
a_{8012}x^{8012}.
$$Find
\begin{dingautolist}{202}
\item $a_0$ \item $a_0 + a_1+a_2 + \cdots + a_{8012}$ \item $a_0 -
a_1+a_2 -a_3 + \cdots -a_{8011}+ a_{8012}$
 \item $a_0+a_2 + a_4 +  \cdots + a_{8010} + a_{8012}$ \item
 $a_1+a_3+ \cdots +a_{8009}+a_{8011}$
\end{dingautolist}
\begin{answer} Put $$p(x) = (1 - x^2 + x^4)^{2003} = a_0 + a_1x+a_2x^2 +
\cdots + a_{8012}x^{8012}.$$Then
\begin{dingautolist}{202}
\item $a_0 = p(0) = (1 - 0^2 + 0^4)^{2003} = 1.$ \item $a_0 +
a_1+a_2 + \cdots + a_{8012} = p(1) = (1 - 1^2 + 1^4)^{2003} = 1.$
 \item $$\begin{array}{lll}a_0 -
a_1+a_2-a_3 + \cdots -a_{8011} + a_{8012} &= &  p(-1)\\ & = & (1 -
(-1)^2 + (-1)^4)^{2003}\\ & = & 1. \end{array}$$
\item The required sum is $\dfrac{p(1)+p(-1)}{2} = 1$.
\item The required sum is $\dfrac{p(1)-p(-1)}{2} = 0$.
\end{dingautolist}
\end{answer}
\end{pro}
\begin{pro}
Let $f:\BBR \rightarrow \BBR$, be a function such that $\forall x\in
]0;+\infty[$,
$$[f(x^3 + 1)]^{\sqrt{x}} = 5,$$ find the value of
$$\left[f\left(\frac{27 + y^3}{y^3}\right)\right]^{\sqrt{\frac{27}{y}}}$$ for $y\in ]0;+\infty[$.
\begin{answer}We have
$$\begin{array}{lll}\left[f\left(\frac{27 + y^3}{y^3}\right)\right]^{\sqrt{\frac{27}{y}}}
 & = & \left[f\left( \left(\frac{3}{y}\right)^3 +
1\right)\right]^{3\sqrt{\frac{3}{y}}} \\ & = & \left(\left[f\left(
\left(\frac{3}{y}\right)^3 +
1\right)\right]^{\sqrt{\frac{3}{y}}}\right)^3 \\
& = &  5^3 \\
& = & 125.
\end{array}$$
\end{answer}
\end{pro}


\begin{pro}
Let $f$ satisfy $f(n + 1) = (-1)^{n + 1}n - 2f(n), n \geq 1.$ If
$f(1) = f(1001)$ find $$f(1) + f(2) + f(3) + \cdots + f(1000).$$
\begin{answer} We have \\



\begin{tabular}{lllll}
$f(2)$ & = & $(-1)^21 - 2f(1)$ & = & $1 - 2f(1)$ \\
$f(3)$ & = & $(-1)^32 - 2f(2)$ & = & $-2 - 2f(2)$ \\
$f(4)$ & = & $(-1)^43 - 2f(3)$ & = & $3 - 2f(3)$ \\
$f(5)$ & = & $(-1)^54 - 2f(4)$ & = & $-4 - 2f(4)$ \\
$\vdots$ & $\vdots$ & $\vdots$ & $\vdots$ & $\vdots$ \\
$f(999)$ & = & $(-1)^{999}998 - 2f(998)$ & = & $-998 - 2f(998)$ \\
$f(1000)$ & = & $(-1)^{1000}999 - 2f(999)$ & = & $999 - 2f(999)$ \\
$f(1001)$ & = & $(-1)^{1001}1000 - 2f(1000)$ & = & $-1000 - 2f(1000)$ \\
\end{tabular}


\bigskip
Adding columnwise,
$$f(2) + f(3) + \cdots + f(1001) = 1 - 2 + 3 - \cdots + 999 - 1000 - 2(f(1) + f(2)
+ \cdots + f(1000)).$$ This gives
$$2f(1) + 3(f(2) + f(3) + \cdots + f(1000)) + f(1001) = - 500.$$
Since $f(1) = f(1001)$ we have $2f(1) + f(1001) = 3f(1).$
Therefore$$f(1) + f(2) + \cdots + f(1000)  = - \frac{500}{3}.$$
\end{answer}
\end{pro}
\begin{pro}
If $f(a)f(b) = f(a + b) \ \forall \ a, b \in \BBR$ and $f(x) > 0 \
\forall \ x \in \BBR$, find $f(0)$. Also, find $f(-a)$ and $f(2a)$
in terms of $f(a).$ \begin{answer} Set $a = b = 0.$ Then $(f(0))^2 =
f(0)f(0) = f(0 + 0) = f(0)$. This gives $f(0)^2 = f(0).$ Since $f(0)
> 0$ we can divide both sides of this equality by $f(0)$ to get $f(0) = 1$.

\bigskip

Further,  set $b = -a.$ Then $f(a)f(-a) = f(a - a) = f(0) = 1.$
Since $f(a) \neq 0,$ we may divide by $f(a)$ to obtain $f(-a) =
\dis{\frac{1}{f(a)}}$.


Finally taking $a = b$ we obtain $(f(a))^2 = f(a)f(a) = f(a + a) =
f(2a).$ Hence $f(2a) = (f(a))^2$
\end{answer}
\end{pro}

\begin{pro}
Prove that
$\fun{f}{x}{\dfrac{x-1}{x+1}}{\BBR\setminus\{-1\}}{\BBR\setminus\{1\}}$
is a bijection and find $f^{-1}$.
\begin{answer}
To prove that $f$ is injective, we prove that $f(a)=f(b)\implies
a=b$. We have
$$ \begin{array}{lll} f(a)=f(b) & \implies & \dfrac{a-1}{a+1} = \dfrac{b-1}{b+1} \\
&\implies & (a-1)(b+1) = (a+1)(b-1)\\
&\implies & ab+a-b-1 = ab-a+b-1\\
& \implies & 2a=2b\\
&\implies & a=b,\\
\end{array}$$whence $f$ is injective.
To prove that $f$ is surjective we must prove that  any $y \in
\BBR\setminus\{1\}$ has a pre-image $a\in \BBR\setminus \{-1\}$ such
that $f(a)=y$. That is, $$\dfrac{a-1}{a+1}=y\implies a-1=ya+y
\implies a-ya=1+y \implies a(1-y)=1+y \implies a=\dfrac{1+y}{1-y}.$$
Thus $f\left(\dfrac{1+y}{1-y}\right)=y$, and $f$ is surjective. This
also serves to prove that $f^{-1}(x)=\dfrac{1+x}{1-x}$.
\end{answer}
\end{pro}


\begin{pro}
Let $f^{[1]}(x) = f(x) = x + 1, f^{[n + 1]} = f\circ f^{[n]}, n \geq
1.$ Find a closed formula for $f^{[n]}$ \begin{answer} We have
$f^{[2]}(x) = f(x + 1) = (x + 1) + 1 = x + 2, f^{[3]}(x) = f(x + 2)
= (x + 2) + 1 = x + 3$ and so, recursively, $f^{[n]}(x) = x + n.$
\end{answer}
\end{pro}

\begin{pro}
Let $f, g:\lcrc{0}{1}\rightarrow\BBR$ be functions. Demonstrate that
there exist $(a, b)\in \lcrc{0}{1}^2$ such that $\dfrac{1}{4}\leq
\absval{f(a)+g(b)-ab}$.
\end{pro}
\begin{pro}
Demonstrate that there is no function $f:\BBR\setminus
\{1/2\}\rightarrow \BBR$ such that $$  x\in \BBR\setminus
\{1/2\}\implies
f(x)\left(f\left(\dfrac{x-1}{2x-1}\right)\right)=x^2+x+1$$
\end{pro}
\begin{pro}
Find all functions $f:\BBR\setminus \{-1,0\}\rightarrow \BBR$ such
that $$ x\in \BBR\setminus \{-1,0\}\implies
f(x)+f\left(\dfrac{-1}{x+1}\right)=3x+2.$$
\end{pro}
\begin{pro}
Let $f^{[1]}(x) = f(x) = 2x, f^{[n + 1]} = f\circ f^{[n]}, n \geq
1.$ Find a closed formula for $f^{[n]}$ \begin{answer} We have
$f^{[2]}(x) = f(2x) = 2^2x, f^{[3]}(x) = f(2^2x) = 2^3x$ and so,
recursively, $f^{[n]}(x) = 2^nx.$
\end{answer}
\end{pro}
\begin{pro}
Find all functions $g:\BBR\rightarrow \BBR$ that satisfy $g(x + y) +
g(x - y) = 2x^2 + 2y^2$. \begin{answer} Let $y = 0.$ Then $2g(x) =
2x^2$, that is, $g(x) = x^2.$ Let us check that $g(x) = x^2$ works.
We have
$$g(x + y) + g(x - y) = (x + y)^2 + (x - y)^2 = x^2 + 2xy + y^2 + x^2 - 2xy + y^2 = 2x^2 + 2y^2,$$
which is the functional equation given. Our choice of $g$ works.
\end{answer}
\end{pro}
\begin{pro}
Find all the functions $f:\BBR\rightarrow \BBR$ that satisfy $f(xy)
= yf(x)$.
\begin{answer} Let $x = 1$. Then $f(y) = yf(1)$. Since $f(1)$ is a
constant, we may let $k = f(1).$ So all the functions satisfying the
above equation satisfy $f(y) = ky.$
\end{answer}
\end{pro}
\begin{pro}
Find all functions $f:\BBR\setminus \{0\}\rightarrow \BBR$ for which
$$f(x) + 2f\left(\frac{1}{x}\right) = x.$$
\begin{answer} From $\dis{f(x) + 2f(\frac{1}{x}) = x}$ we obtain
$\dis{f(\frac{1}{x}) =  \frac{x}{2} - \frac{1}{2}f(x)}$. Also,
substituting $1/x$ for $x$ on the original equation we get
$$f(1/x) + 2f(x) = 1/x.$$Hence
$$f(x) = \frac{1}{2x} - \frac{1}{2}f(1/x) = \frac{1}{2x} - \frac{1}{2}\left(\frac{x}{2} - \frac{1}{2}f(x)\right),$$
which yields $\dis{f(x) = \frac{2}{3x} - \frac{x}{3}}$. \\
\end{answer}
\end{pro}

\begin{pro}Find all functions $f:\BBR\setminus\{-1\}\rightarrow
\BBR$ such that $$(f(x))^2\cdot f\left(\frac{1 - x}{1 + x}\right) =
64x.$$
\begin{answer}
 We have
$$(f(x))^2\cdot f\left(\frac{1 - x}{1 + x}\right) = 64x, $$whence
$$(f(x))^4\cdot \left(f\left(\frac{1 - x}{1 + x}\right)\right)^2 = 64^2x^2 \qquad
(I) $$
 Substitute $x$ by $\frac{1 - x}{1 + x}$. Then
$$f\left(\frac{1
- x}{1 + x}\right) ^2 f(x) = 64\left(\frac{1 - x}{1 + x}\right) .
\qquad (II)
$$
Divide (I) by (II),
$$ f(x)^3 = 64 x^2\left(\frac{1+x}{1-x}\right),  $$
from where the result follows.
\end{answer}
\end{pro}


\begin{pro}
Let $f^{[1]} = f$ be given by $\dis{f(x) = \frac{1}{1 - x}}$. Find \\
(i) $f^{[2]}(x) = (f\circ f)(x)$, \\
(ii) $f^{[3]}(x) = (f\circ f\circ f)(x)$, and \\
(iii) $f^{[69]} = \underbrace{(f\circ f\circ \cdots f\circ f)}_{69 \
{\rm compositions \ with \ itself}}(x)$.
\begin{answer} We have (i) $\dis{f^{[2]}(x) = (f\circ f)(x) = f(f(x)) = \frac{1}{1 - \frac{1}{1 - x}} = \frac{x - 1}{x}}$. \\
(ii) $\dis{f^{[3]}(x) = (f\circ f \circ f)(x) = f(f^{[2]}(x))) =
f\left(\frac{x - 1}{x}\right) = \frac{1}{1 - \frac{x - 1}{x}}
= x}$. \\
(iii) Notice that $f^{[4]}(x) = (f\circ f^{[3]})(x) = f(f^{[3]}(x))
= f(x) = f^{[1]}(x)$. We see that $f$ is cyclic of period 3, that
is, $f^{[1]} = f^{[4]} = f^{[7]} = \ldots, f^{[2]} = f^{[5]} =
f^{[8]} = \ldots, f^{[3]} = f^{[6]} = f^{[9]} = \ldots$. Hence
$f^{[69]}(x) = f^{[3]}(x) = x.$
\end{answer}
\end{pro}
\begin{pro}
Let $f:A\rightarrow B$ and $g:B\rightarrow C$ be functions. Shew
that (i) if $g\circ f$ is injective, then $f$ is injective. (ii)  if
$g\circ f$ is surjective, then $g$ is surjective.
\begin{answer}
To see (i) observe that
$$f(a) = f(b)\implies g(f(a))=g(f(b))\implies a=b, $$whence $f$ is injective. (The first
implication is clear, the second implication follows because $g\circ
f$ is injective.)

\bigskip

To see (ii), given $y\in C$, \quad $\exists x\in A$ such that
$g(f(x))=y$, since $g\circ f$ is surjective. But then, letting
$a=f(x)\in B$ we have $g(a)=y$ and $g$ is surjective.
\end{answer}
\end{pro}

\end{multicols}




\section{Countability}

\begin{df} A set $X$ is
countable if either it is finite or if there is a bijection $f : X
\rightarrow \BBN$, that is, the set $X$ has  as many elements as
$\BBN$.
\end{df}
Any countable set can thus be enumerated as a sequence
$$ x_1, x_2, x_3, \ldots . $$
Thus the strictly positive integers can be enumerated as
customarily:
$$1,2,3,\ldots .  $$
Another possible enumeration\footnote{Which is relevant in chaos
theory, for {\em Sharkovskii's Theorem}.} is the following
$$ 3, 5, 7, 9, \ldots, \qquad  2\cdot 3, 2\cdot 5, 2\cdot 7,2\cdot 9, \ldots,
\qquad   2^2\cdot 3, 2^2\cdot 5, 2^2\cdot 7,  2^2\cdot 9, \ldots,
\qquad \ldots, 2^4, 2^3, 2^2, 2, 1,
$$that is, we start  with the odd integers in increasing order,
then $2$ times the odd integers, $2^2$ times the odd integers, etc.,
and at the end we put the powers of $2$ in decreasing order.
\begin{lem}\label{lem:subsets-of-N-countable-are}
Any subset $X \subseteqq \BBN$ is countable.
\end{lem}



\begin{pf}If $X$ is finite, then there is nothing to prove. If $X$
is infinite, we can arrange the elements of $X$ in increasing order,
say,
$$ x_1< x_2< x_3< \cdots. $$
We then map the smallest element $x_1\in X$ to $1$, the next
smallest $x_2$ to $2$, etc.
\end{pf}
\begin{rem}
Hence, even though $2\BBN \subsetneqq \BBN$, the sets $2\BBN$ and
$\BBN$ have the same number of elements. This can also be seen by
noticing that $f:\BBN \rightarrow 2\BBN$ given by $x_n=2n$ is a
bijection.
\end{rem}
\begin{lem}
A set $X$ is countable if and only if there is an injection $f : X
\rightarrow \BBN$.
\end{lem}

\begin{pf}
The assertion is evident if $X$ is finite.  Hence assume $X$ is
infinite. If $f : X \rightarrow \BBN$ is an injection then $f(X)$ is
an infinite subset of $\BBN$. Hence there is a bijection $g : f(X)
\rightarrow \BBN$ by virtue of Lemma
\ref{lem:subsets-of-N-countable-are}. Thus $(g\circ f) : X
\rightarrow \BBN$ is a bijection.
\end{pf}

\begin{rem}
An obvious consequence of the above lemma is that if $X'$ is
countable and there is an injection $f : X \rightarrow  X'$ then $X$
is countable.
\end{rem}
\begin{thm}
$\BBZ$ is countable.
\end{thm}

\begin{pf}
One can take, as a bijection between the two sets, for example, $f :
\BBZ \rightarrow \BBN$,
$$
f(x)= \begin{cases}
2 x + 1 & \text{if $x \ge  0$} \\
- 2 x & \text{if $x < 0$.}
\end{cases}
$$
\end{pf}

\begin{thm}
$\BBQ$ is countable.
\end{thm}

\begin{pf}
Consider $f : \BBQ \rightarrow  \BBN$ given
$$
f \left(\frac{a}{b}\right)=  2^{|a|} 3^b 5^{1+\signum{a}},
$$
where $\dfrac{a}{b}$ is in lowest terms, and $b > 0$. By the
uniqueness of the prime factorisation of an integer, $f$ is an
injection.
\end{pf}
\begin{rem}
The above theorem means that there are as many rational numbers as
natural numbers. Thus the rationals can be enumerated as
$$q_1, q_2, q_3, \ldots ,  $$
\end{rem}


\begin{thm}[Cantor's Diagonal Argument]
$\BBR$ is uncountable.
\end{thm}

\begin{pf}
Assume $\BBR$ were countable so that  its complete set of elements
may be enumerated, say, as in the list
\begin{align*}
r_1 &= n_1 . d_{11} d_{12} d_{13} \dots \\
r_2 &= n_2 . d_{21} d_{22} d_{23} \dots \\
r_3 &= n_3 . d_{31} d_{32} d_{33} \dots ,
\end{align*}
where we have used decimal notation.  Define the new real $r = 0.d_1
d_2 d_3 \dots$ by $d_i = 0$ if $d_{ii} \neq 0$ and $d_i = 1$ if
$d_{ii} = 0$.  This is a real number (as it is a decimal), but it
differs from $r_i$ in the $i^{\text{th}}$ decimal place. It follows
that  the list is incomplete and the reals are uncountable.
\end{pf}
\begin{thm}The interval $\loro{-1}{1}$ is uncountable.
\end{thm}
\begin{pf}
Observe that the map $f:\loro{-1}{1}\rightarrow \BBR$ given by
$f(x)=\tan \dfrac{\pi x}{2}$ is a bijection.
\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Prove that there are as many numbers in $[0;1]$ as in any interval
$[a;b]$ with $a<b$.
\begin{answer}
The map $f:[0;1]\rightarrow [a;b]$ \quad $f(x)=a+(b-a)x$ is
a bijection.
\end{answer}
\end{pro}
\begin{pro}
Prove that there are as many numbers in $\loro{-\infty}{+\infty}$ as in
$\loro{0}{+\infty}$.
\begin{answer}
The map $f:\loro{-\infty}{+\infty}\rightarrow \loro{0}{+\infty}$
\quad $f(x)=e^x$ is a bijection.
\end{answer}
\end{pro}

\end{multicols}




\section{Groups and Fields}
Here we observe the rules of the game for the operations of addition
and multiplication in $\BBR$.
\begin{df}
Let $S, T$ be sets. A {\em binary operation} is a function
$$\fun{\otimes}{(a, b)}{\otimes(a, b)}{S\times S}{T}.$$We usually
use the ``infix'' notation $a \otimes   b$ rather than the
``prefix'' notation $  \otimes (a, b)$. If $S = T$ then we say that
the binary operation is {\em internal} or {\em closed} and if $S
\neq T$ then we say that it is {\em external}. \end{df}
\begin{exa}
Ordinary addition is a closed binary operation on the sets $\BBN$,
$\BBZ$, $\BBQ$, $\BBR$. Ordinary subtraction is a binary operation
on these sets. It is not closed on $\BBN$, since for example
$1-2=-1\not\in\BBN$, but it is closed in the remaining sets.
\end{exa}
\begin{exa}
The operation $\otimes :\BBR \times \BBR \rightarrow \BBR$ given by
$a\otimes b = 1+a\cdot b$, where $\cdot$ is the ordinary
multiplication of real numbers is commutative but not associative.
To see commutativity we have $$a\otimes b = 1+ab = 1+ba = b\otimes
a.
$$Now, $$1\otimes (1\otimes 2) = 1\otimes (1+1\cdot 2) =1\otimes (3) = 1+1\cdot 3 = 4,
\qquad \mathrm{but}\qquad  (1\otimes 1)\otimes 2 = (1+1\cdot
1)\otimes 2 = 2\otimes 2 = 1 + 2\cdot 2 = 5,
$$so the operation is not associative.
\end{exa}

\begin{df}
Let $G$ be a non-empty set and $ \otimes $ be a binary operation on
$G\times G$. Then $\magma{G}{ \otimes }$ is called a {\em group} if
the following axioms hold:
\begin{enumerate}
\item[{\bf G1:}] $ \otimes $ is closed, that is, $$\forall (a,
b)\in G^2, \ \ \ a \otimes  b \in G,$$ \item[{\bf G2:}] $ \otimes $
is associative, that is, $$ \forall(a, b, c)\in G^3, \ \ \ a \otimes
(b \otimes  c) = (a \otimes  b)  \otimes c,$$ \item[{\bf G3:}] $G$
has an identity element, that is
$$\exists e\in G  \ {\rm such\ that\ } \forall a\in G, \ \ e  \otimes a = a
 \otimes e = a,$$ \item[{\bf G4:}] Every element of $G$ is invertible, that
is
$$\forall a\in G, \ \ \ \exists a^{-1}\in G \ {\rm such\ that\ }
a  \otimes a^{-1} = a^{-1} \otimes  a = e.$$
\end{enumerate}
\end{df}
\begin{rem}
From now on, we drop the sign $\otimes$ and rather use juxtaposition
for the underlying binary operation in a given group. Thus we will
say a ``group $G$'' rather than the more precise ``a group
$\magma{G}{\otimes}$.''
\end{rem}
\begin{df}
A group $G$ is {\em abelian} if its binary operation is commutative,
that is, $\forall (a, b)\in G^2, a\otimes b = b\otimes a$.
\end{df}
\begin{exa}
$\magma{\BBZ}{+}$, $\magma{\BBQ}{+}$, $\magma{\BBR}{+}$,
$\magma{\BBC}{+}$ are all abelian groups under addition. The
identity element is $0$ and the inverse of $a$ is $-a$.
\end{exa}
\begin{exa}
$\magma{\BBQ\setminus \{0\}}{\cdot}$, $\magma{\BBR\setminus
\{0\}}{\cdot}$, $\magma{\BBC\setminus \{0\}}{\cdot}$ are all abelian
groups under multiplication. The identity element is $1$ and the
inverse of $a$ is $\dis{\frac{1}{a}}$.
\end{exa}
\begin{exa}
$\magma{\BBZ\setminus \{0\}}{\cdot}$ is not a group. For example the
element $2$ does not have a multiplicative inverse.
\end{exa}

\begin{exa}
Let $V_4 = \{e, a, b, c\}$ and define $ \otimes $ by the table
below.
$$\begin{array}{|c|c|c|c|c|}
\hline  \otimes  & e & a & b & c \\
\hline e & e & a & b & c \\
\hline a & a & e & c & b \\
\hline b & b & c & e & a \\
\hline c & c & b & a & e \\
\hline
 \end{array}$$

It is an easy exercise to check that $V_4$ is an abelian group,
called the {\em Klein Viergruppe}.
\end{exa}

\begin{thm}
Let $G$ be a group. Then
\begin{enumerate}
\item The identity element of $G$ is
unique. \item The inverse of each element is unique. \item
$\forall(a, b)\in G^2$ we have $$(a b)^{-1} = b^{-1} a^{-1}.$$
\end{enumerate}
\end{thm}
\begin{pf}

\begin{enumerate}
\item Let $e$ and $e'$ be identity elements. Since $e$ is an
identity, $e  = e e'$. Since $e'$ is an identity, $e'  = e e'$. This
gives $e  = e e' = e'$. \item Let $b$ and $b'$ be inverses of $a$.
Then $e = a b$ and $b'  a = e$. This gives
$$b = e b = (b' a)  b = b'  (a b) = b' e = b'.$$
\item We have
$$(a b)(b^{-1} a^{-1}) = a  (b b^{-1}) a^{-1} =
a  (e) a^{-1} = a  a^{-1} = e.$$Thus $b^{-1} a^{-1}$ works as a
right inverse for $a b$. A similar calculation shews also that it
works as a left inverse. Since inverses are unique, we must have
$$(a b)^{-1} = b^{-1} a^{-1}.$$
\end{enumerate}This completes the proof.
\end{pf}
\begin{df}
Let $n\in\BBZ$ and let $G$ be a group. If $a\in G$, we define
$$a^0 = e,$$
$$a^{|n|} = \underbrace{a \cdot a  \cdots  a}_{|n| \ a{\rm 's}},$$and
$$a^{-|n|} = \underbrace{a^{-1}\cdot a^{-1}  \cdots  a^{-1}}_{|n| \ a^{-1}{\rm 's}}.$$
\end{df}
\begin{rem}
If $(m, n)\in\BBZ^2$, then by associativity $$(a^n) (a^m) = (a^m)
(a^n) = a^{m + n}.$$
\end{rem}


\begin{df}
Let $F$ be a set having at least two elements $0_F$ and $1_F$ ($0_F
\neq 1_F$) together with two binary operations $\cdot$ (field
multiplication) and $+$ (field addition). A {\em field}
$\field{F}{\cdot}{+}$ is a triplet such that $\magma{F}{+}$ is an
abelian group with identity $0_F$ , $\magma{F\setminus
\{0_F\}}{\cdot}$ is an abelian group with identity $1_F$ and the
operations $\cdot$ and $+$ satisfy
$$a \cdot (b+ c) = (a\cdot b) + (a\cdot c),$$that
is, field multiplication distributes over field addition.
\end{df}
\begin{rem}
We will continue our practice of denoting multiplication by
juxtaposition, hence the $\cdot$ sign will be dropped.
\end{rem}
\begin{exa}
$\field{\BBQ}{\cdot}{+}$, $\field{\BBR}{\cdot}{+}$, and
$\field{\BBC}{\cdot}{+}$ are all fields. The multiplicative identity
in each case is $1$ and the additive identity is $0$.
\end{exa}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Is the set of real irrational numbers closed under addition? Under
multiplication?
\begin{answer}
Both answers are ``no.'' If $a=-b=\sqrt{2}$, which we will prove
later on to be irrational, we have $a+b=0$, rational, and $ab=-2$,
also rational.
\end{answer}
\end{pro}
\begin{pro}
Let $$S = \{x\in\BBZ : \exists (a, b, c)\in \BBZ^3, x= a^3 + b^3 + c^3
-3abc \}.$$ Prove that $S$ is closed under multiplication, that is,
if $x\in S$ and $y\in S$ then $xy \in S$.
\begin{answer}
Let $\omega = -\frac{1}{2} + i\frac{\sqrt{3}}{2}$. Then $\omega^2 +
\omega + 1 = 0$ and $\omega^3=1$. Then $$x= a^3 + b^3 + c^3 -3abc =
(a +b+c)(a + \omega b + \omega^2 c)(a + \omega^2b + c\omega), $$ $$y
= u^3 + v^3 + w^3 - 3uvw = (u + v + w)(u + \omega v + \omega^2 w)(u
+ \omega^2v+ \omega w).$$ Then
$$(a+b+c)(u+v+w) = au + av + aw + bu + bv + bw + cu + cv + cw,
  $$
  $$\begin{array}{lll}(a + \omega b + \omega^2 c)(u + \omega v + \omega^2 w) & = &
   au + bw + cv \\
 & & \qquad +\omega (av + bu + cw) \\
  & & \qquad +\omega^2 (aw+bv+cu), \\
  \end{array}$$
and
$$\begin{array}{lll}(a + \omega^2 b + \omega c)(u + \omega^2 v + \omega w) &  = &
au + bw + cv \\
& & \qquad +\omega (aw + bv + cu) \\
& & \qquad +\omega^2 (av + bu + cw). \\
\end{array}$$
This proves that $$\begin{array}{lll}xy & = &(au + bw + cv)^3 + (aw
+ bv + cu)^3 + (av + bu + cw)^3\\ & &  -3(au + bw + cv)(aw +
bv + cu)(av + bu + cw), \\
\end{array}
$$which proves that $S$ is closed under multiplication.
\end{answer}
\end{pro}
\begin{pro}[Putnam, 1971] Let $S$ be a set and let $\circ$ be a
binary operation on $S$ satisfying the two laws
$$(\forall x\in S) (x \circ x = x),$$and
$$(\forall (x, y, z) \in S^3) ((x\circ y)\circ z = (y \circ z)\circ x). $$
Shew that $\circ$ is commutative. \begin{answer} We have
$$\begin{array}{lll}
x\circ y & = & (x\circ y)\circ (x\circ y) \\
& = & [y\circ (x \circ y)]\circ x \\
& = & [(x \circ y)\circ x] \circ y \\
& = & [(y \circ x)\circ x] \circ y \\
& = & [(x \circ x)\circ y] \circ y \\
& = & (y \circ y) \circ (x\circ x) \\
& = & y\circ x, \\
\end{array}$$proving commutativity.
\end{answer}
\end{pro}
\begin{pro}[Putnam, 1972]
Let ${\mathscr S}$ be a set and let $*$ be a binary operation of
${\mathscr S}$ satisfying the laws $\forall (x, y)\in {\mathscr
S}^2$
\begin{equation}x*(x*y) = y,\label{eq:law_1}\end{equation}
\begin{equation}(y*x)*x = y.\label{eq:law_2}\end{equation} Shew that $*$ is commutative, but
not necessarily associative.
\begin{answer} By (\ref{eq:law_2})
$$x*y = ((x*y)*x)*x.$$
By (\ref{eq:law_2}) again
$$((x*y)*x)*x = ((x*y)*((x*y)*y))*x.$$
By (\ref{eq:law_1})
$$((x*y)*((x*y)*y))*x = (y)*x = y*x,$$
which is what we wanted to prove.
\bigskip

  To shew that the operation is not necessarily associative,
  specialise
  ${\mathscr S} = \BBZ$ and $x*y = -x-y$ (that is, the negative of the sum of $x$ and $y$).
  Then clearly in this case $*$ is commutative, and satisfies (\ref{eq:law_1}) and (\ref{eq:law_2}) but
  $$0*(0*1) = 0*(-0-1) = 0*(-1) = -0-(-1) = 1,$$ and
  $$(0*0)*1 = (-0-0)*1 = (0)*1 = -0 - 1 = -1,$$
evincing that the operation is not associative.
\end{answer}
\end{pro}
\begin{pro}
On $\BBQ \cap ]-1;1[$ define the  binary operation $ \otimes $ by
$$a \otimes  b = \frac{a + b}{1 + ab},$$where juxtaposition means ordinary
multiplication and $+$ is the ordinary addition of real numbers.
Prove that \mbox{$\magma{\BBQ \cap ]-1;1[}{ \otimes }$} is an
abelian group by following these steps.
\begin{enumerate}
\item Prove that $ \otimes $ is a closed binary operation on $\BBQ
\cap ]-1;1[$. \\

 \item Prove that $ \otimes $ is both commutative and
associative. \\


\item Find an element $e\in\BBQ \cap ]-1;1[$ such that $(\forall a
\in \BBQ \cap ]-1;1[)\ (e \otimes  a =
a)$. \\


\item Given $e$ as above and an arbitrary element $a\in \BBQ \cap
]-1;1[$, solve the equation $a \otimes  b = e$ for
$b$. \\

\end{enumerate}
\begin{answer} \begin{enumerate}  \item Clearly, if $a, b$ are rational
numbers,
$$|a| < 1, |b|<1 \implies |ab| < 1 \implies -1 < ab < 1 \implies 1 +
ab > 0,$$whence the denominator never vanishes and since sums,
multiplications and divisions of rational numbers are rational,
$\dfrac{a + b}{1 + ab}$ is also rational. We must prove now that $-1
< \dfrac{a + b}{1 + ab} < 1$ for $(a, b)\in ]-1; 1[^2$. We have
$$\begin{array}{lll} -1 < \dfrac{a + b}{1 + ab} <  1 & \Leftrightarrow &
-1 - ab < a + b < 1 + ab \\
 & \Leftrightarrow & -1 -ab - a - b < 0 < 1 + ab - a - b \\
& \Leftrightarrow & -(a + 1)(b + 1) < 0 < (a - 1)(b - 1).
\end{array}$$
Since $(a, b)\in ]-1; 1[^2$, $(a + 1)(b + 1) > 0$ and so $-(a + 1)(b
+ 1) < 0$ giving the sinistral inequality. Similarly $a - 1 < 0$ and
$b - 1 < 0$ give $(a - 1)(b - 1) > 0$, the dextral inequality. Since
the steps are reversible, we have established that indeed $-1 <
\dfrac{a + b}{1 + ab} <  1$. \item Since $a \otimes b = \dfrac{a +
b}{1 + ab} = \dfrac{b + a}{1 + ba} = b \otimes  a$, commutativity
follows trivially. Now
$$\begin{array}{ccc}a  \otimes (b \otimes  c) &  = &
 a \otimes \left(\frac{b + c}{1 + bc}\right) \\ & = &
 \dfrac{a + \left(\dfrac{b + c}{1 + bc}\right)}{1 + a\left(\dfrac{b + c}{1 +
 bc}\right)}\\
& = & \dfrac{a(1 + bc) + b + c}{1 + bc + a(b + c)} = \dfrac{a + b +
c + abc}{1 + ab + bc + ca}. \end{array}$$On the other hand,
$$\begin{array}{ccc} (a \otimes  b)  \otimes c &  = &  \left(\dfrac{a + b}{1 + ab}\right) \otimes c  \\
& = & \dfrac{\left(\dfrac{a + b}{1 + ab}\right) + c}{1 +
\left(\dfrac{a + b}{1 + ab}\right)c} \\ & = & \dfrac{(a + b)+ c(1 +
ab)}{1 + ab + (a + b)c}\\ &  = &  \dfrac{a + b + c + abc}{1 + ab +
bc + ca},\end{array}$$ whence $ \otimes $ is associative. \item If
$a \otimes e = a$ then $\dfrac{a + e}{1 + ae} = a$, which gives $a +
e = a + ea^2$ or $e(a^2 - 1) = 0$. Since $a \neq \pm 1$, we must
have $e = 0$. \item If $a  \otimes b = 0$, then $\dfrac{a + b}{1 +
ab} = 0$, which means that $b = -a$, that is, $a^{-1} = -a$.
\end{enumerate}
\end{answer}
\end{pro}
\begin{pro}
Let $G$ be a group satisfying $(\forall a\in G)$$$a^2 = e.$$ Prove
that $G$ is an abelian group.

\begin{answer} We must shew that $\forall (a, b)\in G^2$ we have $a b = b a.$
But
$$\begin{array}{lll}
a b & = & e  (a  b)  e \\
& = & (b^2)  (a  b)   (a^2) \\
& = & b  ((b  a ) (b   a)) a \\
& = & b  (b a)^2  a \\
& = & b  (e)  a \\
& = & b  a,
\end{array}$$whence the result follows.
\end{answer}
\end{pro}
\begin{pro}
Let $G$ be a group where  $(\forall (a, b)\in G^2)$
$$((a b)^3 = a^3 b^3) \quad \mathrm{and} \quad ((a b)^5 =
a^5 b^5).$$ Shew that $G$ is abelian. \begin{answer} We have
$$\begin{array}{lll}(a b)^3 = a^3 b^3 & \implies   &
 a  b (a b) a b = a (a^2  b^2)  b \\
 & \implies   & b  a  b  a = a^2  b^2 \\
 & \implies   & (b  a)^2 = a^2  b^2. \end{array}$$
 Similarly
 $$\begin{array}{lll}(a b)^5 = a^5 b^5 & \implies   &
(b a)^4 = a^4  b^4.
 \end{array}$$
But we also have
$$ (b a)^4 = ((b a)^2)^2 = (a^2  b^2)^2 = a^2  (b^2 a^2) b^2,$$
and so
$$a^2  (b^2 a^2) b^2 = (b a)^4 = a^4  b^4 \implies   b^2  a^2 = a^2  b^2.$$
We have shewn that $\forall (a, b)\in G^2$
$$((b  a)^2 = a^2  b^2) \quad \mathrm{and} \quad (b^2  a^2 = a^2
b^2).$$Hence
$$\begin{array}{lll}
(b  a)^2 = a^2  b^2 = b^2  a^2 & \implies   &
b a  b  a = b^2  a^2 \\
& \implies   & a b = b  a, \\
\end{array}$$
proving that the group is abelian.
\end{answer}
\end{pro}
\begin{pro}
Suppose that in  a group $G$ there exists a pair $(a, b)\in G^2$
satisfying
$$(a b)^k = a^k  b^k$$ for three consecutive
integers $k = i, i + 1, i + 2.$ Prove that $a b = b  a$.
\begin{answer} Since
$$(a b)^{i + 2} = \underbrace{(a b) (a b) \cdots
 (a b)}_{ i + 2 \ \ {\rm times}} = a
(b a)^{i + 1} b,
$$ multiplying by $a^{-1}$ on the left and by $b^{-1}$ on the
right the equality
\begin{equation}(a b)^{i + 2} = a^{i + 2}  b^{i + 2}
\label{eq:group_5}
\end{equation}we obtain
\begin{equation}(b  a)^{i + 1} = (a)^{i + 1} (b)^{i + 1}.
\label{eq:group_6}
\end{equation}By hypothesis
\begin{equation}(a  b)^{i + 1} = (a)^{i + 1} (b)^{i + 1}.
\label{eq:group_7} \end{equation} Hence (\ref{eq:group_6}) and
(\ref{eq:group_7}) yield
\begin{equation}(a  b)^{i + 1} = (b a)^{i + 1}.
\label{eq:group_8}
\end{equation}
Similarly, from (\ref{eq:group_7}) we obtain
\begin{equation}(a  b)^{i} = (b a)^{i},
\label{eq:group_9}
\end{equation}
from which
\begin{equation}(a  b)^{-i} = (b a)^{-i}.
\label{eq:group_10}
\end{equation}
Multiplying (\ref{eq:group_8}) and (\ref{eq:group_10}) together, we
deduce
$$a b = b a,$$which is what we wanted to shew.
\end{answer}
\end{pro}
\end{multicols}
\section{Addition and Multiplication in $\BBR$}
Since $\BBR$ is a field, it satisfies the following list of axioms,
which we list  for future reference.
\begin{axi}[Arithmetical Axioms of $\BBR$] $\field{\BBR}{\cdot}{+}$---that is, the set of real
numbers endowed with multiplication $\cdot$ and addition $+$---is a
field. This entails that $+$ and $\cdot$ verify the following
properties.
\begin{enumerate}
\item[{\bf R1:}]\label{axi:r1} $+$ and $\cdot$ are closed binary operations, that is, $$\forall (a,
b)\in \BBR^2, \ \ \ a +  b \in \BBR, \quad \ \ \ a\cdot b \in
\BBR,$$
\item[{\bf R2:}]\label{axi:r2}  $+$  and $\cdot$ are associative binary operations, that is, $$ \forall(a,
b, c)\in \BBR^3, \quad a +(b +  c) = (a +  b)+ c, \quad a \cdot (b
\cdot c) = (a \cdot  b)\cdot c$$
\item[{\bf R3:}]\label{axi:r3}  $+$  and $\cdot$ are commutative binary operations, that is, $$ \forall(a,
b)\in \BBR^2, \quad a +b = b+a, \quad a \cdot b = b \cdot a,$$
\item[{\bf R4:}]\label{axi:r4}  $\BBR$ has an additive identity element $0$, and a multiplicative identity element $1$, with $0\neq 1$, such that
$$\forall a\in \BBR, \quad 0 +a = a
 +0 = a,\quad 1\cdot a = a\cdot 1 = a,$$ \item[{\bf R5:}]\label{axi:r5}  Every element of $\BBR$ has an additive inverse, and every element of $\BBR\setminus \{0\}$ has a multiplicative
 inverse, that is,
$$\forall a\in \BBR, \quad  \exists (-a)\in \BBR \ {\rm such\ that\ }
a  + (-a) = (-a) +  a = 0,$$
$$\forall b\in \BBR\setminus \{0\}, \quad  \exists b^{-1}\in \BBR\setminus \{0\} \ {\rm such\ that\ }
b\cdot b^{-1} = b^{-1}\cdot b = 1,$$
\item[{\bf R6:}]\label{axi:r6} $+$ and $\cdot$ satisfy the following distributive law:
$$\forall (a, b, c)\in \BBR ^3, \quad  a\cdot (b+c) = a\cdot b + a\cdot c.$$
\end{enumerate}
\end{axi}

Since $+$ and $\cdot$ are associative in $\BBR$, we may write a sum
$a_1+a_2+\cdots + a_n$ or a product $a_1a_2\cdots a_n$ of real
numbers without risking ambiguity. We often use the following
shortcut notation.
\begin{df}
For real numbers $a_i$ we define $$a_1+a_2+\cdots + a_n=\sum _{k=1}
^n a_k \qquad \mathrm{and}\qquad a_1a_2\cdots a_n= \prod _{k=1} ^n
a_k.$$
\end{df}
\begin{rem}
By convention $\sum _{k\in\varnothing} a_k=0$ and  $\prod
_{k\in\varnothing} a_k=1$ .
\end{rem}
\begin{thm}[Lagrange's Identity]\label{thm:lagranges-id} Let $a_k,
b_k$ be real numbers. Then $$ \left(\sum _{k=1} ^na_kb_k\right)^2
=\left(\sum _{k=1} ^na_k ^2\right)\left(\sum _{k=1} ^nb_k
^2\right)-\sum _{1\leq k <j\leq n}(a_kb_j-a_jb_k)^2.$$
\end{thm}
\begin{pf}
For $j=k$, $a_kb_j-a_jb_k=0$, so we may relax the inequality in the
last sum.  We have
$$\begin{array}{lll}\sum _{1\leq k <j\leq n}(a_kb_j-a_jb_k)^2 & = &
\sum _{1\leq k \leq j\leq n}(a_k ^2b_j ^2
-2a_kb_ka_jb_j+a_j ^2b_k ^2)\\
& = & \sum _{1\leq k \leq j\leq n}a_k ^2b_j ^2
-2\sum _{1\leq k \leq j\leq n} a_kb_ka_jb_j+\sum _{1\leq k \leq j\leq n} a_j ^2b_k ^2\\
& = & \sum _{k=1} ^n\sum _{j=1} ^n a_k ^2 b_j ^2-\left( \sum _{k=1}
^na_kb_k\right)^2,\end{array}  $$proving the theorem.
\end{pf}
Recall that the factorial symbol $!$ is defined by
$$ 0!=1; \qquad k! = k(k-1)!\quad \mathrm{if}\quad k\geq 1. $$
\begin{df}[Binomial Coefficients] Let $n\in \BBN$.
We define $\binom{n}{0}=1=\binom{n}{n}$ and for $1\leq k \leq n$,
$$ \binom{n}{k} = \dfrac{n!}{k!(n-k)!}. $$If $k>n$ we take $
\binom{n}{k}=0$.
\end{df}
\begin{lem}[Pascal's Identity]\label{lem:pascal-id} For $n\geq 1$ and
$1\leq k \leq n$,
$$\binom{n}{k} =  \binom{n-1}{k}+\binom{n-1}{k-1}. $$
\end{lem}
\begin{pf}
We have $$\begin{array}{lll}\binom{n-1}{k}+\binom{n-1}{k-1} & = &
\dfrac{(n-1)!}{k!(n-1-k)!}+ \dfrac{(n-1)!}{(k-1)!(n-k)!}\\
& = &
\dfrac{(n-1)!}{(k-1)!(n-1-k)!}\left(\dfrac{1}{k}+\dfrac{1}{n-k}\right)\\
&= &\dfrac{(n-1)!}{(k-1)!(n-1-k)!}\left(\dfrac{n}{k(n-k)}\right)\\
& = &\dfrac{n!}{k!(n-k)!} = \binom{n}{k}.\end{array}
$$
\end{pf}
Using Pascal's Identity we obtain {\em Pascal's Triangle.}
\renewcommand{\arraystretch}{1.2}
$$\begin{array}{ccccccccccc}
 & & & & & \binom{0}{0} & & & & & \\
 & & & &  \binom{1}{0} & &  \binom{1}{1} & & & &  \\
 & & &  \binom{2}{0} & &  \binom{2}{1} & &  \binom{2}{2} & & &  \\
 & &  \binom{3}{0} & &  \binom{3}{1} & &  \binom{3}{2} & &  \binom{3}{3} & & \\
 &  \binom{4}{0} & &  \binom{4}{1} & &  \binom{4}{2} & &  \binom{4}{3} & &  \binom{4}{4} &  \\
  \binom{5}{0} & &  \binom{5}{1} & &  \binom{5}{2} & &  \binom{5}{3} & &  \binom{5}{4} & &  \binom{5}{5}  \\
    \vdots & &  \vdots& &  \vdots & &  \vdots & &  \vdots & &  \vdots  \\
\end{array}$$




When the numerical values are substituted, the triangle then looks
like this.
$$
\begin{array}{ccccccccccc}
 & & & & & 1 & & & & &  \\
 & & & & 1 & & 1 & & & &  \\
 & & & 1 & & 2 & & 1 & & & \\
 & & 1 & & 3 & & 3 & & 1 & &  \\
 & 1 & & 4 & & 6 & & 4 & & 1 &  \\
 1 & & 5 & & 10 & & 10 & & 5 & & 1  \\
   \vdots & &  \vdots& &  \vdots & &  \vdots & &  \vdots & &  \vdots  \\
\end{array}$$





We see from Pascal's Triangle that binomial coefficients are
symmetric.  This symmetry is easily justified by the identity
$\binom{n}{k} = \binom{n}{n - k}$.  We also notice that the binomial
coefficients tend to increase until they reach the middle, and that
then they decrease symmetrically.


\begin{thm}[Binomial Theorem] For $n \in\BBN,$
$$(x + y)^n = \sum _{k = 0} ^n \binom{n}{k}x^ky^{n-k} .$$
\end{thm}
\begin{pf}
The theorem is obvious for $n=0$ (defining $(x+y)^0=1$),   $n=1$ (as
$(x+y)^1=x+y$), and $n=2$ (as $(x+y)^2=x^2+2xy+y^2$). Assume $n\geq
3$. The induction hypothesis is that $(x + y)^n = \sum _{k = 0} ^n
\binom{n}{k}x^ky^{n-k} .$ Then we have
$$\begin{array}{lll}(x+y)^{n+1} & = & (x+y)(x+y)^n\\
& = & (x+y)\left(\sum _{k = 0} ^n \binom{n}{k}x^ky^{n-k}\right)\\
& = & \sum _{k = 0} ^n \binom{n}{k}x^{k+1}y^{n-k}+ \sum _{k = 0} ^n
\binom{n}{k}x^ky^{n-k+1}\\
& = & x^{n+1} +\sum _{k = 0} ^{n-1} \binom{n}{k}x^{k+1}y^{n-k} +
\sum _{k = 1} ^n \binom{n}{k}x^ky^{n-k+1} + y^{n+1}\\
& = & x^{n+1} +\sum _{k = 1} ^{n} \binom{n}{k-1}x^{k}y^{n-k+1} +
\sum _{k = 1} ^n \binom{n}{k}x^ky^{n-k+1} + y^{n+1}\\
& = & x^{n+1} +\sum _{k = 1} ^{n}
\left(\binom{n}{k-1}+\binom{n}{k}\right)x^{k}y^{n-k+1}+ y^{n+1}\\
& = & x^{n+1} +\sum _{k = 1} ^{n}
\binom{n+1}{k}x^{k}y^{n-k+1}+ y^{n+1}\\
& = & \sum _{k = 0} ^{n+1} \binom{n+1}{k}x^{k}y^{n-k+1},\\
\end{array}$$proving the theorem.
\end{pf}
\begin{lem}\label{lem:finite-geom-sum}
 If $a\in\BBR$, $a\neq 1$ and $n\in\BBN\setminus \{0\}$, then
$$1 + a + a^2 + \cdots + a^{n-1} = \dfrac{1 - a^{n}}{1 -a}.   $$
\end{lem}
\begin{pf}
For, put $S = 1 + a + a^2 + \cdots + a^{n-1}.$ Then $aS = a + a^2 +
\cdots + a^{n-1} + a^n.$ Thus $$S - aS = (1 + a + a^2 + \cdots
+a^{n-1}) - (a + a^2 + \cdots + a^{n-1} + a^n) = 1 - a^n,$$ and from
$(1-a)S = S-aS=1 - a^n $ we obtain the result.
\end{pf}
\begin{thm}
Let $n$ be a strictly positive integer. Then
$$y^n - x^n = (y - x)(y^{n-1} + y^{n-2}x + \cdots + yx^{n-2} + x^{n-1}).$$ \label{thm:diffbinom}\end{thm}
\begin{pf} By making the
substitution $a = \frac{x}{y}$ in Lemma \ref{lem:finite-geom-sum} we
see that
$$1 + \frac{x}{y} + \left(\frac{x}{y}\right)^2 + \cdots +
\left(\frac{x}{y}\right)^{n-1} = \dfrac{1-
\left(\frac{x}{y}\right)^n}{1-\frac{x}{y}}.   $$ Multiplying both sides by $1-\frac{x}{y}$, we obtain
$$\left(1-\frac{x}{y}\right)\left(1 + \frac{x}{y} +
\left(\frac{x}{y}\right)^2 + \cdots +
\left(\frac{x}{y}\right)^{n-1}\right) = 1-
\left(\frac{x}{y}\right)^n,   $$ or equivalently,
$$\left(1-\frac{x}{y}\right)\left(1 + \frac{x}{y} +
\frac{x^2}{y^2} + \cdots +\frac{x^{n-1}}{y^{n-1}}\right) = 1-
\frac{x^n}{y^n}.   $$ Multiplying by $y^n$ both sides,
$$y\left(1-\frac{x}{y}\right) y^{n-1}\left(1 + \frac{x}{y} +
\frac{x^2}{y^2} + \cdots +\frac{x^{n-1}}{y^{n-1}}\right) =
y^n\left(1- \frac{x^n}{y^n}\right),   $$ which is
$$y^n - x^n = (y-x)(y^{n-1} + y^{n-2}x + \cdots + yx^{n-2} + x^{n-1}),   $$
yielding the result.
\end{pf}
\begin{thm}\label{thm:sum-of-first-n-integers}
 $1 + 2 + \cdots + n= \dfrac{n(n+1)}{2}$.
\end{thm}
\begin{f-pf} Observe that
$$ k^2 - (k - 1)^2 = 2k - 1.$$
From this
$$
\begin{array}{lcl}
1^2 - 0^2 & = & 2\cdot 1 - 1 \\
2^2 - 1^2 & = & 2\cdot 2 - 1 \\
3^2 - 2^2 & = & 2\cdot 3 - 1 \\

\vdots & \vdots & \vdots \\
n^2 - (n - 1)^2 & = & 2\cdot n - 1
\end{array}
$$
Adding both columns,
$$n^2 - 0^2 = 2(1 + 2 + 3 + \cdots  + n) - n.$$
Solving for the sum,
$$1 + 2 + 3 + \cdots + n =  n^2/2 +  {n}/{2} = \frac{n(n + 1)}{2}.$$

\end{f-pf}
\begin{s-pf}
We may utilise Gauss' trick:  If
$$ A_n =  1 + 2 + 3 + \cdots + n $$
 then$$ A_n =  n + (n - 1) +  \cdots + 1.$$Adding these two quantities,
$$ \begin{array}{lcccccccc} A_n & = & 1 & + &  2 &  + &  \cdots & + & n \\
{A_n} & {=} & n & + & (n - 1) & + & \cdots & + & 1 \\
\hline
2A_n & =  & (n + 1) & + & (n + 1) & + & \cdots & + & (n + 1) \\
  & = & n(n + 1), & & & & & \end{array}$$since there are $n$ summands. This
  gives $A_n = \dis{\frac{n(n + 1)}{2}}$, that is,
$$ 1 + 2 + \cdots + n = \frac{n(n + 1)}{2}.$$Applying
Gauss's trick to the general arithmetic sum
$$ (a) + (a + d) + (a + 2d) + \cdots + (a + (n - 1)d) $$we obtain
\begin{equation}
(a) + (a + d) + (a + 2d) + \cdots + (a + (n - 1)d) = \frac{n(2a + (n
- 1)d)}{2}
\end{equation}
\end{s-pf}
\begin{thm}\label{thm:sum-of-first-squares}
$1^2 + 2^2 + 3^2 + \cdots + n^2=\frac{n(n + 1)(2n + 1)}{6}.$
\end{thm}\begin{pf} Observe that
$$ k^3 - (k - 1)^3 = 3k^2 - 3k + 1.$$
Hence
$$
\begin{array}{lcl}
1^3 - 0^3 & = & 3\cdot 1^2 - 3\cdot 1 + 1 \\
2^3 - 1^3 & = & 3\cdot 2^2 - 3\cdot 2 + 1 \\
3^3 - 2^3 & = & 3\cdot 3^2 - 3\cdot 3 + 1 \\
\vdots & \vdots & \vdots \\
n^3 - (n - 1)^3 & = & 3\cdot n^2 - 3\cdot n + 1
\end{array}
$$
Adding both columns,
$$n^3 - 0^3 = 3(1^2 + 2^2 + 3^2 + \cdots  + n^2) - 3(1 + 2 + 3 + \cdots + n) + n.$$
From the preceding theorem $1 + 2 + 3 + \cdots + n = n^2/2 +
{n}/{2} = \frac{n(n + 1)}{2}$, so

$$n^3 - 0^3 = 3(1^2 + 2^2 + 3^2 + \cdots  + n^2) - \frac{3}{2}\cdot n(n + 1) + n.$$
Solving for the sum,
$$1^2 + 2^2 + 3^2 + \cdots + n^2 = \frac{n^3}{3}  + \frac{1}{2}\cdot n(n + 1) - \frac{n}{3}.$$
After simplifying we obtain
$$
1^2 + 2^2 + 3^2 + \cdots + n^2 = \frac{n(n + 1)(2n + 1)}{6}.$$
\end{pf}


\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Prove that for $n\geq 1$, $$2^n = \sum _{k=0} ^n \binom{n}{k};
\qquad 0 = \sum _{k=0} ^n (-1)^k\binom{n}{k}, \qquad 2^{n-1} = \sum
_{\substack{0 \leq k \leq n\\ k \ \mathrm{even}}} \binom{n}{k} =
\sum _{\substack{1 \leq k \leq n \\ k \ \mathrm{odd}}} \binom{n}{k}.
$$
\begin{answer}
The first two follow immediately from the Binomial Theorem, the
first by putting $x=y=1$ and then $x=-y=1$. The third follows by
adding the first two and dividing by $2$. The fourth follows by
subtracting the second from the first and then dividing by $2$.
\end{answer}
\end{pro}
\begin{pro} Given that $1002004008016032$ has a
prime factor $p > 250000,$ find it. \begin{answer} If $a = 10^3 , b
= 2$ then
$$ 1002004008016032 = a^5 + a^4 b + a^3 b^2 + a^2 b^3 + ab^4 + b^5 =
\frac{ a^6 - b^6 }{ a - b }. $$ This last expression factorises as
$$ {\everymath{\displaystyle}
\begin{array}{lcl}\frac{a^6 -
b^6}{a - b} & = & (a + b)(a^2 + ab + b^2)(a^2 - ab + b^2 ) \\
&  = & 1002\cdot 1002004\cdot 998004 \\
& = & 4\cdot 4\cdot 1002\cdot 250501 \cdot k,\end{array} }$$where $k
< 250000$. Therefore $p = 250501$.
\end{answer}
\end{pro}


\begin{pro}
Prove that $(a+b+c)^2 = a^2+b^2+c^2+2ab+2bc+2ca$.
\end{pro}
\begin{pro}
Let $a, b,c$ be real numbers. Prove that
$$a^3+b^3+c^3-3abc=(a+b+c)(a^2+b^2+c^2-ab-bc-ca).$$
\begin{answer}
From the Binomial Theorem,
$$ (A+B)^3 = A^3+3A^2B+3AB^2+B^3\implies A^3+B^3=(A+B)^3-3AB(A+B). $$Then
$$
\begin{array}{lll}
a^3 + b^3 + c^3 - 3abc & = & (a + b)^3 + c^3 - 3ab(a + b) - 3abc \\
& = & (a + b + c)^3 - 3(a + b)c(a + b + c) - 3ab(a + b + c) \\
& = & (a + b + c)((a + b + c)^2 - 3ac - 3bc - 3ab) \\
& = & (a + b + c)(a^2 + b^2 + c^2 - ab - bc - ca).
\end{array}
$$
\end{answer}
\end{pro}
\begin{pro}Prove that
$$ \binom{n}{k} = \dfrac{n}{k}\binom{n-1}{k-1}.$$
\begin{answer}
$$ \binom{n}{k} = \dfrac{n!}{k!(n-k)!} = \dfrac{n}{k}\cdot \dfrac{(n-1)!}{(k-1)!(n-k)!}  = \dfrac{n}{k}\binom{n-1}{k-1}.$$
\end{answer}
\end{pro}
\begin{pro}Prove that
$$ \binom{n}{k} = \dfrac{n}{k}\cdot \dfrac{n-1}{k-1} \cdot\binom{n-2}{k-2}.$$
\begin{answer}
$$ \binom{n}{k} = \dfrac{n!}{k!(n-k)!} = \dfrac{n(n-1)}{k(k-1)}\cdot \dfrac{(n-2)!}{(k-2)!(n-k)!}  = \dfrac{n}{k}\cdot \dfrac{n-1}{k-1} \cdot\binom{n-2}{k-2}.$$
\end{answer}
\end{pro}
\begin{pro}Prove that
$$\sum _{k = 1} ^n k\binom{n}{k}p^{k}(1 - p)^{n - k} = np.  $$
\begin{answer}
We use the identity $k\binom{n}{k} = n\binom{n - 1}{k - 1}$. Then
$$\begin{array}{lll}\sum _{k = 1} ^n k\binom{n}{k}p^{k}(1 - p)^{n - k} & = &\sum _{k = 1} ^n  n\binom{n - 1}{k - 1}p^{k}(1 - p)^{n - k}   \\
& = &    \sum _{k = 0} ^{n-1}  n\binom{n - 1}{k}p^{k+1}(1 -
p)^{n-1 - k}                     \\
& = &    np\sum _{k = 0} ^{n-1}  \binom{n - 1}{k}p^{k}(1 -
p)^{n-1 - k} \\
& = & np(p + 1-p)^{n - 1} \\

& = & np.  \end{array}$$
\end{answer}
\end{pro}
\begin{pro}Prove that
$$\sum _{k = 2} ^n k(k-1)\binom{n}{k}p^{k}(1 - p)^{n - k} = n(n - 1)p^2.  $$
\begin{answer}
We use the identity
$$k(k-1)\binom{n}{k} = n(n-1)\binom{n - 2}{k - 2}.$$ Then
$$\begin{array}{lll}\sum _{k = 2} ^n k(k-1)\binom{n}{k}p^{k}(1 - p)^{n - k} & = &\sum _{k = 2} ^n  n(n-1)\binom{n - 2}{k - 2}p^{k}(1 - p)^{n - k}   \\
& = &    \sum _{k = 0} ^{n-2}  n(n-1)\binom{n - 2}{k}p^{k+2}(1 -
p)^{n-2 - k}                     \\
& = &    n(n-1)p^2\sum _{k = 0} ^{n-2}  \binom{n - 2}{k}p^{k}(1 -
p)^{n-2 - k} \\
& = & n(n-1)p^2(p + 1-p)^{n - 2} \\
& = & n(n-1)p^2.  \end{array}$$
\end{answer}
\end{pro}

\begin{pro}Demonstrate that
$$\sum _{k = 0} ^n (k - np)^2\binom{n}{k}p^{k}(1 - p)^{n - k} = np(1- p).  $$
\begin{answer}
We use the identity $$(k - np)^2 = k^2 -2knp + n^2p^2 = k(k-1) +
k(1-2np) + n^2p^2.$$ Then
$$\begin{array}{lll}
\sum _{k = 0} ^n (k - np)^2\binom{n}{k}p^{k}(1 - p)^{n - k} & = &
\sum _{k = 0} ^n (k(k-1) + k(1-2np)\\ & & \quad  +
n^2p^2)\binom{n}{k}p^{k}(1 -
p)^{n - k} \\
& = & \sum _{k = 0} ^n k(k-1)\binom{n}{k}p^{k}(1 - p)^{n - k}\\ &
&  + (1 - 2np)\sum _{k = 0} ^n k\binom{n}{k}p^{k}(1 - p)^{n - k}\\
& & + n^2p^2\sum _{k = 0} ^n \binom{n}{k}p^{k}(1 -
p)^{n - k} \\
& = & n(n-1)p^2 + np(1 -2np) + n^2p^2\\
& = & np(1-p).
\end{array}$$
\end{answer}
\end{pro}
\begin{pro}
Let $x\in\BBR\setminus\{1\}$ and let $n\in\BBN\setminus\{0\}$. Prove
that
$$ \sum _{k=0} ^n \dfrac{2^k}{x^{2^k}+1}=\dfrac{1}{x-1}-\dfrac{2^{n+1}}{x^{2^{n+1}}+1}. $$
\end{pro}
\begin{pro}
Consider the $n^k$ $k$-tuples $(a_1, a_2, \ldots , a_k)$ which can
be formed by taking $a_i\in\{1,2,\ldots , n\}$, repetitions allowed.
Demonstrate that
$$ \sum _{a_i\in \{1,2,\ldots , n\}} \min (a_1, a_2, \ldots ,a_k) = 1^k + 2^k + \cdots + n^k. $$
\begin{answer}
Observe that the number of $k$-tuples with $\min ((a_1, a_2, \ldots
, a_k))=t$ is $(n-t+1)^k-(n-t)^k$.
\end{answer}
\end{pro}

\end{multicols}

\section{Order Axioms}
\begin{rem}
\fcolorbox{red}{cyan}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Vocabulary Alert!}} We will
    call a number $x$ {\em positive} if $x\geq 0$ and {\em strictly
    positive} if $x>0$. Similarly, we will
    call a number $y$ {\em negative} if $y\leq 0$ and {\em strictly
    negative} if $y<0$. This usage differs from most Anglo-American
    books, who prefer such terms as {\em non-negative} and {\em
    non-positive}.
\end{minipage}}
\end{rem}
We assume $\BBR$ endowed with a relation $>$ which satisfies the
following axioms.
\begin{axi}[Trichotomy Law]\label{axi:trichotomy} $\forall (x,y)\in\BBR
^2$ exactly one of the following holds:
$$ x> y, \quad x=y, \quad \mathrm{or}\quad y>x.$$
\end{axi}
\begin{axi}[Transitivity of Order]\label{axi:transitivity} $\forall (x,y, z)\in\BBR
^3$,
$$\mathrm{if}\quad  x> y \quad \mathrm{and}\quad  y>z \quad \mathrm{then}\quad x>z.$$
\end{axi}
\begin{axi}[Preservation of Inequalities by Addition]\label{axi:preservation-of-ineqs-by-addition} $\forall (x,y, z)\in\BBR
^3$,
$$\mathrm{if} \quad  x> y \quad \mathrm{then}\quad x+z>y+z.$$
\end{axi}
\begin{axi}[Preservation of Inequalities by Positive Factors]\label{axi:preservation-of-ineqs} $\forall (x,y, z)\in\BBR
^3$,
$$\mathrm{if} \quad  x> y \quad \mathrm{and}\quad  z>0 \quad \mathrm{then}\quad xz>yz.$$
\end{axi}
\begin{rem}
$x<y$ means that $y>x$.  $x\leq y$ means that either $y>x$ or $y=x$,
etc.
\end{rem}

\begin{thm}\label{thm:square-of-a-real}
The square of any real number is positive, that is, $\forall a\in
\BBR$,  $\qquad a^2 \geq 0$. In fact, if $a\neq 0$ then $a^2>0$.
\end{thm}
\begin{pf}
If $a=0$, then $0^2= 0$ and there is nothing to prove. Assume now
that $a\neq 0$. By trichotomy, either $a>0$ or $a<0$. Assume first
that $a>0$. Applying Axiom \ref{axi:preservation-of-ineqs} with
$x=z=a$ and $y=0$ we have $$aa>a0\implies a^2>0,$$so the theorem is
proved if $a>0$.

\bigskip

If $a<0$ then $-a>0$ and we apply the result just obtained: $$ -a>0
\implies (-a)^2>0 \implies 1\cdot a^2>0 \implies a^2>0,
$$so the result is true regardless the sign of $a$.
\end{pf}
Theorem \ref{thm:square-of-a-real} will prove to be extremely
powerful and will be the basis for many of the classical
inequalities that follow.

\begin{thm}If $(x, y)\in\BBR^2$,
$$ x>y \iff x-y > 0. $$
\end{thm}
\begin{pf}
This is a direct consequence of Axiom
\ref{axi:preservation-of-ineqs-by-addition} upon taking $z=-y$.
\end{pf}
\begin{thm}If $(x, y, a, b)\in\BBR^4$,
$$ x>y \quad \mathrm{and} \quad a \geq b \implies  x+a > y + b. $$
\end{thm}
\begin{pf}
We have $$x>y \implies x+a>y+a, \quad y+a\geq y+b,  $$by Axiom
\ref{axi:preservation-of-ineqs-by-addition} and so by Axiom
\ref{axi:transitivity} $x+a>y+b$.
\end{pf}
\begin{thm}If $(x, y, a, b)\in\BBR^4$,
$$ x>y >0\quad \mathrm{and} \quad a \geq b>0 \implies  xa > yb. $$
\end{thm}
\begin{pf}
Indeed $$x>y \implies xa>ya, \quad ya\geq yb,  $$by Axiom
\ref{axi:preservation-of-ineqs} and so by Axiom
\ref{axi:transitivity} $xa>yb$.
\end{pf}
\begin{thm}\label{thm:0<1} $1>0$.
\end{thm}
\begin{pf}
By definition of $\BBR$ being a field $0\neq 1$. Assume that  $1<0$
then $1^2>0$ by Theorem \ref{thm:square-of-a-real}. But $1^2 = 1$
and so $1>0$, a contradiction to our original assumption.
\end{pf}
\begin{thm}
$x>0 \implies -x<0\quad \mathrm{and}\quad x^{-1}>0$.
\end{thm}
\begin{pf}
Indeed, $-1<0$ since $-1\neq 0$ and assuming $-1>0$ would give
$0=-1+1>1$, which contradicts Theorem \ref{thm:0<1}. Thus $$
-x=-1\cdot x < 0.
$$Similarly, assuming $x^{-1}<0$ would give $1=x^{-1}x<0$.
\end{pf}
\begin{thm}
$x>1 \implies x^{-1}<1$.
\end{thm}
\begin{pf}
Since $x^{-1}\neq 1$, assuming $x^{-1}>1$ would give
$1=xx^{-1}>1\cdot 1=1$, a contradiction.
\end{pf}
\subsection{Absolute Value}
\begin{df}[The Signum (Sign) Function] Let $x$ be a real number. We
define $ \signum{x}= \left\{
\begin{tabular}{cl}
$-1$       & \rm{if} \ $x < 0$, \\
$0$ & \rm{if} \ $x = 0$,\\
$+1$       & \rm{if} \ $x > 0$. \\
\end{tabular}
\right. $
\end{df}
\begin{lem}\label{lem:signum-is-multiplicative}
The signum function is multiplicative, that is, if $(x,y)\in\BBR^2$
then $\signum{x\cdot y}=\signum{x}\signum{y}$.
\end{lem}
\begin{pf}
Immediate from the definition of signum.
\end{pf}
\begin{df}[Absolute Value] Let $x\in\BBR$. The {\em absolute value}
of $x$ is defined and denoted by $$ \absval{x}=\signum{x}x. $$

\end{df}
\begin{thm}\label{thm:absval-properties}
Let $x\in\BBR$. Then
\begin{enumerate}
\item  $ |x| =
\left\{
\begin{array}{ll}
-x       & \mathrm{if} \ x < 0, \\
x & \mathrm{if} \ x \geq 0.
\end{array}
\right.$
\item $\absval{x}\geq 0$,
\item $\absval{x}=\max (x,-x)$,

\item $\absval{-x} = \absval{x}$,

\item \label{eq:abs_val_interval} $-\absval{x} \leq x \leq \absval{x}$.
\item $\sqrt{x^2} = |x| $
\item $|x|^2 = |x^2| = x^2$
\item $x = \signum{x}\absval{x}$
\end{enumerate}
\end{thm}
\begin{pf} These are immediate from the definition of $\absval{x}$.
\end{pf}
\begin{thm}
$(\forall (x, y)\in \BBR^2)$,
$$\absval{xy} =\absval{x}\absval{y}. $$
\end{thm}
\begin{pf}
We have $$\absval{xy} = \signum{xy}xy = \left(\signum{x}x\right)
\left(\signum{y}y\right) = \absval{x}\absval{y},  $$where we have
used  Lemma \ref{lem:signum-is-multiplicative}.
\end{pf}
\begin{thm}\label{thm:|x|within_t}Let $t\geq 0$. Then
$$|x| \leq t \iff -t \leq x \leq t.  $$
\end{thm}
\begin{pf}
Either $\absval{x} =x$ or $\absval{x}=-x$. If   $\absval{x} =x$,
$$\absval{x}\leq t \iff x\leq t \iff -t \leq 0\leq x \leq t.   $$
If   $\absval{x} =-x$,
$$\absval{x}\leq t \iff -x\leq t \iff -t \leq x \leq 0 \leq t.   $$
\end{pf}


\begin{thm}If $(x,y)\in\BBR^2$,
$\max (x, y) = \dfrac{x+y+\absval{x-y}}{2}$ and $\min (x, y) =
\dfrac{x+y-\absval{x-y}}{2}$.
\end{thm}
\begin{pf}Observe that $\max (x, y) + \min (x, y) = x+y$, since one
of these quantities must be the maximum and the other the minimum,
or else, they are both equal.

\bigskip

Now, either $\absval{x-y} = x-y$, and so $x\geq y$, meaning that
$\max (x, y)-\min (x, y) = x-y$, or $\absval{x-y} = -(x-y)=y-x$,
which means that $y\geq x$ and so $\max (x, y)-\min (x, y) = y-x$.
In either case we get $\max(x,y)-\min (x, y)=\absval{x-y}$. Solving
now the system of equations $$\begin{array}{lll}\max (x, y) + \min
(x, y) & = & x+y\\  \max(x,y)-\min (x, y) & = &\absval{x-y},\\
\end{array}
$$for $\max(x,y)$ and $\min (x,y)$ gives the result.
\end{pf}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Let $x, y$ be real numbers. Then $$ 0 \leq  x < y \iff x^2 < y^2. $$
\end{pro}
\begin{pro}Let $t \geq 0$. Prove that
$$|x| \geq t \iff (x \geq t) \quad \mathrm{or}\quad (x\leq -t).  $$
\end{pro}
\begin{pro}
Let $(x, y)\in \BBR^2$. Prove that $\max (x, y)=-\min (-x, -y)$.
\end{pro}

\begin{pro}
Let $x, y , z$ be real numbers. Prove that
$$
\max (x, y, z)   =   x + y + z - \min (x, y) - \min (y, z)  - \min
(z, x) + \min (x, y , z). $$
\end{pro}

\begin{pro}
Let $a<b$. Demonstrate that $$\absval{x-a}<\absval{x-b} \iff
x<\dfrac{a+b}{2}.
$$
\end{pro}
\end{multicols}

\section{Classical Inequalities}
\subsection{Triangle Inequality}
\begin{thm}[Triangle Inequality] Let $(a, b)\in \BBR^2$. Then
\begin{equation}\fcolorbox{blue}{white}{ $|a + b| \leq |a| + |b|$.}\end{equation}
\label{tri_ineq}
\end{thm}
\begin{pf}
From \ref{eq:abs_val_interval} in Theorem
\ref{thm:absval-properties}, by addition,
$$-|a| \leq a \leq |a| $$to$$-|b| \leq b \leq |b| $$we obtain
$$-(|a| + |b| ) \leq a + b \leq (|a| + |b|),$$whence the theorem follows by applying Theorem \ref{thm:|x|within_t}.
\end{pf}
By induction, we obtain the following generalisation to $n$ terms.
\begin{cor}\label{cor:triangle-ineq}
Let $x_1, x_2, \ldots , x_n$ be real numbers. Then $$ \absval{x_1 +
x_2 + \cdots + x_n}\leq \absval{x_1}+\absval{x_2}+\cdots +
\absval{x_n}.
$$
\end{cor}
\begin{pf}
We apply Theorem \ref{tri_ineq} $n-1$ times
$$ \begin{array}{lll} \absval{x_1 +
x_2 + \cdots + x_n}& \leq & \absval{x_1} +\absval{ x_2 + \cdots
x_{n-1}+ x_n}\\
& \leq & \absval{x_1} +\absval{x_2} +\absval{ x_3 + \cdots
x_{n-1}+ x_n}\\
& \vdots & \\
& \leq & \absval{x_1}+\absval{x_2}+\cdots + \absval{x_{n-1}+x_n}\\ &
\leq & \absval{x_1}+\absval{x_2}+\cdots +
\absval{x_{n-1}}+\absval{x_n}.
\end{array}$$


\end{pf}
\begin{cor}
 Let $(a, b)\in \BBR^2$. Then
\begin{equation} \fcolorbox{blue}{white}{$ \absval{|a| - |b|}  \leq \absval{a - b}$}.\end{equation}
\label{tri_ineq_2}
\end{cor}
\begin{pf}
We have $$|a| = |a - b + b| \leq |a - b| + |b|,$$giving
$$|a| - |b| \leq |a - b|.$$Similarly,
$$|b| = |b - a + a| \leq |b - a| + |a| = |a - b| + |a|,$$gives
$$|b| - |a| \leq |a - b|\implies -\absval{a-b} \leq \absval{a}-\absval{b}.$$Thus
$$-\absval{a-b} \leq \absval{a}-\absval{b}  \leq  \absval{a-b},  $$
and we now apply Theorem \ref{thm:|x|within_t}.
\end{pf}

\begin{thm}\label{thm:min-max-fractions}
Let $b_i > 0$ for $1\leq i \leq n$. Then
$$\min \left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n} \right) \leq \dfrac{a_1+a_2+\cdots + a_n}{b_1+b_2+\cdots + b_n}\leq \max \left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n} \right) . $$
\end{thm}
\begin{pf}
For every $k$, \quad  $1\leq k \leq n$,
$$\min \left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n} \right) \leq \dfrac{a_k}{b_k}
\leq \max \left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots ,
\dfrac{a_n}{b_n} \right) \implies b_k\min
\left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n}
\right) \leq a_k \leq b_k\max
\left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n}
\right). $$Adding all these inequalities for $1\leq k\leq n$,
$$(b_1+b_2+\cdots + b_n)\min
\left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n}
\right) \leq a_1+a_2+\cdots + a_n \leq (b_1+b_2+\cdots + b_n)\max
\left(\dfrac{a_1}{b_1},\dfrac{a_2}{b_2},\ldots , \dfrac{a_n}{b_n}
\right), $$from where the result is obtained.\end{pf}

\subsection{Bernoulli's Inequality}
\begin{thm}
If $0 \leq a < b$ and $n \in \BBN$, \ $n\geq 2$, then
$$na^{n - 1} < \frac{b^{n} - a^{n}}{b - a} < nb^{n - 1}.$$
\label{thm:ineq_bin}\end{thm}
\begin{pf}
By Theorem \ref{thm:diffbinom},
$$\begin{array}{lll}
\dfrac{b^{n} - a^{n}}{b - a}  & = & b^{n - 1} + b^{n - 2}a + b^{n -
3}a^2 + \cdots + b^2a^{n - 3} + ba^{n - 2} + a^{n - 1} \\
& < & b^{n - 1} + b^{n - 1} + \cdots + b^{n - 1} + b^{n - 1} \\
& = & nb^{n - 1},
\end{array} $$from where the dextral inequality follows. The
sinistral inequality can be established similarly.
\end{pf}
\begin{thm}[Bernoulli's Inequality] If $x > -1, x  \neq 0,$ and if
$n \in\BBN\setminus \{0\}$ then
$$(1 + x)^n > 1 + nx.$$
\label{thm:bernoulli}
\end{thm}
\begin{pf}
Set $b = 1 + x, a = 1$ in Theorem \ref{thm:ineq_bin} and use the
sinistral inequality.
\end{pf}
\begin{rem}
If $x> 0$ then Bernoulli's Inequality is an easy consequence of the
Binomial Theorem, as
$$(1+x)^n = 1 + \binom{n}{1}x+ \binom{n}{2}x^2+ \cdots > 1 + \binom{n}{1}x = 1+nx. $$
\end{rem}

\subsection{Rearrangement Inequality}
\begin{df}
Given a set of real numbers $\{x_1, x_2, \ldots , x_n\}$ denote by
$$\check{x}_1\geq  \check{x}_2\geq  \cdots \geq \check{x}_n$$ the
decreasing rearrangement of the $x_i$ and denote by
$$\hat{x}_1\leq  \hat{x}_2\leq  \cdots \leq \hat{x}_n$$the
increasing rearrangement of the $x_i$.
\end{df}
\begin{df}
Given two sequences  of real numbers $\{x_1, x_2, \ldots , x_n\}$
and $\{y_1, y_2, \ldots , y_n\}$ of the same length $n$, we say that
they are {\em similarly sorted} if they are both increasing or both
decreasing, and {\em differently sorted} if one is increasing and
the other decreasing.
\end{df}
\begin{exa}
The sequences $1 \leq 2 \leq \cdots \leq n$ and $1^2 \leq 2^2 \leq
\cdots \leq n^2$ are similarly sorted, and the sequences
$\dfrac{1}{1^2} \geq \dfrac{1}{2^2} \geq \cdots \geq \dfrac{1}{n^2}$
and $1^3 \leq 2^3 \leq \cdots \leq n^3$ are differently sorted.
\end{exa}
\begin{thm}[Rearrangement Inequality]\label{thm:rearrangement-ineq}
Given sets of real numbers $\{a_1, a_2, \ldots , a_n\}$ and $\{b_1,
b_2, \ldots , b_n\}$ we have $$ \sum _{1\leq k\leq
n}\check{a}_k\hat{b}_k \leq \sum _{1\leq k \leq n} a_kb_k \leq \sum
_{1\leq k \leq n}\hat{a}_k\hat{b}_k.
$$Thus the sum $ \sum _{1\leq k \leq n} a_kb_k $ is minimised when
the sequences are differently sorted, and maximised when the
sequences are similarly sorted.
\end{thm}
\begin{rem}
Observe that $$\sum _{1\leq k \leq n}\check{a}_k\hat{b}_k = \sum
_{1\leq k \leq n}\hat{a}_k\check{b}_k\qquad\mathrm{and}\qquad \sum
_{1\leq k \leq n}\hat{a}_k\hat{b}_k = \sum _{1\leq k \leq
n}\check{a}_k\check{b}_k.
$$
\end{rem}
\begin{pf}
Let $\{\sigma (1),\sigma(2), \ldots , \sigma(n)\}$ be a reordering
of $\{1,2, \ldots , n\}$. If there are two sub-indices   $i, j$,
such that the sequences pull in opposite directions, say, $a_i
>a_j$ and $b_{\sigma (i)}< b_{\sigma (j)}$, then consider the sums
$$ \begin{array}{lll}S & = & a_1b_{\sigma (1)}+a_2b_{\sigma (2)}+\cdots +a_ib_{\sigma (i)}+\cdots +a_jb_{\sigma (j)}+\cdots + a_nb_{\sigma (n)}\\
S' & = & a_1b_{\sigma (1)}+a_2b_{\sigma (2)}+\cdots +a_ib_{\sigma (j)}+\cdots +a_jb_{\sigma (i)}+\cdots + a_nb_{\sigma (n)}\\
 \end{array}$$
 Then $$ S'-S = (a_i-a_j)(b_{\sigma(j)}-b_{\sigma (i)}) >0. $$This
 last inequality shews that  the closer the $a$'s and the $b$'s are to pulling in the same direction the larger the sum
 becomes. This proves the result.
\end{pf}
\subsection{Arithmetic Mean-Geometric Mean Inequality}
\begin{thm}[Arithmetic Mean-Geometric Mean Inequality]\label{thm:AMGM-ineq}
Let $a_{1}, \dots, a_{n}$ be positive real numbers. Then their
geometric mean is at most their arithmetic mean, that is,
    $$
   \sqrt[n]{a_{1}\cdots a_{n}} \leq  \dfrac{a_{1} + \cdots + a_{n}}{n},
    $$
    with equality if and only if $a_{1} = \cdots = a_{n}$.
\end{thm}
We will provide multiple proofs of this important inequality. Some
other proofs will be found in latter chapters.
\begin{f-pf}
Our first  proof uses the Rearrangement Inequality (Theorem
\ref{thm:rearrangement-ineq}) in a rather clever way. We may assume
that the $a_k$ are strictly positive. Put
$$x_1=\dfrac{a_1}{(a_1a_2\cdots a_n)^{1/n}}, \quad x_2=\dfrac{a_1a_2}{(a_1a_2\cdots
a_n)^{2/n}},\quad  \ldots ,\quad  x_n=\dfrac{a_1a_2\cdots
a_n}{(a_1a_2\cdots a_n)^{n/n}}=1,
$$
and$$ y_1 = \dfrac{1}{x_1}, \quad y_2 = \dfrac{1}{x_2},\quad \ldots
,\quad y_n = \dfrac{1}{x_n}=1.
$$
Observe that for $2 \leq k \leq n$, $$x_ky_{k-1} =
\dfrac{a_1a_2\cdots a_k}{(a_1a_2\cdots a_n)^{k/n}}\cdot
\dfrac{(a_1a_2\cdots a_n)^{(k-1)/n}} {a_1a_2\cdots a_{k-1}} =
\dfrac{a_k}{(a_1a_2\cdots a_n)^{1/n}}.$$

The $x_k$ and $y_k$ are differently sorted, so by virtue of the
Rearrangement Inequality we gather
$$\begin{array}{lll} 1+1+\cdots + 1 & = & x_1y_1+x_2y_2+\cdots + x_ny_n \\
& \leq  & x_1y_n+x_2y_{1}+\cdots + x_ny_{n-1} \\
& = & \dfrac{a_1}{(a_1a_2\cdots a_n)^{1/n}}+
\dfrac{a_2}{(a_1a_2\cdots a_n)^{1/n}} + \cdots +
\dfrac{a_n}{(a_1a_2\cdots a_n)^{1/n}},
 \end{array}$$
 or $$ n \leq \dfrac{a_1+a_2+\cdots + a_n}{(a_1a_2\cdots a_n)^{1/n}},
 $$from where we obtain the result.
\end{f-pf}
\begin{s-pf}
This second proof is a clever induction argument due to Cauchy. It
proves the inequality first for powers of $2$ and then interpolates
for numbers between consecutive powers of $2$.

\bigskip
 Since the square of a
real number is always positive, we have, for positive real numbers
$a, b$
$$ (\sqrt{a}-\sqrt{b})^2 \geq 0 \implies \sqrt{ab} \leq \dfrac{a+b}{2},
$$proving the inequality for $k=2$. Observe that equality happens if and only if $a=b$. Assume now that the inequality
is valid for $k=2^{n-1}>2$. This means that for any positive real
numbers $x_1, x_2, \ldots , x_{2^{n-1}} $ we have
\begin{equation}\label{eq:amgm-n-1}
\left(x_1x_2\cdots x_{2^{n-1}}\right)^{1/2^{n-1}} \leq
\dfrac{x_1+x_2+\cdots +x_{2^{n-1}}}{2^{n-1}}.
\end{equation}
Let us prove the inequality for $2k=2^n$. Consider any positive
real numbers $y_1, y_2, \ldots , y_{2^{n}}$. Notice that there are
$2^n-2^{n-1} = 2^{n-1}(2-1) = 2^{n-1}$ integers in the interval
$\lcrc{2^{n-1}+1}{2^n}$. We have
$$\begin{array}{lll}\left(y_1y_2\cdots y_{2^{n}}\right)^{1/2^{n}} & = &
\sqrt{\left(y_1y_2\cdots
y_{2^{n-1}}\right)^{1/2^{n-1}}\left(y_{2^{n-1}+1}\cdots
y_{2^{n}}\right)^{1/2^{n-1}}} \\
& \leq & \dfrac{\left(y_1y_2\cdots
y_{2^{n-1}}\right)^{1/2^{n-1}}+\left(y_{2^{n-1}+1}\cdots
y_{2^{n}}\right)^{1/2^{n-1}}}{2}\\
& \leq & \dfrac{\dfrac{y_1+y_2+\cdots
+y_{2^{n-1}}}{2^{n-1}}+\dfrac{y_{2^{n-1}+1}+\cdots
+y_{2^{n}}}{2^{n-1}}}{2}\\
& = & \dfrac{y_1 + \cdots + y_{2^n}}{2^n},
\end{array}$$
where the first inequality follows by the Case $n=2$ and the second
by the induction hypothesis (\ref{eq:amgm-n-1}). The theorem is thus
proved for powers of $2$.

\bigskip

Assume now that $2^{n-1}<k<2^n$, and consider the $k$ positive real
numbers $a_1, a_2, \ldots , a_k$. The trick is to pad this
collection of real numbers up to the next highest power of $2$, the
added real numbers being the average of the existing ones. Hence
consider the $2^n$ real numbers
$$a_1, a_2, \ldots , a_k, a_{k+1}, \ldots , a_{2^n}  $$
with $a_{k+1}= \ldots = a_{2^n}=\dfrac{a_1+ a_2+ \cdots + a_k}{k}$.
Since we have already proved the theorem for $2^n$ we have
$$ \left(a_1a_2\cdots a_k\left(\dfrac{a_1+ a_2+ \cdots + a_k}{k}\right)^{2^n-k}\right)^{1/2^n} \leq
\dfrac{a_1+ a_2+ \cdots + a_k+(2^n-k)\left(\dfrac{a_1+ a_2+ \cdots +
a_k}{k}\right)}{2^n},$$whence $$\left(a_1a_2\cdots
a_k\right)^{1/2^n}\left(\dfrac{a_1+ a_2+ \cdots +
a_k}{k}\right)^{1-k/2^n} \leq \dfrac{k\dfrac{a_1+ a_2+ \cdots +
a_k}{k}+(2^n-k)\left(\dfrac{a_1+ a_2+ \cdots + a_k}{k}\right)}{2^n},
$$which implies
$$\left(a_1a_2\cdots
a_k\right)^{1/2^n}\left(\dfrac{a_1+ a_2+ \cdots +
a_k}{k}\right)^{1-k/2^n} \leq \left(\dfrac{a_1+ a_2+ \cdots +
a_k}{k}\right),
$$


Solving for $\dfrac{a_1+ a_2+ \cdots + a_k}{k}$ gives the desired
inequality.
\end{s-pf}
\begin{t-pf}
As in the second proof, the Case $k=2$ is easily established. Put $$
A_k = \dfrac{a_1+a_2+\cdots + a_k}{k}, \qquad G_k =
\left(a_1a_2\cdots a_k\right)^{1/k}.$$Observe that
$$ a_{k+1} = (k+1)A_{k+1}-kA_k. $$
The inductive hypothesis is that $A_k \geq G_k$ and we must shew
that $A_{k+1}\geq G_{k+1}$. Put
$$A= \dfrac{a_{k+1}+(k-1)A_{k+1}}{k}, \qquad G=\left(a_{k+1}A_{k+1} ^{k-1}\right)^{1/k}.
$$By the inductive hypothesis $A \geq G$.
Now, $$ \dfrac{A+A_k}{2} =
\dfrac{\dfrac{(k+1)A_{k+1}-kA_k+(k-1)A_{k+1}}{k}+A_k}{2}=A_{k+1}.
$$Hence
$$\begin{array}{lll} A_{k+1} & = & \dfrac{A+A_k}{2}\\
& \geq & \left(AA_k\right)^{1/2} \\
& \geq & \left(GG_k\right)^{1/2} \\
& = & \left(G_{k+1} ^{k+1}A_{k+1} ^{k-1}\right)^{1/2k}.\\
\end{array}$$ We have established that $$A_{k+1} \geq \left(G_{k+1} ^{k+1}A_{k+1} ^{k-1}\right)^{1/2k}\implies A_{k+1}\geq G_{k+1},
$$completing the induction.
\end{t-pf}
\begin{fo-pf}
We will make a series of substitutions that preserve the sum
$$a_1+a_2+\cdots +a_n$$while strictly increasing the product $$a_1a_2\cdots a_n. $$ At the
end, the $a_i$ will all be equal and the arithmetic mean $A$ of the
numbers will be equal to their geometric mean $G$. If the $a_i$
where all $>A$ then $\dfrac{a_1+a_2+\cdots
+a_n}{n}>\dfrac{nA}{n}=A$, impossible. Similarly, the $a_i$ cannot
be all $<A$. Hence there must exist two indices say $i, j$, such
that $a_i < A < a_j$. Put $a_i '=A$, $a_j'=a_i+a_j-A$. Observe that
$a_i+a_j=a_i'+a_j'$, so replacing the original $a$'s with the primed
$a$'s does not alter the arithmetic mean. On the other hand,
$$
 a_i'a_j'=   A\left(a_i + a_j - A\right) = a_ia_j + \left(a_j - A\right)\left(A - a_i\right) > a_ia_j
$$since $a_{j}-A>0$ and $A-a_i>0$.

\bigskip
This change has replaced one of the $a$'s by a quantity equal to the
arithmetic mean, has not changed the arithmetic mean, and made the
geometric mean larger. Since there at most $n$ $a$'s to be replaced,
the procedure must eventually terminate when all the $a$'s are equal
(to their arithmetic mean). Strict inequality then holds when at
least two of the $a$'s are unequal.
\end{fo-pf}

\subsection{Cauchy-Bunyakovsky-Schwarz Inequality}

\begin{thm}[Cauchy-Bunyakovsky-Schwarz Inequality]\label{thm:CBS-ineq}
Let $x_k, y_k$ be real numbers, $1 \leq k \leq n$. Then
$$\absval{\sum _{k = 1} ^n x_ky_k} \leq \left(\sum _{k = 1} ^n x_k
^2 \right)^{1/2}\left(\sum _{k = 1} ^n y_k ^2 \right)^{1/2},   $$
with equality if and only if
$$(x_1,x_2,\ldots , x_n)=t(y_1,y_2,\ldots , y_n)
$$for some real constant $t$.
\end{thm}
\begin{f-pf}The inequality follows at once from Lagrange's Identity
$$ \left(\sum _{k=1} ^nx_ky_k\right)^2
=\left(\sum _{k=1} ^nx_k ^2\right)\left(\sum _{k=1} ^ny_k
^2\right)-\sum _{1\leq k <j\leq n}(x_ky_j-x_jy_k)^2$$ (Theorem
\ref{thm:lagranges-id}), since $\sum _{1\leq k <j\leq
n}(x_ky_j-x_jy_k)^2\geq 0$.
\end{f-pf}
\begin{s-pf}
Put $\dis{a = \sum _{k = 1} ^n x_k ^2}$, $\dis{b = \sum _{k = 1} ^n
x_ky_k }$, and $\dis{c = \sum _{k = 1} ^n y_k ^2}$. Consider the
quadratic polynomial
$$at^2 -
2bt + c= t^2\sum _{k = 1} ^n x_k ^2 - 2t \sum _{k = 1} ^n x_ky_k +
\sum _{k = 1} ^n y_k ^2 = \sum _{k = 1} ^n (tx_k - y_k)^2  \geq 0,
$$ where the inequality follows because it is a sum of squares of real numbers. Thus this  quadratic polynomial is positive for all
real $t$, so it cannot have two distinct real roots. Its discriminant $4b^2 -
4ac$ must be negative, from where we gather
$$4\left(\sum _{k = 1} ^n x_ky_k\right)^2 \leq 4\left(\sum _{k = 1} ^n x_k
^2 \right)\left(\sum _{k = 1} ^n y_k ^2 \right),   $$which gives the
inequality.
\end{s-pf}
For our third proof of the CBS Inequality we need the following
lemma.

\begin{lem}\label{lem:for-CBS}
For $(a,b,x,y)\in \BBR^4$ with $x>0$ and $y>0$ the following
inequality holds: $$\dfrac{a^2}{x}+\dfrac{b^2}{y}\geq
\dfrac{(a+b)^2}{x+y}.
$$Equality holds if and only if $\dfrac{a}{x} = \dfrac{b}{y}$.
\end{lem}
\begin{pf}
Since the square of a real number is always positive, we have
$$\begin{array}{lll}(ay-bx)^2\geq 0& \implies & a^2y^2-2abxy+b^2x^2\geq 0\\ & \implies & a^2y(x+y)+b^2x(x+y) \geq
(a+b)^2xy\\
& \implies & \dfrac{a^2}{x}+\dfrac{b^2}{y}\geq
\dfrac{(a+b)^2}{x+y}.\\
\end{array}$$ Equality holds if and only if the first inequality is $0$.\end{pf}
\begin{rem}
Iterating the result on Lemma \ref{lem:for-CBS},
$$\dfrac{a_1 ^2}{b_1} + \dfrac{a_2 ^2}{b_2} + \cdots + \dfrac{a_n ^2}{b_n} \geq \dfrac{(a_1+a_2+\cdots + a_n)^2}{b_1+b_2+\cdots + b_n}, $$
with equality if and only if
$\dfrac{a_1}{b_1}=\dfrac{a_2}{b_2}=\cdots = \dfrac{a_n}{b_n}.$
\end{rem}
\begin{t-pf}
By the preceding remark, we have
$$\begin{array}{lll}x_1 ^2 + x_2 ^2 + \cdots + x_n ^2 & = & \dfrac{x_1 ^2 y_1 ^2}{y_1 ^2}+ \dfrac{x_2 ^2 y_2 ^2}{y_2 ^2}+\cdots +  \dfrac{x_n ^2 y_n ^2}{y_n ^2}
\\ & \geq & \dfrac{(x_1y_1+x_2y_2+\cdots + x_ny_n)^2}{y_1 ^2+y_2 ^2 +
\cdots + y_n ^2},
\end{array}
$$and upon rearranging, CBS is once again obtained.\end{t-pf}
\subsection{Minkowski's Inequality}
\begin{thm}[Minkowski's Inequality]\label{thm:minkowski-ineq}
Let $x_k, y_k$ be any real numbers. Then
$$ \left(\sum _{k=1} ^n (x_k+y_k)^2\right)^{1/2}\leq  \left(\sum _{k=1} ^n x_k ^2\right)^{1/2} +  \left(\sum _{k=1} ^n y_k ^2\right)^{1/2}. $$
\end{thm}
\begin{pf}
We have
$$ \begin{array}{lll}\sum _{k=1} ^n (x_k+y_k)^2 & = & \sum _{k=1} ^n x_k ^2+2\sum _{k=1} ^n x_ky_k      +\sum _{k=1} ^ny_k ^2\\
& \leq & \sum _{k=1} ^n x_k ^2+2\left(\sum _{k=1} ^n x_k
^2\right)^{1/2}\left(\sum _{k=1} ^ny_k ^2\right)^{1/2} +\sum _{k=1} ^ny_k ^2\\
& = & \left(\left(\sum _{k=1} ^nx_k ^2\right)^{1/2} +\left(\sum
_{k=1} ^ny_k ^2\right)^{1/2} \right)^2,\end{array}$$where the
inequality follows from the CBS Inequality.\end{pf}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Let $(a, b, c, d)\in \BBR^4$. Prove that $$
\absval{\absval{a-c}-\absval{b-c}} \leq \absval{a-b} \leq
\absval{a-c}+\absval{b-c}.
$$
\end{pro}
\begin{pro}Let $(x_1,x_2, \ldots, x_n)\in\BBR^n$ be such that $$ x_1 ^2+x_2 ^2+\cdots +x_n ^2=
x_1 ^3+x_2 ^3+\cdots +x_n ^3=x_1 ^4+x_2 ^4+\cdots +x_n ^4.
$$Prove that $x_k\in \{0,1\}$.
\begin{answer}
The given equalities entail that $$ \sum _{k=1} ^n (x_k ^2-x_k)^2=0.
$$A sum of squares is $0$ if and only if every term is $0$. This
gives the result.
\end{answer}
\end{pro}
\begin{pro}Let $n \geq 2$ an integer. Let $(x_1,x_2, \ldots, x_n)\in\BBR^n$ be such that $$ x_1 ^2+x_2 ^2+\cdots +x_n ^2=x_1x_2+x_2x_3+\cdots + x_{n-1}x_n+x_nx_1  .
$$Prove that $x_1=x_2=\cdots=x_n$.
\begin{answer}
The given equality entails that $$
\dfrac{1}{2}\left((x_1-x_2)^2+(x_2-x_3)^2+\cdots +
(x_{n-1}-x_n)^2+(x_n-x_1)^2\right)=0.
$$A sum of squares is $0$ if and only if every term is $0$. This
gives the result.
\end{answer}
\end{pro}

\begin{pro}\label{pro:mediant}
If $b>0$ and $B>0$ prove that
$$ \dfrac{a}{b}<\dfrac{A}{B}\implies \dfrac{a}{b}<\dfrac{a+A}{b+B}<\dfrac{A}{B}.
$$Further, if
$p$ and $q$ are positive integers such that
$$ \dfrac{7}{10} < \dfrac{p}{q} < \dfrac{11}{15},
$$what is the least value of $q$? \\
\begin{answer}
Since $aB<Ab$ one has $a(b+B)=ab +aB <ab+Ab = (a+A)b$ so
$\dfrac{a}{b}<\dfrac{a+A}{b+B}$. Similarly $B(a+A) = aB+AB<Ab+AB =
A(b+B)$ and so $\dfrac{a+A}{b+B}<\dfrac{A}{B}$.

\bigskip
We have $$ \dfrac{7}{10} < \dfrac{11}{15} \implies \dfrac{7}{10} <
\dfrac{18}{25} < \dfrac{11}{15} \implies \dfrac{7}{10}
<\dfrac{25}{35} < \dfrac{18}{25} < \dfrac{11}{15}.    $$ Since
$\dfrac{25}{35} = \dfrac{5}{7},$ we have $q \leq 7$. Could it be
smaller? Observe that $\dfrac{5}{6} > \dfrac{11}{15} $ and that
$\dfrac{4}{6} < \dfrac{7}{10}$. Thus by considering the cases with
denominators $q = 1, 2, 3, 4, 5, 6$, we see that no such fraction
lies in the desired interval. The smallest denominator is thus $7$.
\end{answer}
\end{pro}

\begin{pro}
Prove that if $r \geq s \geq t$ then
$$
r^2 - s^2 + t^2  \geq (r - s + t)^2. $$
\begin{answer}  We have
$$ (r - s + t)^2 - t^2 = (r - s + t - t)(r - s + t + t) = (r - s)(r - s + 2t).$$
Since $t - s \leq 0,$ $r - s + 2t = r + s + 2(t - s) \leq r + s$ and
so
$$(r - s + t)^2 - t^2 \leq (r - s)(r + s) = r^2 - s^2$$which gives
$$(r - s + t)^2 \leq r^2 - s^2 + t^2.$$
\end{answer}
\end{pro}

\begin{pro}
Assume that $a_k, b_k, c_k, k = 1, \ldots, n$, are positive real
numbers. Shew that
$$\left(\sum _{k = 1} ^n a_kb_kc_k\right)^{4}
\leq \left(\sum _{k = 1} ^n a_k ^4\right)\left(\sum _{k = 1} ^n b_k
^4\right) \left(\sum _{k = 1} ^n c_k
^2\right)^{2}.$$\begin{answer}Using the CBS Inequality (Theorem
\ref{thm:CBS-ineq}) on $\sum _{k = 1} ^n (a_kb_k)c_k$ once we obtain
$$\sum _{k = 1} ^n a_kb_kc_k
\leq \left(\sum _{k = 1} ^n a_k ^2b_k ^2\right)^{1/2} \left(\sum _{k
= 1} ^n c_k ^2\right)^{1/2}.
$$Using CBS again on $\left(\sum _{k = 1} ^n a_k ^2b_k ^2\right)^{1/2}$ we obtain
$$
\begin{array}{lll}
\sum _{k = 1} ^n a_kb_kc_k  & \leq &
 \left(\sum _{k = 1} ^n a_k ^2 b_k ^2\right)^{1/2}
\left(\sum _{k = 1} ^n c_k ^2\right)^{1/2} \\
  & \leq & \left(\sum _{k = 1} ^n a_k ^4\right)^{1/4}
\left(\sum _{k = 1} ^n b_k ^4\right)^{1/4}
\left(\sum _{k = 1} ^n c_k ^2\right)^{1/2}, \\
\end{array}
$$which gives the required inequality.
\end{answer}
\end{pro}
\begin{pro}
Prove that for integer $n>1$, $$ n!<\left(\dfrac{n+1}{2}\right)^n.
$$
\begin{answer}
This follows directly from the AM-GM Inequality applied to
$1,2,\ldots , n$:
$$ n!^{1/n} = (1\cdot 2 \cdots n)^{1/n}< \dfrac{1+2+\cdots + n}{n} = \dfrac{n+1}{2},
$$where strict inequality follows since the factors are unequal for
$n>1$.
\end{answer}
\end{pro}
\begin{pro}\label{pro:factorial-n^n/2}
Prove that for integer $n>2$, $$ n^{n/2}<n!.
$$
\begin{answer}
First observe that for integer $k$, $1< k < n$,\quad
$k(n-k+1)=k(n-k)+k>1(n-k)+k= n$. Thus
$$ n!^2 = (1\cdot n)(2\cdot (n-1))(3\cdot (n-2))\cdots ((n-1)\cdot 2)(n\cdot 1)>n\cdot n\cdot n\cdots n=n^n.  $$
\end{answer}
\end{pro}

\begin{pro}
Prove that for all integers $n\geq 0$ the inequality
$n(n-1)<2^{n+1}$ is verified.
\begin{answer}
From the Binomial Theorem, for $n\geq 2$,
$$2^n = (1+1)^n = \binom{n}{0}+\binom{n}{1}+\binom{n}{2}+\cdots +\binom{n}{n}>\binom{n}{2}=\dfrac{n(n-1)}{2}\implies 2^{n+1}>n(n-1).  $$
This establishes the inequality for $n\geq 2$. For $n=0$,
$0=0(0-1)<2^{0+1}$ and for $n=1$,  $0=1(1-1)<2^{1+1}$, so the
inequality is true for all natural numbers.
\end{answer}
\end{pro}
\begin{pro}\label{pro:sum-squares-ineq}
Prove that $\forall (a, b, c)\in\BBR^3$,
$$ a^2+b^2+c^2\geq ab + bc +ca. $$
\begin{answer}
Assume without loss of generality that  $a\geq b \geq c$. Then
$a\geq b \geq c$ is similarly sorted as itself, so by the
Rearrangement Inequality
$$ a^2 + b^2 + c^2=aa+bb+cc \geq ab + bc + ca. $$
This also follows directly from the identity
$$ a^2+b^2+c^2-ab-bc-ca = \left(a-\dfrac{b+c}{2}\right)^2+\dfrac{3}{4}\left(b-c\right)^2. $$
One can also use the AM-GM Inequality thrice:
$$a^2+b^2\geq 2ab; \quad  b^2+c^2\geq 2bc;\quad c^2+a^2\geq 2ca,
$$and add.
\end{answer}
\end{pro}
\begin{pro}
Prove that $\forall (a, b, c)\in\BBR^3$, with $a\geq 0$, $b \geq 0$,
$c\geq 0$, the following inequalities hold:
$$ a^3+b^3+c^3\geq \max(a^2b + b^2c +c^2a, a^2c+b^2a+c^2b), $$
$$ a^3+b^3+c^3\geq 3abc,  $$ $$ a^3+b^3+c^3\geq \dfrac{1}{2}\left(a^2(b+c) + b^2(c+a) +
c^2(a+b)\right).  $$
\begin{answer}
Assume without loss of generality that  $a\geq b \geq c$. Then
$a\geq b \geq c$ is similarly sorted as $a^2\geq b^2 \geq c^2$, so
by the Rearrangement Inequality
$$  a^3+b^3+c^3= aa^2 + bb^2 + cc^2 \geq a^2b + b^2c + c^2a,$$
and
$$  a^3+b^3+c^3= aa^2 + bb^2 + cc^2 \geq a^2c + b^2a + c^2b.$$
Upon adding $$  a^3+b^3+c^3= aa^2 + bb^2 + cc^2 \geq
\dfrac{1}{2}\left(a^2(b+c) + b^2(c+a) + c^2(a+b)\right).$$Again, if
$a\geq b \geq c$ then $$ab\geq ac \geq bc, $$ thus
$$ a^3+b^3+c^3 \geq a^2b + b^2c + c^2a = (ab)a+(bc)b+(ac)c\geq (ab)c+(bc)a+(ac)b=3abc.   $$
This last inequality also follows directly from the AM-GM
Inequality, as
$$ (a^3b^3c^3)^{1/3}\leq \dfrac{a^3+b^3+c^3}{3}, $$or from the
identity
$$ a^3+b^3+c^3-3abc= (a+b+c)(a^2+b^2+c^2-ab-bc-ca), $$and the
inequality of problem \ref{pro:sum-squares-ineq}.
\end{answer}
\end{pro}
\begin{pro}[Chebyshev's Inequality] Given sets of real numbers $\{a_1, a_2, \ldots , a_n\}$ and $\{b_1,
b_2, \ldots , b_n\}$ prove that $$\dfrac{1}{n} \sum _{1\leq k\leq
n}\check{a}_k\hat{b}_k \leq \left(\dfrac{1}{n}\sum _{1\leq k \leq n}
a_k\right)\left(\dfrac{1}{n}\sum _{1\leq k \leq n}b_k \right)\leq
\dfrac{1}{n}\sum _{1\leq k \leq n}\hat{a}_k\hat{b}_k.
$$
\begin{answer}
We apply $n$ times the Rearrangement Inequality
$$\begin{array}{lllll}\check{a}_1\hat{b}_1+
\check{a}_2\hat{b}_2+\cdots + \check{a}_n\hat{b}_n &  \leq  &
a_1b_1+a_2b_2+ \cdots + a_nb_n & \leq & \hat{a}_1\hat{b}_1+
\hat{a}_2\hat{b}_2+\cdots + \hat{a}_n\hat{b}_n\\
\check{a}_1\hat{b}_1+ \check{a}_2\hat{b}_2+\cdots +
\check{a}_n\hat{b}_n &  \leq  & a_1b_2+a_2b_3+ \cdots + a_nb_1 &
\leq & \hat{a}_1\hat{b}_1+
\hat{a}_2\hat{b}_2+\cdots + \hat{a}_n\hat{b}_n\\
\check{a}_1\hat{b}_1+ \check{a}_2\hat{b}_2+\cdots +
\check{a}_n\hat{b}_n &  \leq  & a_1b_3+a_2b_4+ \cdots + a_nb_2 &
\leq & \hat{a}_1\hat{b}_1+
\hat{a}_2\hat{b}_2+\cdots + \hat{a}_n\hat{b}_n\\
& & \vdots & & \\
\check{a}_1\hat{b}_1+ \check{a}_2\hat{b}_2+\cdots +
\check{a}_n\hat{b}_n &  \leq  & a_1b_n+a_2b_1+ \cdots + a_nb_{n-1} &
\leq & \hat{a}_1\hat{b}_1+
\hat{a}_2\hat{b}_2+\cdots + \hat{a}_n\hat{b}_n\\
\end{array}$$
Adding we obtain the desired inequalities.

\end{answer}
\end{pro}
\begin{pro}
If $x > 0$, from
$$\sqrt{x + 1} - \sqrt{x} = \frac{1}{\sqrt{x + 1} + \sqrt{x}},$$prove that
$$\frac{1}{2\sqrt{x + 1}} < \sqrt{x + 1} - \sqrt{x} < \frac{1}{2\sqrt{x}}.$$
Use this to prove that if  $n > 1$ is a positive integer, then
$$2\sqrt{n + 1} - 2 < 1 + \frac{1}{\sqrt{2}} + \frac{1}{\sqrt{3}} + \cdots + \frac{1}{\sqrt{n}}  < 2\sqrt{n} - 1$$
\end{pro}
\begin{pro}
If $0 < a \leq b$, shew that
$$\frac{1}{8}\cdot\frac{(b - a)^2}{b} \leq \frac{a + b}{2} - \sqrt{ab} \leq \frac{1}{8}\cdot\frac{(b - a)^2}{a} $$
\begin{answer}
Use the fact that $(b - a)^2 = (\sqrt{b} - \sqrt{a})^2(\sqrt{b} +
\sqrt{a})^2$.
\end{answer}
\end{pro}
\begin{pro}
Shew that $$\frac{1}{2}\cdot\frac{3}{4}\cdot\frac{5}{6}\cdots
\frac{9999}{10000} < \frac{1}{100}.$$ \begin{answer} Let
$$A = \frac{1}{2}\cdot\frac{3}{4}\cdot\frac{5}{6}\cdots \frac{9999}{10000} $$
and
$$ B =\frac{2}{3}\cdot\frac{4}{5}\cdot\frac{6}{7}\cdots\frac{10000}{10001}.$$

\bigskip

Clearly, $x^2 - 1 < x^2$  for all real numbers $x$. This implies
that
$$\frac{x - 1}{x} < \frac{x}{x + 1}$$ whenever these four quantities are
positive. Hence
$${\everymath{\displaystyle}\begin{array}{ccc}
{1}/{2} & < & {2}/{3} \\
{3}/{4} & < & {4}/{5} \\
{5}/{6} & < & {6}/{7} \\
\vdots & \vdots & \vdots \\
{9999}/{10000} & < & {10000}/{10001} \\
\end{array} }$$
As all the numbers involved are positive, we multiply both columns
to obtain
$$\frac{1}{2}\cdot\frac{3}{4}\cdot\frac{5}{6}\cdots \frac{9999}{10000}
<
\frac{2}{3}\cdot\frac{4}{5}\cdot\frac{6}{7}\cdots\frac{10000}{10001},$$
or $A < B.$ This yields $A^2 = A\cdot A < A\cdot B.$ Now
$$A\cdot B = \frac{1}{2}\cdot\frac{2}{3}\cdot\frac{3}{4}\cdot\frac{4}{5}\cdot\frac{5}{6}\cdot\frac{6}{7}
\cdot\frac{7}{8}\cdots\frac{9999}{10000}\cdot\frac{10000}{10001} =
\frac{1}{10001},$$ and consequently, $A^2 < A\cdot B = 1/10001.$ We
deduce that $A < 1/\sqrt{10001} < 1/100.$
\end{answer}
\end{pro}
\begin{pro}
Prove that for all $x>0$,
$$\sum _{k=1} ^n \dfrac{1}{(x+k)^2}<\dfrac{1}{x}-\dfrac{1}{x+n}.  $$
\begin{answer}
Observe that  for $k\geq 1$, $(x+k)^2>(x+k)(x+k-1)$ and so $$
\dfrac{1}{(x+k)^2} <\dfrac{1}{(x+k)(x+k-1)}  =
\dfrac{1}{x+k-1}-\dfrac{1}{x+k}.
$$Hence
$$\begin{array}{lll}\dfrac{1}{(x+1)^2} + \dfrac{1}{(x+2)^2} + \dfrac{1}{(x+3)^2} + \cdots + \dfrac{1}{(x+n-1)^2} + \dfrac{1}{(x+n)^2}
&  <  & \dfrac{1}{x(x+1)} +
\dfrac{1}{(x+1)(x+2)}+\dfrac{1}{(x+2)((x+3))} + \cdots +
\dfrac{1}{(x+n-2)(x+n-1)} +
\dfrac{1}{(x+n-1)(x+n)}\\
& = &
\dfrac{1}{x}-\dfrac{1}{x+1}+\dfrac{1}{x+1}-\dfrac{1}{x+2}+\dfrac{1}{x+2}-\dfrac{1}{x+3}+\cdots
+\dfrac{1}{x+n-2}-\dfrac{1}{x+n-1}+\dfrac{1}{x+n-1}-\dfrac{1}{x+n}\\
& = & \dfrac{1}{x}-\dfrac{1}{x+n}.
 \end{array}$$
\end{answer}
\end{pro}
\begin{pro}
Let $x_i\in\BBR$ such that $\sum _{i=1}^n\absval{x_i}=1$ and $\sum
_{i=1}^nx_i=0$. Prove that $$ \absval{\sum _{i=1} ^n \dfrac{x_i}{i}}
\leq \dfrac{1}{2}\left(1-\dfrac{1}{n}\right).
$$
\begin{answer}
For $1 \leq i \leq n$, we have
$$\absval{\dfrac{2}{i}-1-\dfrac{1}{n}} \leq 1 - \dfrac{1}{n} \iff \left(\dfrac{2}{i}-\left(1+\dfrac{1}{n}\right)\right)^2\leq \left(1-\dfrac{1}{n}\right)^2
\iff
\dfrac{4}{i^2}-\dfrac{4}{i}\left(1+\dfrac{1}{n}\right)+\dfrac{4}{n}\leq
0 \iff \dfrac{(i-n)(i-1)}{i^2n}\leq 0.$$Thus
$$\absval{\sum _{i=1} ^n \dfrac{x_i}{i}} = \dfrac{1}{2}\absval{\sum _{i=1} ^n \left(\dfrac{2}{i}-\left(1+\dfrac{1}{n}\right)\right)x_i},
$$as $\sum
_{i=1}^nx_i=0$. Now
$$ \absval{\sum _{i=1} ^n \left(\dfrac{2}{i}-\left(1+\dfrac{1}{n}\right)\right)x_i} \leq
\sum _{i=1} ^n \absval{ \dfrac{2}{i}-1-\dfrac{1}{n}}\absval{x_i}\leq
\left(1-\dfrac{1}{n}\right)\sum _{i=1} ^n\absval{x_i} =
\left(1-\dfrac{1}{n}\right).
$$
\end{answer}

\end{pro}
\begin{pro} Let $n$ be a strictly positive integer.
Let $x_i \geq 0$. Prove that $$ \prod _{k=1} ^n (1+x_k) \geq 1 +
\sum _{k=1} ^n x_k.
$$When does equality hold?
\begin{answer}
Expanding the product
$$\prod _{k=1} ^n (1+x_k) = 1 +
\sum _{k=1} ^n x_k  + \sum _{1 \leq i <j\leq n} x_ix_j+\cdots
\geq  1 + \sum _{k=1} ^n x_k,
$$since the $x_k\geq 0$. When $n=1$ equality is obvious. When $n>1$
equality is achieved when $\sum _{1 \leq i <j\leq n} x_ix_j=0$.
\end{answer}
\end{pro}

\begin{pro}[Nesbitt's Inequality] Let $a, b, c$ be strictly positive
real numbers. Then $$
\dfrac{a}{b+c}+\dfrac{b}{c+a}+\dfrac{c}{a+b}\geq \dfrac{3}{2}.
$$
\begin{answer}
Assume $a\geq b \geq c$. Put $s=a+b+c$. Then $$-a \leq -b \leq -c
\implies s-a\leq s-b\leq s-c\implies \dfrac{1}{s-a}\geq
\dfrac{1}{s-b}\geq \dfrac{1}{s-c}$$ and so the sequences $a, b, c$
and $\dfrac{1}{s-a},\dfrac{1}{s-b}, \dfrac{1}{s-c}$ are similarly
sorted. Using the Rearrangement Inequality twice: $$ \dfrac{a}{s-a}+
\dfrac{b}{s-b}+\dfrac{c}{s-c}\geq
\dfrac{a}{s-c}+\dfrac{b}{s-a}+\dfrac{c}{s-b}; \qquad \dfrac{a}{s-a}+
\dfrac{b}{s-b}+\dfrac{c}{s-c}\geq
\dfrac{a}{s-b}+\dfrac{b}{s-c}+\dfrac{c}{s-a}.
$$Adding these two inequalities
$$ 2\left( \dfrac{a}{s-a}+
\dfrac{b}{s-b}+\dfrac{c}{s-c}\right) \geq
\dfrac{b+c}{s-a}+\dfrac{c+a}{s-b}+\dfrac{a+b}{s-c},
$$whence
$$ 2\left( \dfrac{a}{b+c}+
\dfrac{b}{c+a}+\dfrac{c}{a+b}\right) \geq 3,
$$from where the result follows.
\end{answer}
\end{pro}
\begin{pro}\label{pro:continued-product}
Let $a>0$. Use mathematical induction to prove that
$$ \sqrt{a+\sqrt{a+\sqrt{a+\cdots + \sqrt{a}}}} < \dfrac{1+\sqrt{4a+1}}{2},  $$
where the left member contains an arbitrary number of radicals.
\begin{answer} Let $$P(n): \ \ \ \underbrace{\sqrt{a+\sqrt{a+\sqrt{a+\cdots
+ \sqrt{a}}}}}_{n \ \mathrm{radicands}} < \dfrac{1+\sqrt{4a+1}}{2}.
$$Let us prove $P(1)$, that is
$$\forall a > 0, \ \ \ \sqrt{a} <  \dfrac{1+\sqrt{4a+1}}{2}.$$
To get this one, let's work backwards. If $a>\dfrac{1}{4}$
$$ \begin{array}{lll}\sqrt{a} <  \dfrac{1+\sqrt{4a+1}}{2} & \iff &  2\sqrt{a} < 1 + \sqrt{4a + 1} \\
& \iff & 2\sqrt{a} -1 < \sqrt{4a + 1} \\
& \iff &  (2\sqrt{a} -1)^2 < (\sqrt{4a + 1})^2 \\
& \iff & 4a - 4\sqrt{a} + 1 < 4a + 1 \\
& \iff & -2\sqrt{a} < 0. \end{array}$$ all the steps are reversible
and the last inequality is always true. If $a\leq \dfrac{1}{4}$ then
trivially $2\sqrt{a} -1 < \sqrt{4a + 1}$. Thus $P(1)$ is true.
Assume now that $P(n)$ is true and let's derive $P(n + 1)$. From
$$\underbrace{\sqrt{a+\sqrt{a+\sqrt{a+\cdots +
\sqrt{a}}}}}_{n \ \mathrm{radicands}} < \dfrac{1+\sqrt{4a+1}}{2}
\implies \underbrace{\sqrt{a+\sqrt{a+\sqrt{a+\cdots +
\sqrt{a}}}}}_{n+1 \ \mathrm{radicands}} < \sqrt{a+
\dfrac{1+\sqrt{4a+1}}{2}}.
$$
we see that it is enough to shew that

$$\sqrt{a+
\dfrac{1+\sqrt{4a+1}}{2}} = \dfrac{1+\sqrt{4a+1}}{2}.
$$
But observe that
$$\begin{array}{lll}
(\sqrt{4a + 1} + 1)^2 = 4a + 2\sqrt{4a + 1} + 2 & \implies &
\dfrac{1+\sqrt{4a+1}}{2}=\sqrt{a+ \dfrac{1+\sqrt{4a+1}}{2}},
\end{array}$$proving the claim.

\end{answer}
\end{pro}

\begin{pro}
Let $a, b, c$ be positive real numbers. Prove that $$
(a+b)(b+c)(c+a)\geq 8abc.
$$
\begin{answer}
From the AM-GM Inequality, $$ a+b \geq 2\sqrt{ab}; \quad  b+c \geq
2\sqrt{bc}; c+a \geq 2\sqrt{ca}, $$and the desired inequality
follows upon multiplication of these three inequalities.
\end{answer}
\end{pro}
\begin{pro}[IMO, 1978]Let $a_k$ be a sequence of pairwise distinct
positive integers. Prove that $$\sum _{k=1} ^n \dfrac{a_k}{k^2}\geq
\sum _{k=1} ^n \dfrac{1}{k}.
$$
\begin{answer}
By the Rearrangement inequality
$$\sum _{k=1} ^n \dfrac{a_k}{k^2}\geq
\sum _{k=1} ^n \dfrac{\check{a}_k}{k^2}\geq \sum _{k=1} ^n
\dfrac{1}{k},
$$as $\check{a}_k \geq k$, the $a$'s being pairwise distinct
positive integers.
\end{answer}
\end{pro}
\begin{pro}[Harmonic
Mean-Geometric Mean Inequality] Let $x_i>0$ for $1 \leq i \leq n$.
Then
$$ \dfrac{n}{\dfrac{1}{x_1}+\dfrac{1}{x_2}+\cdots + \dfrac{1}{x_n}} \leq (x_1x_2\cdots x_n)^{1/n},  $$
with equality iff $x_1=x_2=\cdots =x_n$.
\begin{answer}By the AM-GM Inequality,
$$ \left(\dfrac{1}{x_1}\dfrac{1}{x_2}\cdots \dfrac{1}{x_n}\right)^{1/n}\leq \dfrac{\dfrac{1}{x_1}+\dfrac{1}{x_2}+\cdots + \dfrac{1}{x_n}}{n},  $$
whence the inequality.
\end{answer}
\end{pro}
\begin{pro}[Arithmetic
Mean-Quadratic Mean Inequality] Let $x_i\geq 0$ for $1 \leq i \leq
n$. Then
$$ \dfrac{x_1+x_2+\cdots + x_n}{n} \leq \left(\dfrac{x_1 ^2+x_2 ^2+\cdots + x_n ^2}{n}\right)^{1/2},  $$
with equality iff $x_1=x_2=\cdots =x_n$.
\begin{answer}By the CBS Inequality,
$$ \left(1\cdot x_1 + 1\cdot x_2 + \cdots + 1\cdot x_n\right)^2\leq \left(1^2+1^2+ \cdots + 1^2\right)\left(x_1 ^2 + x_2 ^2 + \cdots + x_n ^2\right),
$$which gives the desired inequality.
\end{answer}
\end{pro}

\begin{pro} Given a set of real numbers $\{a_1, a_2, \ldots , a_n\}$
prove that there is an index $m\in \{0,1,\ldots , n\}$ such that $$
\absval{\sum _{1\leq k \leq m} a_k - \sum _{m<k \leq n}a_k}\leq \max
_{1\leq k \leq n}\absval{a_k}.$$If $m=0$ the first sum is to be
taken as $0$ and if $m=n$ the second one will be taken as $0$.
\begin{answer}
Put $$  T_m = \sum _{1\leq k \leq m} a_k - \sum _{m<k \leq
n}a_k.$$Clearly $T_0 = -T_n$. Since the sequence $T_0, T_1, \ldots ,
T_n$ changes signs, choose an index $p$ such that $T_{p-1}$ and
$T_p$ have different signs. Thus either $T_{p-1}-T_p = 2|a_p|$ or
$T_p-T_{p-1}=2|a_p|$.  We claim that
$$ \min \left(\absval{T_{p-1}}, \absval{T_p}\right) \leq \max _{1\leq k \leq n}\absval{a_k}. $$
For, if contrariwise both $\absval{T_{p-1}} >  \max _{1\leq k \leq
n}\absval{a_k}$ and $\absval{T_p }>  \max _{1\leq k \leq
n}\absval{a_k}$, then $2|a_p|=|T_{p-1}-T_p|>2\max _{1\leq k \leq
n}\absval{a_k}$, a contradiction.
\end{answer}
\end{pro}

\begin{pro} Give a purely geometric proof of  Minkowski's Inequality for $n=2$. That is, prove that
if  $(a, b), (c, d) \in \BBR^2$, then
$$\sqrt{(a + c)^2 + (b + d)^2} \leq \sqrt{a^2 + b^2}  + \sqrt{c^2 + d^2}.$$Equality occurs if and only if
$ad = bc.$ \label{minkowski}
\begin{answer}
It is enough to prove this in the case when $a, b, c, d$ are all
positive. To this end, put $O = (0,0)$, $L = (a, b)$ and $M = (a+c,
b+d)$. By the triangle inequality $OM \leq OL + LM$, where equality
occurs if and only if the points are collinear. But then
$$\sqrt{(a+c)^2 + (b+d)^2} = OM \leq OL + LM = \sqrt{a^2+b^2}+
\sqrt{c^2+d^2},$$and equality occurs if and only if the points are
collinear, that is $\dfrac{a}{b} = \dfrac{c}{d}$.
\end{answer}
\end{pro}
\begin{pro}
Let $x_k\in \lcrc{0}{1}$ for $1\leq k \leq n$. Demonstrate that
$$\min \left(\prod _{k=1} ^n x_k,\prod _{k=1} ^n (1-x_k) \right) \leq \dfrac{1}{2^n}.  $$
\end{pro}
\begin{pro}
If $n>0$ is an integer and if $a_k>0$, $1\leq k \leq n$ are real
numbers, demonstrate that $$ \left(\sum
_{k=1}^n\dfrac{a_k}{k}\right)^2 \leq \sum _{j=1}^n\sum _{k=1}^n
\dfrac{a_ja_k}{j+k-1}.
$$
\end{pro}
\begin{pro}
Let $n$ be a strictly positive integer, let $a_k\geq 0$, $1\leq k
\leq n$ be real numbers such that $a_1\geq a_2\geq \cdots \geq a_n$,
and let $b_k$, $1\leq k \leq n$ be real numbers. Assume that for all
indices $k\in\{1,2,\ldots , n\}$, $$\sum _{i=1} ^ka_i\leq \sum
_{i=1} ^kb_i.$$ Prove that $$\sum _{i=1} ^na_i ^2\leq \sum _{i=1}
^nb_i ^2$$
\end{pro}
\begin{pro}
Let $n\geq 2$ an integer and let $a_k$, $1\leq k \leq n$ be real
numbers such that $a_1\leq a_2\leq \cdots \leq a_n$. Prove that
there is an index $k\in\{1,2,\ldots , n\}$ such that
$$(a_{k+1}-a_k)^2 \leq \dfrac{12}{n(n^2-1)}(a_1 ^2 + a_2 ^2 + \cdots
+ a_n ^2).
$$
\end{pro}
\begin{pro}[AIME 1991] Let  $P = \{a_1, a_2, \ldots, a_n\}$ be a collection of points with
$$0 < a_1 < a_2 < \cdots < a_n < 17.$$ Consider
$$S_n = \min _P \sum _{k = 1} ^n\sqrt{(2k - 1)^2 + a_k ^2},$$where the minimum runs over all
such partitions $P$. Shew that exactly one of $S_2, S_3, \ldots ,
S_n, \ldots$ is an integer, and find which one it is.
\begin{answer} Use Minkowski's Inequality and
the fact that $17^2 + 144^2= 145^2$. The desired value is $S_{12}$.
\end{answer}
\end{pro}

\end{multicols}
\section{Completeness Axiom}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} We saw that both
    $\BBQ$ and $\BBR$ are fields, and hence they both satisfy the
    same arithmetical axioms. Why the need then for $\BBR$? In this
    section we will study a property of $\BBR$ that is not shared
    with $\BBQ$, that of completeness. It essentially means that
    there are no `holes' on the real line.
\end{minipage}}
    \end{center}

\begin{df}A number $u$ is an {\em upper bound} for a set of numbers $A\subseteq \BBR$
if for all $a\in A$ we have $a \leq u$. The smallest such upper
bound is called the {\em supremum or least upper bound} of the set
$A$, and is denoted by $\sup A$. If $\sup A\in A$ then we say that
$A$ has a {\em maximum} and we denote it by $\max A (=\sup A)$.
Similarly, a number $l$ is a {\em lower bound} for a set of numbers
$B \subseteq \BBR$ if for all $b\in B$ we have $l \leq b$. The
largest such lower bound is called the {\em infimum or greatest
lower bound} of the set $B$, and is denoted by $\inf B$. If $\inf
B\in B$ then we say that $B$ has a {\em minimum} and we denote it by
$\min B (=\inf B)$.
\end{df}
\begin{rem}
We define $\inf (\BBR) = -\infty$, $\sup (\BBR) = +\infty$,  $\inf
(\varnothing) = +\infty$ and $\sup (\varnothing)=-\infty$.
\end{rem}

\begin{df}
A set  of numbers $A$  is said to be {\em complete} if every
non-empty subset of $A$ which is bounded above has a supremum lying
in $A$.
\end{df}
\begin{axi}[Completeness of $\BBR$]\label{axi:completeness-of-R} Any non-empty set of real numbers which is
bounded above has a supremum. Any non-empty set of real numbers
which is bounded below has a infimum.
\end{axi}

\begin{thm}[Approximation Property of the
Supremum and Infimum]\label{thm:approx-sup-inf} Let $A\neq
\varnothing$ be a set of real numbers possessing a supremum $\sup
A$. Then
$$\forall \varepsilon >0 \quad \exists a\in A \quad \mathrm{such\
that}\quad \sup A-\varepsilon \leq a.$$

\bigskip

Let $B\neq \varnothing$ be a set of real numbers possessing an
infimum $\inf B$. Then
$$\forall \varepsilon >0 \quad \exists b\in B \quad \mathrm{such\
that}\quad \inf B+\varepsilon \geq b.$$
\end{thm}
\begin{pf}
If  $\forall a\in A,\quad \sup A-\varepsilon > a$ then $\sup
A-\varepsilon$ would be an upper bound smaller than the least upper
bound, a contradiction to the definition of $\sup A$. Hence there
must be a rogue $a\in A$ such that $\sup A-\varepsilon \leq a$.

\bigskip

If  $\forall b\in B,\quad \inf B+\varepsilon < b$ then $\inf
B+\varepsilon$ would be a lower bound greater than the greatest
lower bound, a contradiction to the definition of $\inf B$. Hence
there must be a rogue $b\in B$ such that $\inf B+\varepsilon \geq
b$.


\end{pf}
\begin{rem}
The above result should be intuitively clear. $\sup A$ sits on the
fence, just to the right of $A$, so that going just a bit to the
left should put $\sup A-\varepsilon$ within $A$, etc.
\end{rem}

\begin{thm}[Monotonicity Property of the Supremum and Infimum]\label{thm:monotonicity-sup-inf}
Let $\varnothing \subsetneqq A \subseteq B \subseteqq \BBR$ and
suppose that both $A$ and $B$ have a supremum and an infimum. Then
$\sup A \leq \sup B$ and  $\inf B \leq \inf A$.
\end{thm}
\begin{pf}
Assume $B$ is bounded above with supremum $\sup B$. Suppose $x\in
A$. Then $x\in B$ and so $x\leq \sup B$. Thus $\sup B$ is an upper
bound for the elements of $A$, and so by definition,
$\sup A \leq \sup B$.

\bigskip
Assume $B$ is bounded below with infimum $\inf B$. Suppose $x\in A$.
Then $x\in B$ and so $x\geq \inf B$. Thus $\inf B$ is a lower bound
for the elements of $A$ and so by definition, $\inf A \geq \inf B$.
\end{pf}
\begin{lem}\label{lem:a+epsilon<b}
Let $a, b$ be real numbers and assume that for all numbers
$\varepsilon
> 0$ the following inequality holds:
$$ a < b + \varepsilon .$$ Then $a \leq b.$
\end{lem}
\begin{pf} Assume contrariwise that $a > b.$ Hence $\dis{\frac{a - b}{2}
> 0}$. Since the inequality $a < b + \varepsilon$ holds for every
$\varepsilon > 0$ in particular it holds for $\varepsilon = \dfrac{a
- b}{2}$. This implies that
$$a < b + \frac{a - b}{2} \quad \mathrm{or}  \quad  a < b.$$Thus starting with the assumption that
$a > b$ we reach the incompatible conclusion that $a < b.$ The
original assumption must be wrong. We therefore conclude that $a
\leq b.$
\end{pf}
\begin{thm}[Additive Property of the Supremum]\label{thm:additive-sup-inf}
Let $\varnothing \subsetneqq A \subseteq \BBR$, and  $B \subseteqq
\BBR$. Put
$$ A+B = \{x+y: (x, y)\in A\times B\} $$
and suppose that both $A$ and $B$ have a supremum. Then $A+B$ has
also a supremum and  $$\sup (A+B) = \sup A+\sup B.$$
\end{thm}
\begin{pf}
If $t\in A+B$ then $t= x+y$ with $(x, y)\in A\times B$. Then $t= x+y
\leq \sup A + \sup B$, and so $\sup A + \sup B$ is an upper bound
for $A+B$. By the Completeness Axiom, $A+B$, being non-empty and
bounded above, has a supremum, and thus $\sup
(A+B)\leq \sup A + \sup B$.

\bigskip

We now prove that $\sup A + \sup B \leq \sup (A+B)$. By the
approximation property, $\forall \varepsilon > 0$ $\exists a\in A$
and $b\in B$ such that $\sup A - \dfrac{\varepsilon}{2}<a$ and $\sup
B - \dfrac{\varepsilon}{2}<b$. Observe that $a+b\in A+B$ and so $a+b
\leq \sup (A+B)$. Then
$$ \sup A + \sup B - \varepsilon < a+b \leq \sup (A+B),  $$and by Lemma
\ref{lem:a+epsilon<b} we must have $$ \sup A + \sup B \leq \sup
(A+B).$$This completes the proof.\end{pf}

\begin{thm}[Archimedean Property of the Real Numbers]\label{thm:archimedean-prop}
If $(x,y)\in\BBR^2$ with $x>0$, then there exists a natural number
$n$ such that $nx>y$.
\end{thm}
\begin{pf}
Consider the set $$ A=\{nx: n\in\BBN\}. $$Since $1\cdot x\in A$, $A$
is non-empty. If $\forall n\in\BBN$ we had $nx\leq y$, then $A$
would be bounded above by $y$. By the Completeness Axiom, $A$ would
have a supremum $\sup A$. Thus $\forall n\in\BBN, \quad nx\leq \sup
A$. Since $(n+1)x\in A$, we would also have $$(n+1)x\leq \sup
A\implies nx\leq \sup A-x.$$This means that $\sup A-x$ is an upper
bound for $A$ which is smaller than its supremum, a contradiction.
Thus there must be an $n$ for which $nx>y$.
\end{pf}

\begin{cor}$\BBN$ is unbounded above.
\end{cor}
\begin{pf}
This follows by taking $x=1$ in Theorem \ref{thm:archimedean-prop}.
\end{pf}

The Completeness Axioms tells us, essentially, that there are no
``holes'' in the real numbers. We will see that this property
distinguishes the reals from the rational numbers.
\begin{lem}[Hippasus of Metapontum]\label{lem:root-2-irrational}$\sqrt{2}$
is irrational.
\end{lem}
\begin{pf}
Assume there is $s\in \BBQ$ such that  $s^2=2$. We can find integers
$m, n\neq 0$ such that $s=\dfrac{m}{n}$. The crucial part of the
argument is that  we can choose $m, n$ such that this fraction be in
least terms, and hence, $m, n$ must not be both even. Now,
$n^2s^2=m^2$, that is $2n^2=m^2$. This means that $m^2$ is even. But
then $m$ itself must be even, since the product of two odd numbers
is odd. Thus $m=2a$ for some non-zero integer $a$ (since $m\neq 0$).
This means that $2n^2 = (2a)^2=4a^2 \implies n^2=2a^2$. This means
once again that $n$ is even. But then we have a contradiction, since
$m$ and $n$ were not both even.
\end{pf}
\begin{thm}
$\BBQ$ is not complete. \label{thm:Q-is-incomplete}
\end{thm}
\begin{pf}
We must shew that there is a non-empty set of rational numbers which
is bounded above but that does not have a supremum in $\BBQ$.
Consider the set $A=\{r\in \BBQ: r^2\leq 2\}$ of rational numbers.
This set is bounded above by $u=2$. For assume that there were a
rogue element of $A$, say $r_0$ such that $r_0>2$. Then $r_0 ^2 > 4$
and so $r_0$ would not belong to $A$, a contradiction. Thus $r\leq
2$ for every $r\in A$ and so $A$ is bounded above. Suppose that $A$
had a supremum $s$, which must satisfy $s\leq 2$. Now, by Lemma
\ref{lem:root-2-irrational} we cannot have   $s^2 = 2$ and thus
$s^2<2$. By Theorem \ref{thm:archimedean-prop} there is an integer
$n$ such that $2-s^2>\dfrac{1}{10^n}$. Put
$t=s+\dfrac{1}{10^{n+1}}$, a rational number and observe that since
$s\leq 2$ one has
$$t^2=s^2+\dfrac{2s}{10^{n+1}}+ \dfrac{1}{10^{2n+2}}
<s^2+\dfrac{2s}{10^{n+1}}+\dfrac{1}{10^{n+1}}\leq s^2
+\dfrac{5}{10^{n+1}}<s^2+ \dfrac{1}{10^n}<2.$$ Thus $t\in A$ and
$t>s$, that is $t$ is an element of $A$ larger than its least upper
bound, a contradiction. Hence $A$ does not have a least upper bound.
\end{pf}


\subsection{Greatest Integer Function}
\begin{thm}\label{thm:existence-of-floor}
Given $y\in \BBR$ there exists a unique integer $n$ such that $$
n\leq y <n+1.
$$
\end{thm}
\begin{pf}
By Theorem \ref{thm:archimedean-prop}, the set $\{n\in \BBZ:  n \leq
y \}$  is non-empty and bounded above. We put $$ \floor{y} =  \sup
\{n\in \BBZ:  n \leq y \}.$$
\end{pf}
\begin{rem}
$\forall x\in \BBR$,\qquad  $\floor{x}\leq x < \floor{x}+1$.
\end{rem}
\begin{df}
The unique integer in Theorem \ref{thm:existence-of-floor} is called
the {\em floor} of $x$ and is denoted by $\floor{x}$.
\end{df}
The greatest integer function enjoys the following properties:
\begin{thm} \label{thm:floor_properties}Let $\alpha , \beta \in \BBR , a \in \BBZ, n \in \BBN$. Then
\begin{enumerate}
\item $\floor{\alpha + a} = \floor{\alpha } + a$ \item $\floor{\dfrac{\alpha
}{n}} =\floor{\dfrac{\floor{\alpha }}{n}}$ \item $\floor{\alpha }+
\floor{\beta } \leq \floor{\alpha + \beta } \leq \floor{\alpha } +
\floor{\beta } + 1$

\end{enumerate}
\end{thm}
\begin{pf} \begin{enumerate} \item Let $m = \floor{\alpha + a}$. Then
$m \leq \alpha + a < m + 1.$ Hence $m - a \leq \alpha < m - a + 1.$
This means that $m - a = \floor{\alpha}$, which is what we wanted.
\item Write ${\alpha}/{n}$ as ${\alpha}/{n} = \floor{{\alpha}/{n}} +
\theta , 0 \leq \theta < 1.$ Since $n\floor{{\alpha}/{n}}$ is an
integer, we deduce by (1) that
$$ \floor{\alpha } = \floor{n\floor{\alpha /n} + n\theta} = n\floor{\alpha /n} + \floor{n\theta}.$$
Now, $0 \leq \floor{n\theta} \leq n\theta < n,$ and so $0 \leq
\floor{n\theta }/n < 1.$ If we let $\Theta = \floor{n\theta }/n,$ we
obtain
$$ \dfrac{\floor{\alpha }}{n} = \floor{\dfrac{\alpha}{n}} + \Theta , \ \ 0 \leq \Theta < 1.$$
This yields the required result. \item From the inequalities $\alpha
- 1 < \floor{\alpha } \leq \alpha, \beta - 1 < \floor{\beta } \leq
\beta$ we get $\alpha + \beta - 2 < \floor{\alpha }+ \floor{\beta }
\leq \alpha + \beta $. Since $\floor{\alpha } + \floor{\beta }$ is
an integer less than or equal to $\alpha  + \beta ,$ it must be less
than or equal to the integral part of $\alpha  + \beta ,$ i.e.
$\floor{\alpha + \beta }.$ We obtain thus $\floor{\alpha } +
\floor{\beta } \leq \floor{\alpha + \beta }.$ Also, $\alpha + \beta$
is less than the integer $\floor{\alpha } + \floor{\beta } + 2$, so
its integer part $\floor{\alpha + \beta }$ must be less than
$\floor{\alpha }+ \floor{\beta} + 2$, but $\floor{\alpha + \beta } <
\floor{\alpha } + \floor{\beta } + 2$ yields $\floor{\alpha + \beta
} \leq \floor{\alpha } + \floor{\beta } + 1$. This proves the
inequalities.
\end{enumerate}
\end{pf}

\begin{df}
The {\em ceiling of a real number $x$} is the unique integer
$\ceil{x}$ satisfying the inequalities
$$ \ceil{x}-1<  x\leq \ceil{x}.$$
\end{df}
\begin{df}
The {\em fractional part of a real number $x$} is defined and
denoted by
$$ \fracpart{x} = x-\floor{x}.$$Observe that $0 \leq
\fracpart{x}<1$.
\end{df}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}Let $A$ and $B$ be non-empty sets of real numbers.
Put $$-A = \{-x: x\in A\}, \qquad A-B = \{a-b: (a, b)\in A\times
B\}.
$$
 Prove
that
\begin{enumerate}
\item If $A$ is bounded above, then $-A$ is bounded below and $\sup A = -\inf
(-A)$.
\item If $A$ and $B$ are bounded above then $A\cup B$ is also
bounded above and $\sup (A\cup B) = \max (\sup A, \sup B)$.
\item  If $A$ is bounded above and $B$ is bounded below, then $A-B$ is bounded above and $\sup (A-B) = \sup A - \inf B$.
\end{enumerate}


\end{pro}
\begin{pro}
Assume that $A$ is a subset of the strictly positive real numbers.
Prove that if $A$ is bounded above, then the set $ A^{-1} =
\{\dfrac{1}{x}: x\in A\}$ is bounded below and that $\sup A =
\dfrac{1}{\inf A^{-1}}$.
\end{pro}

\begin{pro}
Let $n \geq 2$ be an integer. Prove that
$$ \max _{0 \leq x_1 \leq x_2 \leq \cdots \leq x_n \leq 1} \left(\sum _{1 \leq i < j \leq n} (x_j - x_i)\right)  = \floor{\dfrac{n^2}{4}}. $$
\begin{answer}
We have $$\begin{array}{lll}\sum _{1 \leq i < j \leq n} (x_j - x_i)
& = & \sum _{1 \leq i < j \leq n} x_j-\sum _{1 \leq i < j \leq
n}x_i\\
& = & \sum _{j=2} ^n (j-1)x_j-\sum _{i=1} ^{n-1}(n-i)x_i\\
& = & -(n-1)x_1 + \sum _{k=2} ^{n-1}((k-1)-(n-k))x_k+(n-1)x_n\\
& = & -(n-1)x_1-(n-3)x_2-\cdots + (n-3)x_{n-1}+(n-1)x_n.\\
\end{array}$$
This sum is maximal when the $x_i$ with negative coefficients are
$0$ and the $x_i$ with positive coefficients are equal to $1$. If
$n$ is even the maximum is $$ 1+3+\cdots + (n-1)=\dfrac{n^2}{4}. $$ If $n$ is odd,
the maximum is $$2 + 4+\cdots + (n-1)=\dfrac{n^2-1}{4}. $$In both cases this is $\floor{\dfrac{n^2}{4}}$, and the result
follows thus.
\end{answer}

\end{pro}


\begin{pro} Find a non-zero polynomial $P(x, y)$ such that $$P(\floor{2t}, \floor{3t}) = 0$$
for all real $t$. \begin{answer} We claim that $3\floor{2t} -
2\floor{3t} = 0, \pm 1$ or $-2.$ We can then take $$P(x, y) = (3x -
2y)(3x - 2y - 1)(3x - 2y + 1)(3x - 2y + 2).$$

In order to prove the claim, we observe that $\floor{x}$ has unit
period, so it is enough to prove the claim for $t \in [0, 1)$. We
divide $[0; 1[$ as
$$ [0, 1[ = [0; 1/3[ \cup [1/3; 1/2[ \cup [1/2; 2/3[ \cup [2/3;
1[.$$ If $t\in [0, 1/3[,$ then both $\floor{2t}$ and $\floor{3t}$
are $= 0,$ and so $3\floor{2t} - 2\floor{3t} = 0.$ If $t\in [1/3;
1/2[$ then $\floor{3t} = 1$ and $\floor{2t} = 0,$ and so
$3\floor{2t} - 2\floor{3t} = - 2$. If $t\in [1/2; 2/3[,$ then
$\floor{2t} = 1, \floor{3t} = 1$, and so $3\floor{2t} - 2\floor{3t}
= 1.$ If $t\in [2/3; 1[$, then $\floor{2t} = 1, \floor{3t} = 2,$ and
$3\floor{2t} - 2\floor{3t} = -1.$
\end{answer}
\end{pro}

\begin{pro} Prove that the integers $$ \floor{  \left( 1 + \sqrt{2}\right)^n}$$
with $n$ a positive integer, are alternately even or odd.
\begin{answer} By the Binomial Theorem
$$ (1 + \sqrt{2})^{n} + (1 - \sqrt{2})^n = 2 \sum _{0 \leq k \leq n/2} (2)^k
\binom{n}{2k} := 2N,$$an even integer. Since $-1 < 1 - \sqrt{2} <
0$,  it must be the case that $(1 - \sqrt{2})^n$ is the fractional
part of $(1 + \sqrt{2})^n$ or $(1 + \sqrt{2})^n + 1$ depending on
whether $n$ is odd or even, respectively. Thus for odd $n$, $(1 +
\sqrt{2})^n  - 1 < (1 + \sqrt{2})^n + (1 - \sqrt{2})^n < (1 +
\sqrt{2})^n $, whence $(1 + \sqrt{2})^n + (1 - \sqrt{2})^n =
\floor{(1 + \sqrt{2})^n}$, always even, and for $n$ even $2N := (1 +
\sqrt{2})^n + (1 - \sqrt{2})^n = \floor{(1 + \sqrt{2})^n} + 1$, and
so $\floor{(1 + \sqrt{2})^n} = 2N - 1,$ always odd for even $n.$
\begin{exa} Prove that the first thousand digits after the decimal point in $$ (6 + \sqrt{35})^{1980}$$
are all $9$'s. \end{exa} Solution: Reasoning as in the preceding
problem, $$ (6 + \sqrt{35})^{1980} + (6 - \sqrt{35})^{1980} =
2k,$$an even integer.  But $0 < 6 - \sqrt{35} < 1/10$, (for if
$\dfrac{1}{10} < 6 - \sqrt{35}$, upon squaring $3500 < 3481$, which
is clearly nonsense), and hence $ 0 < (6 - \sqrt{35})^{1980} <
10^{-1980} $ which yields
 $$2k - 1 + \underbrace{0.9\ldots 9}_{1980 \ {\rm nines}} = 2k -
 \dfrac{1}{10^{1980}} < (6 + \sqrt{35})^{1980} < 2k,$$
This proves the assertion of the problem.
\end{answer}
\end{pro}
\begin{pro}
Let $x\in \BBR$ and let $n$ be a strictly positive integer. Prove
that
$$ \floor{nx} = \sum _{k=0} ^{n-1} \floor{x+\dfrac{k}{n}}. $$
\end{pro}

\begin{pro}[Putnam 1948] If $n$ is a positive integer, demonstrate
that
$$ \floor{ \sqrt{n} + \sqrt{n + 1} } = \floor{  \sqrt{4n + 2} } .$$ \begin{answer}
By squaring, it is easy to see that $$ \sqrt{4n + 1} < \sqrt{n} +
\sqrt{n + 1} < \sqrt{4n + 3}.$$ Neither $4n + 2$ nor $4n + 3$ are
squares since squares are either congruent to $0$ or $1$ mod $4$, so
$$ \floor{  \sqrt{4n + 2}} = \floor{ \sqrt{4n + 3}},$$ and the result follows.
\end{answer}
\end{pro}
\begin{pro} Find a formula for the $n$-th non-square. \begin{answer} Let $T_n$ be the $n$-th non-square. There is a natural number $m$
such that $m^2 < T_n < (m + 1)^2$. As there are $m$ squares less
than $T_n$ and $n$ non-squares up to $T_n$, we see that $T_n = n +
m.$ We have then $m^2 < n + m < (m + 1)^2$ or $m^2 - m < n < m^2 + m
+ 1.$ Since $n, m^2 - m, m^2 + m + 1$ are all integers, these
inequalities imply $m^2 - m + \dfrac{1}{4} < n < m^2 + m +
\dfrac{1}{4}$, that is to say, $(m - 1/2)^2 < n < (m + 1/2)^2.$ But
then $m = \floor{ \sqrt{n} + \dfrac{1}{2}}.$ Thus the $n$-th
non-square is $T_n = n + \floor{ \sqrt{n} + 1/2}.$
\end{answer}
\end{pro}
\begin{pro}
Prove that if $a, b$ are strictly positive integers then
$$\dfrac{a^2}{b^2}<2\implies \dfrac{(a+2b)^2}{(a+b)^2}<2.$$ Prove,
moreover, that
$$\dfrac{(a+2b)^2}{(a+b)^2}-2<2-\dfrac{a^2}{b^2}.$$This means that
$\dfrac{(a+2b)^2}{(a+b)^2}$  is closer to $2$ than
$\dfrac{a^2}{b^2}$ is.
\begin{answer} Assume on the contrary that
$$\dfrac{(a+2b)^2}{(a+b)^2}\geq 2 \implies a^2+4ab+4b^2\geq 2(a^2+2ab+b^2)\implies 2b^2\geq a^2 \implies \dfrac{a^2}{b^2}\geq 2,  $$
a contradiction. By adding,
$$ \dfrac{a^2}{b^2}<2, \quad \dfrac{(a+2b)^2}{(a+b)^2}<2 \implies  \dfrac{a^2}{b^2}+ \dfrac{(a+2b)^2}{(a+b)^2}<4 \implies  \dfrac{(a+2b)^2}{(a+b)^2}-2<2-\dfrac{a^2}{b^2}.$$

\end{answer}
\end{pro}
\begin{pro}
Shew that $\forall x> 0$, $x\neq\sqrt{5}$, the number $x$ is farther from $\sqrt{5}$ than
$\dfrac{2x+5}{x+2}$ is.
\begin{answer}
It needs to be proved that $$
\absval{\dfrac{2x+5}{x+2}-\sqrt{5}}<\absval{x-\sqrt{5}}.
$$
\end{answer}
\end{pro}
\begin{pro}[Existence of $n$-th Roots]
Let $a>0$ and let $n\in \BBN$, $n\geq 2$. Prove that there is a
unique $b\in \BBR$ such that $b^n = a$.
\begin{answer}
Consider the set $E=\{x: x>0, x^n<a\}$. Shew that $E$ is bounded
above with supremum $b=\sup E$. Then shew that $b^n=a$ by arguing by
contradiction first against $b^n<a$ and then against $b^n>a$. In the
first case it may be advantageous to prove
$\left(b+\dfrac{a-b^n}{N}\right)^n<a$ for $N$ large enough and use
the Binomial Theorem to establish the inequality. In the second case
consider $b^n\left(1+\dfrac{b^n}{Ma}\right)^{-n}>a$, for integral
$M$ sufficiently large, again using the Binomial Theorem to
establish the inequality.
\end{answer}
\end{pro}
\end{multicols}


\chapter{Topology of $\BBR$}
\section{Intervals}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} In this section
    we give a more precise definition of what an interval is, and
    establish the interesting property that between any two real
    numbers there is always a rational number.
\end{minipage}}
    \end{center}

\begin{df}
An {\em interval}\index{interval} $I$ is a subset of the real
numbers with the following property: if $s\in  I$ and $t\in I$, and
if $s < x < t$, then $x\in I$. In other words, intervals are those
subsets of real numbers with the property that every number between
two elements is also contained in the set. Since there are
infinitely many decimals between two different real numbers,
intervals with distinct endpoints contain infinitely many members.
\end{df}
\begin{rem}
The empty set $\varnothing$ is trivially an interval.
\end{rem}
We will now establish that there are nine types of intervals.
\renewcommand{\arraystretch}{2}
\begin{table}[h]\begin{center}\begin{tabular}{llc}{\bf Interval Notation}  & {\bf Set Notation}  & {\bf Graphical
Representation} \\
 $[a; b]$ & $\{x\in \BBR: a \leq x \leq b\}$\footnote{This is ``the
set of all real numbers $x$ such that $a$ is less than or equal to
$x$, and $x$ is less than or equal to
 $b$.''} & $  \psset{unit=1pc} \psline[linewidth=1.5pt,
linecolor=red]{*-*}(-5,0)(5,0)\uput[d](-5,0){a}\uput[d](5,0){b} $ \\
$]a; b[$ & $\{x\in \BBR: a < x < b\}$ & $  \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{o-o}(-5,0)(5,0)\uput[d](-5,0){a}\uput[d](5,0){b} $ \\
$[a; b[$ & $\{x\in \BBR: a \leq x < b\}$ & $  \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{*-o}(-5,0)(5,0)\uput[d](-5,0){a}\uput[d](5,0){b} $ \\
$]a; b]$ & $\{x\in \BBR: a < x \leq b\}$ & $  \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{o-*}(-5,0)(5,0)\uput[d](-5,0){a}\uput[d](5,0){b} $ \\
$]a; +\infty[$ & $\{x\in \BBR: x>a\}$ & $  \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{o->}(-5,0)(5,0)\uput[d](-5,0){a}\uput[d](5,0){+\infty} $ \\
$[a; +\infty[$ & $\{x\in \BBR: x\geq a\}$ & $  \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{*->}(-5,0)(5,0)\uput[d](-5,0){a}\uput[d](5,0){+\infty} $ \\
$]-\infty; b[$ & $\{x\in \BBR: x< b\}$ & $ \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{<-o}(-5,0)(5,0)\uput[d](-5,0){-\infty}\uput[d](5,0){b} $ \\
$]-\infty; b]$ & $\{x\in \BBR: x\leq b\}$ & $ \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{<-*}(-5,0)(5,0)\uput[d](-5,0){-\infty}\uput[d](5,0){b} $ \\
$]-\infty; +\infty[$ & $\BBR$ & $ \psset{unit=1pc}
\psline[linewidth=1.5pt,
linecolor=red]{<->}(-5,0)(5,0)\uput[d](-5,0){-\infty}\uput[d](5,0){+\infty} $ \\
\end{tabular}\footnotesize\hangcaption{Types of Intervals. Observe that we indicate that the endpoints are included by means of
shading the dots at the endpoints and that the endpoints are
excluded by not shading the dots at the endpoints.}
\label{tab:intervals}\end{center}\end{table}
\begin{rem}
If $x\in \BBR$, then $\{x\}=\lcrc{x}{x}$.
\end{rem}

\begin{thm}
The only kinds of intervals are those sets shewn in Table
\ref{tab:intervals}, and conversely, all sets shewn in this table
are intervals.
\end{thm}
\begin{pf}
The converse is easily established, so assume that $I \subseteqq
\BBR$ possesses the property that $\forall (a, b)\in I^2, \quad
\lcrc{a}{b}\subseteqq I$. Since $\varnothing$ is an interval one may
assume that $I\neq \varnothing$. Let $a\in I$ be a fixed element of
$I$ and put $M_a = \{x\in I: x\leq a\} = \lorc{-\infty}{a}\cap I$
and $N_a = \{x\in I: x\geq a\} = \lcro{a}{+\infty}\cap I$.

\bigskip
If $N_a$ is not bounded above, then $\forall b\in \lcro{a}{+\infty},
\quad \exists c\in N_a$ such that $b\leq c$. Since $a\leq b \leq c$,
this entails that $b\in N_a$. Thus $N_a = \lcro{a}{+\infty}$.

\bigskip

If $N_a$ is  bounded above, then it has supremum $s=\sup (N_a)$ and
$N_a \subseteqq \lcrc{a}{s}$. By Theorem \ref{thm:approx-sup-inf},
$\forall b\in \lcro{a}{s}, \quad \exists c\in N_a$ such that $b \leq c$, and
since $a\leq b \leq c$, this entails that $b\in N_a$. Thus
$$\lcro{a}{s}\subseteqq N_a \subseteqq \lcrc{a}{s},$$ and so $N_a=
\lcro{a}{s}$ or $N_a=\lcrc{a}{s}$.

\bigskip
Thus $N_a$ is one among three possible forms: $\lcro{a}{+\infty}$,
$\lcrc{a}{s}$, or $\lcro{a}{s}$. Applying a similar reasoning, one
gathers that  $M_a$ is of one of the forms
$\lorc{-\infty}{a}$, $\lorc{l}{a}$, or $\lcrc{l}{a}$, where $l=\inf
(M_a)$. Since $I = M_a\cup N_a$, there are $3$ choices for $M_a$ and
$3$ for $N_a$, hence there are $3\cdot 3 = 9$ choices for $I$. The
result is established.
\end{pf}


\begin{exa}
Determine $\bigcap _{k=1} ^\infty
\lcrc{1-\dfrac{1}{2^k}}{1+\dfrac{1}{k}}$.
\end{exa}
\begin{solu} Observe that the intervals are, in sequence,
$$\lcrc{\frac{1}{2}}{2}; \quad \lcrc{\frac{3}{4}}{\frac{3}{2}}; \quad \lcrc{\frac{7}{8}}{\frac{4}{3}};\quad \ldots .    $$
We claim that $\bigcap _{k=1} ^\infty
\lcrc{1-\dfrac{1}{2^k}}{1+\dfrac{1}{k}}=\{1\}$. For we see that
$$\forall k\geq 1, \quad \dfrac{1}{2}\leq 1-\dfrac{1}{2^k}< 1 <
1+\dfrac{1}{k}\leq 2,$$ so $1$ is in every interval. Could this
intersection contain a number smaller than $1$? No, for if
$\dfrac{1}{2}\leq a<1$,  then we can take $k$ large enough so that
$$a< 1-\dfrac{1}{2^k},  $$for example
$$ a<1-\dfrac{1}{2^k} \implies k>-\log_2(1-a), $$ so taking $k\geq
\floor{-\log_2(1-a)}+1$ will work. Could the intersection contain a
number $b$ larger than $1$? No, for if $1< b<2$,  then we can take
$k$ large enough so that
$$ 1+\dfrac{1}{k}<b,  $$for example
$$ 1+\dfrac{1}{k}<b \implies k>\dfrac{1}{b-1}, $$ so taking $k\geq
\floor{\dfrac{1}{b-1}}+1$ will work. Hence the only number in the
intersection is $1$.
\end{solu}

\section{Dense Sets}

\begin{df}
A set $B\subseteqq \BBR$ is {\em dense in $A\subseteqq \BBR$} if
$\forall (a_1, a_2)\in A^2$,\quad  $a_1<a_2$, \quad $\exists b\in B$
such that $a_1<b<a_2$, that is, between any two different elements
of $A$ one can always find an element of $B$.
\end{df}

\begin{thm}\label{thm:rationals-are-dense}
$\BBQ$ is dense in $\BBR$.
\end{thm}
\begin{pf}
Let $x, y$ be real numbers with $x<y$. Since there are infinitely
many positive integers, there must be a positive integer $n$ such
that $n> \dfrac{1}{y-x}$ by the Archimedean Property of $\BBR$.
Consider the rational number $r = \dfrac{m}{n}$, where $m$ is the
least natural number with $m>nx$. This means that $$m > nx \geq
m-1.$$ We claim that $x<\dfrac{m}{n}<y$. The first inequality is
clear, since by choice $x<\dfrac{m}{n}$. For the second inequality
observe that, again
$$nx \geq m-1\ \mathrm{and}\ y-x
> \dfrac{1}{n} \implies x\geq \dfrac{m}{n}-\dfrac{1}{n}\ \mathrm{and}\ y>x+ \dfrac{1}{n}
\implies y > \dfrac{m}{n}-\dfrac{1}{n} + \dfrac{1}{n} =
\dfrac{m}{n}.
$$Thus $\dfrac{m}{n}$ is a rational number between $x$ and $y$.
\end{pf}
\begin{thm}\label{thm:irrationals-are-dense}
$\BBR\setminus \BBQ$ is dense in $\BBR$.
\end{thm}
\begin{pf}
Let $a<b$ be two real numbers. By Theorem
\ref{thm:rationals-are-dense}, there is a rational number $r$ with
$\dfrac{a}{\sqrt{2}} < r < \dfrac{b}{\sqrt{2}}$. But then $a <
\sqrt{2}r< b$, and the number $\sqrt{2}r$ is an irrational
number.\end{pf}


\begin{thm}[Dirichlet] For any real number $\theta$ and any
integer $Q \geq 1$, there exist integers $a$ and $q$, $1 \leq q \leq
Q$, such that
$$\left|\theta - \frac{a}{q}\right| \leq \frac{1}{qQ}.$$
\label{thm:dirichlet_1}\end{thm}\begin{pf} For $1 \leq n \leq Q$,
let
$$I_n = \left[\frac{n - 1}{Q};\frac{n}{Q}\right[.$$ Thus these $Q$
intervals partition  the interval $[0; 1[$. The $Q + 1$ numbers
$$\{0\theta\}, \{1\theta\}, \{2\theta\}, \ldots , \{Q\theta\}$$lie
in $[0;1[$. Hence by the pigeonhole principle there is an $n$ such
that $I_n$ contains at least two of these numbers, say
$$\{q_1\theta\}\in I_n ,\ \ \  \{q_2\theta\}\in I_n, \ \ \ 0 \leq q_1 < q_2 \leq
Q.$$ Put $q = q_2 - q_1$, $a = [q_2\theta] - [q_1\theta]$. Since
$\{q_1\theta\}\in I_n, \{q_2\theta\}\in I_n$ we must have
$$\left|\{q_2\theta\} - \{q_1\theta\}\right| < \frac{1}{Q}.$$But
$$\{q_2\theta\} - \{q_1\theta\} = q_2\theta - [q_2\theta] - q_1\theta +  [q_1\theta] = q\theta - a, $$
whence the result.
\end{pf}

\begin{cor}
If $\theta$ is irrational prove that there exist infinitely many
rational numbers $\dis{\frac{a}{q}, \ \ \gcd (a, q) = 1}$, such that
$ \theta$ lies in the open intervals $
\loro{\dfrac{a}{q}-\dfrac{1}{q^2}}{\dfrac{a}{q}+\dfrac{1}{q^2}}$.
\label{cor:dirichlet_1}

\end{cor}
\begin{pf}
Suppose that $\dis{\left|\theta - \frac{a_r}{q_r}\right| <
\frac{1}{q_r ^2}}$ for $1 \leq r \leq R$. Since the differences
$\dis{\theta - \frac{a_r}{q_r}}$ are non-zero, we may choose $Q$ so
large in Theorem \ref{thm:dirichlet_1} that none of these rational
numbers is a solution of $\dis{\left|\theta - \frac{a}{q}\right| <
\frac{1}{qQ}}$. Since this latter inequality does have a solution,
the $R$ given rational approximations do not exhaust the set of
solutions of $\dis{\left|\theta - \frac{a}{q}\right| <
\frac{1}{q^2}}$.
\end{pf}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Determine $\bigcap _{1 \leq k \leq 500}\lcrc{k}{1001-k}$.
\begin{answer}
$\lcrc{500}{501}$.
\end{answer}
\end{pro}
\begin{pro}
Determine $\bigcup _{k=1} ^\infty \lcrc{1}{1+\dfrac{1}{k}}$.
\begin{answer}
$\lcrc{1}{2}$.
\end{answer}
\end{pro}

\begin{pro}
Determine $\bigcup _{k=1} ^\infty \lcrc{-k}{k}$.
\begin{answer}
$\BBR$.
\end{answer}
\end{pro}
\begin{pro}
Determine $\bigcap _{k=1} ^\infty \lcrc{1}{1+\dfrac{1}{k}}$.
\begin{answer}
$\{1\}$.
\end{answer}
\end{pro}
\begin{pro}
Determine $\bigcap _{k=1} ^\infty \lcro{k}{+\infty}$.
\begin{answer}
$\varnothing$.
\end{answer}
\end{pro}
\begin{pro}
Determine $\bigcap _{k=1} ^\infty \lorc{1}{1+\dfrac{1}{k}}$.
\begin{answer}
$\varnothing$.
\end{answer}
\end{pro}
\begin{pro}
Let $I = \lcrc{a}{b}$, and $I' = \lcrc{a'}{b'}$ be closed intervals
in $\BBR$. Prove that $I \subseteqq I'$ if and only if $a'\leq a$
and $b\leq b'$.
\end{pro}
\begin{pro}
Let
$$\BBQ + \sqrt{2}\BBQ = \{a + \sqrt{2}b : (a, b)\in \BBQ^2\}$$
and define addition on this set as
$$(a + \sqrt{2}b) + (c + \sqrt{2}d) = (a + c) + \sqrt{2}(b +
d),$$and multiplication as
$$(a + \sqrt{2}b)  (c + \sqrt{2}d) = (ac + 2bd) + \sqrt{2}(ad +
bc).$$Then $\field{\BBQ + \sqrt{2}\BBQ}{\cdot}{+}$ is a field.
\begin{answer}Closure is immediate. Most of the other axioms are inherited
from the larger set $\BBR$. Observe $0_F = 0$, $1_F = 1$ and the
multiplicative inverse of $a + \sqrt{2}b, (a, b) \neq (0, 0)$ is
$$(a + \sqrt{2}b)^{-1} = \frac{1}{a + \sqrt{2}b} = \frac{a - \sqrt{2}b}{a^2 - 2b^2} = \frac{a}{a^2 - 2b^2} -
\frac{\sqrt{2}b}{a^2 - 2b^2}. $$ Here $a^2 - 2b^2 \neq 0$ since
$\sqrt{2}$ is irrational.
\end{answer}
\end{pro}

\begin{pro}
Put $D=\{x: x=q^2\quad \mathrm{or} \quad x=-q^2, \quad q\in \BBQ\}$.
Prove that $D$ is dense in $\BBR$.
\begin{answer}Assume $(a, b)\in \BBR^2$ with $a<b$.
If $ab<0$, then $0\in D$ is between $a$ and $b$. If $0<a<b$ then
$\sqrt{a}< \sqrt{b}$, and since $\BBQ$ is dense in $\BBR$, there is
a rational number $r$ such that $\sqrt{a}<r<\sqrt{b}\implies a < r^2
< b$. If $a < b < 0$, then $\sqrt{-b}< \sqrt{-a}$, and since $\BBQ$
is dense in $\BBR$, there is a rational number $s$ such that
$\sqrt{-b}<s<\sqrt{-a}\implies -b < s^2 < -a \implies a < -s^2 < b$.
\end{answer}
\end{pro}
\begin{pro}
A {\em dyadic rational} is a rational number of the form
$\dfrac{m}{2^n}$, where $m\in\BBZ$, $n\in \BBN$. Prove that the set
of dyadic rationals is dense in $\BBR$.
\begin{answer}
Assume $(a, b)\in \BBR^2$ with $a<b$. There is a strictly positive
integer $n$ such that $n>\dfrac{1}{b-a}$. Thus $$0 <
\dfrac{1}{2^n}<\dfrac{1}{n}<b-a.$$Put $m = \floor{2^na}+1$, and so
by definition $m-1 \leq 2^na<m$. Hence $$ a< \dfrac{m}{2^n}\leq a +
\dfrac{1}{2^n}<a+\dfrac{1}{n}< a+(b-a)=b,
$$so $\dfrac{m}{2^n}$ is a dyadic rational between $a$ and $b$.
\end{answer}
\end{pro}





\end{multicols}
\section{Open and Closed Sets}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} Many of the
    properties that we will study in these notes generalise to sets
    other than $\BBR$. To better understand what it is about the features of $\BBR$ that is essential for a generalisation,
    the language of topology is used.
\end{minipage}}
    \end{center}

\begin{df}
The {\em open ball $\ball{x_0}{\varepsilon}$ centred at $x_0$ and radius
$\varepsilon
>0$} is the set$$\ball{x_0}{\varepsilon}=\loro{x_0-\varepsilon}{x_0+\varepsilon}.  $$
\end{df}
\begin{df}
A set $\N{x_0}\subseteqq \BBR$ is an  {\em open neighbourhood of a
point $x_0$} if $\exists \varepsilon >0$ such that
$\ball{x_0}{\varepsilon}\subseteqq \N{x_0}$, that is, there is a sufficiently
small open ball containing $x_0$ completely contained in $\N{x_0}$.
\end{df}
\begin{df} A set  $U\subseteqq\BBR$  is said to be  {\em open in $\BBR$} if
$\forall\ x\in U$ there is an open neighbourhood $\N{x}$ such that
$\N{x}\subseteqq U$. A set $F\subseteqq \BBR$ is said to be {\em
closed in $\BBR$} if its complement $U=\BBR\setminus F$ is open in
$\BBR$.
\end{df}
\begin{thm}\label{thm:open-balls-are-open}
Every open ball is open.
\end{thm}
\begin{pf}
Let $\ball{x_0}{r}$ with $r>0$ be an open ball and let $x\in
\ball{x_0}{r}$. We must shew that there is a sufficiently small
neighbourhood of $x$ completely within  $\ball{x_0}{r}$ . That is,
we search for $\varepsilon > 0$ such that $y\in
\ball{x}{\varepsilon}\implies y\in \ball{x_0}{r}$. Now,
$$y\in
\ball{x}{\varepsilon}\implies y \in\ball{x_0}{r} \iff
|y-x|<\varepsilon \implies |y-x_0|<r.
$$By the Triangle Inequality
$$|y-x_0|\leq |y-x|+|x-x_0|<\varepsilon + |x-x_0|. $$So, as long as
$$\varepsilon + |x-x_0| < r, $$we will be within $\ball{x_0}{r}$.
One can take $$ \varepsilon = \dfrac{r-|x-x_0|}{2}. $$
\end{pf}
\begin{exa}
The open intervals  $\loro{a}{b}$, $\loro{a}{+\infty}$,
$\loro{-\infty}{b}$, $\loro{-\infty}{+\infty}$, are open in $\BBR$.

\bigskip

The closed intervals $\{a\}$, $\lcrc{a}{b}$, $\lcro{a}{+\infty}$,
$\lorc{-\infty}{b}$, $\loro{-\infty}{+\infty} = \BBR$, are closed in
$\BBR$.

\bigskip

The sets $\varnothing$ and $\BBR$ are simultaneously open and closed
in $\BBR$.

\bigskip

The intervals $\lorc{a}{b}$ and $\lcro{a}{b}$ are neither open nor
closed in $\BBR$.
\end{exa}

\begin{thm}The union of any (finite or infinite) number  of open sets in $\BBR$ is open in $\BBR$. The union
of a finite number of closed sets in $\BBR$ is closed in $\BBR$.

\bigskip

The intersection of a finite number of open sets in $\BBR$ is open
in $\BBR$. The intersection of any (finite or infinite) number of
closed sets in $\BBR$ is closed in $\BBR$.
\end{thm}

\begin{pf}
Let $U_1, U_2, \ldots,$ be a sequence of open sets in $\BBR$ (some
may be empty) and consider $x\in \bigcup _{n=1} ^\infty U_n$. There
is an index $N$ such that $x\in U_N$. Since $U_N$ is open in $\BBR$,
there is an open neighbourhood of $x$ \quad
$\loro{x-\varepsilon}{x+\varepsilon}\subseteqq U_N$, for
$\varepsilon >0$ small enough. But then
$$\loro{x-\varepsilon}{x+\varepsilon}\subseteqq U_N\subseteqq \bigcup _{n=1} ^\infty U_n,
$$and so given an arbitrary point of the union, there is a small
enough open neighbourhood enclosing the point and within the union,
meaning that the union is open.

\bigskip

If $\bigcap _{n=1} ^\infty F_n$ is an arbitrary intersection  of
closed sets, then there are open sets $U_n = \BBR\setminus F_n$. By
the De Morgan Laws, $$\bigcap _{n=1} ^\infty F_n =\bigcap _{n=1}
^\infty (\BBR\setminus U_n) = \BBR \setminus \bigcup _{n=1} ^\infty
U_n,$$and since $\bigcup _{n=1} ^\infty U_n$ is open by the above
paragraph, $\bigcap _{n=1} ^\infty F_n$ is the complement of an open
set, that is, it is closed.


\bigskip
Let $U_1, U_2, \ldots, U_L$ be  a sequence of open sets in $\BBR$
and consider $x\in \bigcap _{n=1} ^L U_n$. Then   $x$ belongs to
each of the $U_k$ and so there are $\varepsilon _k > 0$ such that
$x\in \loro{x-\varepsilon_k}{x+\varepsilon_k}\subseteqq U_k$. Let
$\varepsilon = \min _{1\leq k \leq L} \varepsilon _k$ be the
smallest one of such. But then for all $k$,
$$\loro{x-\varepsilon}{x+\varepsilon}\subseteqq \loro{x-\varepsilon_k}{x+\varepsilon_k}
\subseteqq U_k, \implies
\loro{x-\varepsilon}{x+\varepsilon}\subseteqq \bigcap _{n=1} ^L U_n,
$$and so given an arbitrary point of the intersection, there is a small
enough open neighbourhood enclosing the point and within the
intersection, meaning that the intersection is open.

\bigskip

Using the De Morgan Laws and the preceding paragraph, the remaining
statement can be proved. \end{pf}
\begin{exa}
The intersection of an infinite number of open sets may not be open.
For example $$ \bigcap _{n=1} ^\infty
\loro{1-\dfrac{1}{n+1}}{2-\dfrac{1}{n+1}} = \lcro{1}{\dfrac{3}{2}},$$ which is
neither open nor closed.\end{exa}

\begin{thm}[Characterisation of the Open Sets of
$\BBR$]\label{thm:character-open-in-R} A set $A\subseteqq \BBR$ is
open if and only if it is the countable union of open intervals of $\BBR$.
\end{thm}
\section{Interior, Boundary, and Closure of a Set}

\begin{df}
Let $A\subseteqq \BBR$. The {\em interior} of $A$ is defined and
denoted by $$\interiorone{A} = \bigcup _{\substack{\Omega \subseteqq
A\\ \Omega\ \mathrm{open}}} \Omega,
$$that is, the largest open set inside $A$. The points of $\interiorone{A}$ are
called the {\em interior points of $A$}.
\end{df}
\begin{df}
Let $A\subseteqq \BBR$. The {\em closure} of $A$ is defined and
denoted by
$${\closure{A}} = \bigcap _{\substack{\Omega \supseteqq A\\ \Omega\
\mathrm{closed}}}\Omega,
$$that is, the smallest closed set containing $A$. The points of $\closure{A}$ are
called the {\em adherence points of $A$}.
\end{df}

\begin{rem}
One always has $\interiorone{A}\subseteqq A\subseteqq \closure{A}$.
A set $U$ is open if and only if $U=\interiorone{U}$. A set $F$ is
closed if and only if $F=\closure{F}$.
\end{rem}

\begin{df}
Let $A\subseteqq \BBR$. The {\em boundary} of $A$ is defined and
denoted by
$$\bdy{A} = \closure{A}-\interiorone{A}.
$$The elements of $\bdy{A}$ are called the {\em boundary points} of
$A$.
\end{df}
\begin{exa}We have
\begin{enumerate}
\item $\interior{\lorc{0}{1}} = \loro{0}{1}$, \quad $\closure{\lorc{0}{1}} =
\lcrc{0}{1}$, \quad $\bdy{\lorc{0}{1}} = \{0,1\}$
\item $\interior{\{0,1\}} = \varnothing$, \quad $\closure{\{0,1\}} =
\{0,1\}$, \quad $\bdy{\{0,1\}} = \{0,1\}$
\item $\interiorone{\BBQ} = \varnothing$, \quad $\closure{\BBQ} =
\BBR$, \quad $\bdy{\BBQ} = \BBR$
\end{enumerate}
\end{exa}

\begin{thm}\label{thm:demorgan-interior-closure}
Let $A\subseteqq \BBR$. Then $$ \BBR\setminus \interiorone{A} =
\closure{\BBR\setminus A}, \qquad \BBR\setminus \closure{A} =
\interior{\BBR\setminus A}.
$$
\end{thm}
\begin{pf}
The theorem follows from the De Morgan Laws, as
$$\BBR\setminus \interiorone{A} = \BBR \setminus  \bigcup _{\substack{\Omega \subseteqq
A\\ \Omega\ \mathrm{open}}} \Omega = \bigcap _{\substack{\Omega
\subseteqq A\\ \Omega\ \mathrm{open}}} \left(\BBR\setminus \Omega
\right) =\bigcap _{\substack{\BBR\setminus A \subseteqq \BBR\setminus \Omega\\
\Omega\ \mathrm{open}}} \left(\BBR\setminus \Omega \right) =\bigcap _{\substack{\BBR\setminus A \subseteqq F\\
F\ \mathrm{closed}}} F = \closure{\BBR\setminus A},  $$and
$$\BBR\setminus \closure{A} = \BBR \setminus  \bigcap _{\substack{F \supseteqq
A\\ F\ \mathrm{closed}}} F = \bigcup _{\substack{F \supseteqq A\\
F\ \mathrm{closed}}} \left(\BBR\setminus F
\right) =\bigcup _{\substack{\BBR\setminus A \supseteqq \BBR\setminus F\\
F\ \mathrm{closed}}} \left(\BBR\setminus F \right) =\bigcup _{\substack{\BBR\setminus A \supseteqq \Omega\\
\Omega\ \mathrm{open}}} \Omega = \interior{\BBR\setminus A}.  $$

\end{pf}

\begin{thm}\label{thm:belonging-to-closure}
$x\in\closure{A} \iff \forall \N{x}$,\quad $\N{x}\cap A \neq
\varnothing$. That is, $x$ is an adherent point if and only if every
neighbourhood of $x$ has a nonempty intersection with $A$.
\end{thm}
\begin{pf}
Assume $x\in\closure{A}$ and let $r>0$. If $\loro{x-r}{x+r}\cap A =
\varnothing$, then $\loro{x-r}{x+r}\subseteqq \BBR\setminus A$.
Since  $\loro{x-r}{x+r}$ is open, we have---in particular---
$\loro{x-r}{x+r}\subseteqq \interior{\BBR\setminus A} =
\BBR\setminus \closure{A}$ by Theorem
\ref{thm:demorgan-interior-closure}. This means that $x\not\in
\closure{A}$, a contradiction.

\bigskip

Conversely, assume that for all neighbourhoods $\N{x}$ of $x$ we
have $\N{x}\cap A \neq \varnothing$.  If $x\not\in \closure{A}$ then
$x\in \BBR\setminus \closure{A} = \interior{\BBR\setminus A}$. Since
$\interior{\BBR\setminus A}$ is open there is an $r'>0$ such that
$\loro{x-r'}{x+r'}\subseteqq \interior{\BBR\setminus A}\subseteqq
\BBR\setminus A$. But then $\loro{x-r'}{x+r'}\cap A = \varnothing$,
a contradiction.
\end{pf}
\begin{thm}\label{thm:sup-belongs-to-closed}
Let $\varnothing \varsubsetneqq A\subseteqq \BBR$ be bounded above.
Then $\sup {A}\in \closure{A}$. If, moreover, $A$ is closed then
$\sup (A)\in A$.
\end{thm}
\begin{pf}
Let $r>0$. By Theorem \ref{thm:approx-sup-inf}, there exists $a\in
A$ such that $\sup (A)-r<a$, which gives $\absval{\sup (A)-a}<r$.
This shews that $\loro{\sup{A}-r}{\sup{A}+r}\cap A \neq \varnothing$
regardless of how small $r>0$ might be and, hence, $\sup (A)\in
\closure{A}$ by Theorem \ref{thm:belonging-to-closure}. If $A$ is
closed, then $A=\closure{A}$.
\end{pf}

\begin{df}
Let $A\subseteqq \BBR$. A point $x\in A$ is called an {\em isolated
point of $A$} if there exists an $r>0$ such that $\ball{x}{r}\cap A
= \{x\}$. The set of isolated points of $A$ is denoted by $A^*$.

\bigskip
 A point $x \in \BBR$ is called an {\em accumulation point
of $A$ in $\BBR$} if
$$\forall \N{x}, \quad (\N{x}\setminus \{x\})\cap A \neq \varnothing ,
$$that is, if any neighbourhood of $x$ meets $A$ at a point
different than $x$. The set of accumulation points of $A$ is called
the {\em derived set of $A$} and is denoted by $\acc{A}$.\end{df}

\begin{exa}We have
\begin{enumerate}
\item $0$ is an isolated point of the set $A = \{0\}\cup
\lcrc{1}{2}$.
\item Every point of the set $A = \left\{1, \dfrac{1}{2}, \dfrac{1}{3}, \ldots
\right\}$ is isolated. This is because we may take $r =
\dfrac{1}{2^{n+2}}$ in the definition of isolated point, and then
$\loro{\dfrac{1}{n}-\dfrac{1}{2^{n+2}}}{\dfrac{1}{n}+\dfrac{1}{2^{n+2}}}
\cap A = \left\{\dfrac{1}{n}\right\}$. Observe that
$\dfrac{1}{n}-\dfrac{1}{n+1} = \dfrac{1}{n(n+1)}$ and
$\dfrac{1}{n-1}-\dfrac{1}{n} = \dfrac{1}{n(n-1)}$ and that $2^{n+2}>
\max (n(n+1), n(n-1))$.
\item $0$ is an accumulation point of $A = \left\{1, \dfrac{1}{2}, \dfrac{1}{3}, \ldots
\right\}$.
\end{enumerate}
\end{exa}

\begin{thm}\label{thm:accu-points-infinite-meet}
$x$ is an accumulation point of $A$ if and only if every
neighbourhood of $x$ in $\BBR$ has an infinite number of points of
$A$.
\end{thm}
\begin{pf}
Suppose $x\in \acc{A}$. Suppose a neighbourhood of $x$ had only finitely
many elements of $A$ different from $x$, say $\{y_1, y_2, \ldots , y_n\}$. Take
$2r=\min _{1 \leq k \leq n} \absval{y_k-x}$. Then
$\left(\loro{x-r}{x+r}\setminus \{x\}\right)\cap A = \varnothing$
contradicting the fact that every neighbourhood of $x$ meets $A$ at
a point different from $x$.

\bigskip

Conversely if every neighbourhood of $x$ in $\BBR$ has an infinite
number of points of $A$, then a fortiori, any intersection of such a
neighbourhood with $A$ will contain a point different from $x$, and
so $x\in \acc{A}$.
\end{pf}

\begin{thm}
A set is closed if and only if it contains all its accumulation
points.
\end{thm}
\begin{pf}
If $A$ is closed then $\BBR \setminus A$ is open. If $c\in \BBR
\setminus A$ then there exists $r>0$ such that
$\loro{c-r}{c+r}\subseteqq \BBR \setminus A$, a neighbourhood that
clearly does not contain any points of $A$, which means $c\not\in
\acc{A}$.

\bigskip

Conversely, suppose that $\acc{A}\subseteqq A$. We will prove that
$\BBR\setminus A$ is open. If $x\in \BBR\setminus A$, then {\em a
fortiori}, $x\not\in \acc{A}$. This means that there is an $r>0$
such that $\loro{x-r}{x+r}\cap A = \varnothing$. Hence
$\loro{x-r}{x+r}\subseteqq \BBR\setminus A$, and so $\BBR\setminus
A$ is open.
\end{pf}


\begin{rem}
One has
$$A^* \subseteqq A, \quad \closure{A}-A \subseteqq \acc{A}, \quad A^* \cap \acc{A} = \varnothing, \quad A^* \cup \acc{A} = \closure{A}.  $$
\end{rem}

\section{Connected Sets}
\begin{df}
A set $X\subseteqq \BBR$ is connected if, given open sets $U, V$ of
$\BBR$ with $U\cup V = X$, $U\cap V = \varnothing$, either
$U=\varnothing$ or $V=\varnothing.$
\end{df}
\begin{thm}
If $X\subseteqq \BBR$ is connected, and if $(a, c)\in X^2$,
$b\in\BBR$, are such that $a<b< c$ then $b\in X$.
\end{thm}

\begin{cor}\label{cor:R-is-connected}The only connected sets of $\BBR$ are the intervals. In particular, $\BBR$ is connected.
\end{cor}

\section{Compact Sets}

\begin{df}
A sequence of open sets $U_1, U_2, \ldots$ is said to be an {\em
open cover} for $A\subseteqq \BBR$ if $A\subseteqq \bigcup _{n=1}
^\infty U_n$. $U_1, U_2, \ldots$ has a {\em subcover} $U_{k_1},
U_{k_2}, \ldots$ of $A$ if $A\subseteqq \bigcup _{n=1} ^\infty
U_{k_n}$.
\end{df}
\begin{df}
A set of real numbers is said to be {\em compact in $\BBR$} if every
open cover of the set has a finite subcover.\footnote{This
definition is appropriate for $\BBR$ but it is not valid in general.
However, it very handy for one-variable calculus, hence we will
retain it.}
\end{df}
\begin{exa}Since $\BBR = \bigcup _{n\in\BBZ} \loro{n-1}{n+1}$, the
sequence of intervals $\loro{n-1}{n+1}, \quad n\in\BBZ$ is a cover
for $\BBR$.
\end{exa}
\begin{thm}\label{thm:[ab]-is-compact}
Let $a, b$ be real numbers with $a\leq b$. The closed interval
$\lcrc{a}{b}$ is compact in $\BBR$.
\end{thm}
\begin{pf}Let $U_1, U_2, \ldots $ be an open cover for
$\lcrc{a}{b}$. Let $E$ be the collection of all $x\in \lcrc{a}{b}$
such that $\lcrc{a}{x}$ has a finite subcover from the $U_i$. We
will shew that $b\in E$.

\bigskip

 Since $a\in \bigcup _{i=1} ^\infty U_i$, there exists $U_r$ such that $a\in U_r$. Thus $\{a\}=\lcrc{a}{a}\subseteqq U_r$
and so  $E\neq \varnothing$. Clearly, $b$ is an upper bound for $E$.
By the Completeness Axiom, $\sup E$ exists. We will shew that $b =
\sup E$.

\bigskip

By Theorem \ref{thm:sup-belongs-to-closed},  $\sup E \in \lcrc{a}{b}
\subseteqq \bigcup _{i=1} ^\infty U_i$, hence there exists $U_s$
such that $\sup E \in U_s$. Since $U_s$ is open, there exists
$\varepsilon >0$ such that $\loro{\sup E-\varepsilon}{\sup E +
\varepsilon}\subseteqq U_s$. By Theorem \ref{thm:approx-sup-inf}
there is $x\in E$ such that  $\sup E - \varepsilon < x \leq  \sup
E$. Thus there is a finite subcover from the $U_i$, say, $U_{k_1}$,
$U_{k_2}$, \ldots , $U_{k_n}$ such that $\lcrc{a}{x}\subseteqq
\bigcup _{i=1} ^n U_{k_i}$.

\bigskip

We thus have $$ \lcrc{a}{\sup E}\subseteqq \lcrc{a}{x}\bigcup
\loro{\sup E-\varepsilon}{\sup E + \varepsilon}\subseteqq
\left(\bigcup _{i=1} ^n U_{k_i}\right) \cup U_s,
$$a finite subcover. This means that $\sup E \in E$.

\bigskip

Suppose now that $\sup E < b$, and consider $y = \sup E +
\dfrac{1}{2}\min (b - \sup E, \varepsilon)$. Then
$$ \sup E < y, \qquad \lcrc{a}{y}=\lcrc{a}{\sup E}\cup \lcrc{\sup E}{y}
\subseteqq  \left(\bigcup _{i=1} ^n U_{k_i}\right) \cup U_s,$$
whence $y\in E$, contradicting the definition of $\sup E$. This
proves that $\sup E = b$ and finishes the proof of the theorem.
\end{pf}
\begin{thm}[Heine-Borel]\label{thm:heine-borel} A set $A$ of $\BBR$
is closed and bounded if and only if it is compact.
\end{thm}

\begin{pf}
Let $A$ be closed and bounded in $\BBR$, and let $U_1, U_2, \ldots
,$ be an open cover for $A$. There exist $(a, b)\in \BBR^2$, $a\leq
b$, such that $A\subseteqq \lcrc{a}{b}$. Since
$$\lcrc{a}{b}\subseteqq (\BBR \setminus A)\cup\bigcup _{i=1} ^\infty
U_i,
$$by Theorem \ref{thm:[ab]-is-compact} there is a finite subcover
of the $U_i$, say, $U_{k_1}, U_{k_2}, \ldots , U_{k_n}$ such that
$$\lcrc{a}{b}\subseteqq (\BBR \setminus A)\cup\bigcup _{i=1} ^n
U_{k_i}.
$$Therefore, since $A\cap (\BBR\setminus A)=\varnothing$,
$$A=A\cap \lcrc{a}{b}\subseteqq A\cap\left((\BBR \setminus A)\cup\bigcup _{i=1} ^n
U_{k_i}\right)\subseteqq \bigcup _{i=1} ^n U_{k_i},
$$and so $A$ admits a finite subcover.

\bigskip

Conversely, suppose that every open cover of $A$ admits a finite
subcover. The open cover $\loro{-n}{n}, n\in\BBN$ of $A$ must admit
a finite subcover by our assumption, hence there is $N\in \BBN$ such
that $A\subseteqq \loro{-N}{N}$, meaning that $A$ is bounded. Let us
shew now that $\BBR \setminus A$ is open.

\bigskip

Let $x\in \BBR\setminus A$. We have
$$ \bigcup  _{n\geq 1} \left(\BBR\setminus \lcrc{x-\frac{1}{n}}{x+\frac{1}{n}}\right) = \BBR \setminus \bigcap  _{n\geq 1}
\lcrc{x-\frac{1}{n}}{x+\frac{1}{n}} =\BBR\setminus \{x\} \supseteqq
A,$$since $x\not\in A$. By hypothesis there is $N\in\BBN$ and $n_1,
n_2, \ldots , n_N$ such that
$$A\subseteqq \bigcup _{k=1} ^N \left(\BBR\setminus \lcrc{x-\frac{1}{n_k}}{x+\frac{1}{n_k}}\right) \subseteqq
\BBR\setminus \lcrc{x-\frac{1}{n_m}}{x+\frac{1}{n_m}},$$where
$m=\max (n_1, n_2, \ldots , n_N)$. This gives $
\lcrc{x-\frac{1}{n_m}}{x+\frac{1}{n_m}}\subseteqq \BBR\setminus A$,
meaning that $\BBR\setminus A$ is open, whence $A$ is closed.

\end{pf}
\begin{cor}[Cantor's Intersection Theorem]
Let $$\lcrc{a_1}{b_1}\supseteqq\lcrc{a_2}{b_2}\supseteqq
\lcrc{a_3}{b_3}\supseteqq\ldots$$be a sequence of non-empty,
bounded, nested closed intervals. Then
$$ \bigcap _{j=1} ^\infty \lcrc{a_j}{b_j} \neq \varnothing . $$
\end{cor}
\begin{pf}Assume that $\lcrc{a_1}{b_1}\cap  \bigcap _{j=2} ^\infty \lcrc{a_j}{b_j} =\varnothing$. Then
  $$\lcrc{a_1}{b_1}\subseteqq \BBR\setminus  \bigcap _{j=2} ^\infty \lcrc{a_j}{b_j} =\bigcup _{j=2} ^\infty \left(\BBR\setminus \lcrc{a_j}{b_j}\right).$$
The $\BBR\setminus \lcrc{a_j}{b_j}$ form an open cover for
$\lcrc{a_1}{b_1}$, which is closed and bounded. By Theorem
\ref{thm:mono-reversing} we have $$\lcrc{a_{j}}{b_{j}} \subseteqq
\lcrc{a_{i}}{b_{i}} \implies \BBR\setminus
\lcrc{a_{i}}{b_{i}}\subseteqq \BBR\setminus \lcrc{a_{j}}{b_{j}}.$$
By the Heine-Borel Theorem \ref{thm:heine-borel} there is a finite
subcover, say
$$\lcrc{a_1}{b_1}\subseteqq\bigcup _{j=1} ^N \left(\BBR\setminus \lcrc{a_{n_j}}{b_{n_j}}\right)\subseteqq
\BBR\setminus \lcrc{a_{n_N}}{b_{n_N}}.$$But then $
\lcrc{a_{n_N}}{b_{n_N}}\subseteqq \BBR\setminus
\lcrc{a_{1}}{b_{1}}$, which contradicts $
\lcrc{a_{n_N}}{b_{n_N}}\subseteqq\lcrc{a_{1}}{b_{1}}$, and the proof
is complete.\end{pf}

\begin{thm}[Bolzano-Weierstrass]\label{thm:bolzano-weierstrass}
Every bounded infinite set of $\BBR$ has at least one accumulation
point.
\end{thm}

\begin{pf}
Let  $A$ be a bounded  set of $\BBR$  with $\acc{A}=\varnothing$.
Then $A^* = A = \closure{A}$. Notice that then every element of $A$
is an isolated point of $A$, and hence,

$$\forall a\in A, \quad \exists r_a>0, \quad \mathrm{such\ that}\ \quad \loro{a-r_a}{a+r_a} \cap A = \{a\}.  $$
Observe that $$A\subseteqq \bigcup _{a\in A}  \loro{a-r_a}{a+r_a},
$$and so the $ \loro{a-r_a}{a+r_a}$ form an open cover for $A$. Since $A=\closure{A}$, $A$ is closed.
By the Heine-Borel Theorem \ref{thm:heine-borel}  $A$ has a finite
subcover from among the $ \loro{a-r_a}{a+r_a}$ and so there exists
an integer $N>0$ and $a_i$ such that
$$ A\subseteqq \bigcup _{i=1} ^N  \loro{a_i-r_{a_i}}{a_i+r_{a_i}}. $$
Since $$A=A\cap \bigcup _{i=1} ^N  \loro{a_i-r_{a_i}}{a_i+r_{a_i}} =
\bigcup _{i=1} ^N \{a_i\},
$$$A$ has only $N$ elements and thus it is finite. This establishes
the contrapositive: a bounded set with no accumulation points is
finite, which proves the theorem.
\end{pf}

\begin{thm}Let $X\subseteqq \BBR$. Then the following are
equivalent.
\begin{enumerate}
\item $X$ is compact.
\item $X$ is closed and bounded.
\item every infinite set of $X$ has an accumulation point.
\item every infinite sequence of $X$ has a converging subsequence in
$X$.
\end{enumerate}
\label{thm:equivalent-statements-for-compactness}
\end{thm}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Give an example shewing that the union of an infinite number of
closed sets is not necessarily closed.
\end{pro}
\begin{pro}
Prove that a set $A\subseteqq \BBR$ is dense if and only if
$\closure{A}=\BBR$.
\end{pro}
\begin{pro}
For any set $A\subseteqq \BBR$ prove that $\bdy{A} =
\bdy{\BBR\setminus A}$.
\end{pro}
\begin{pro}
Let $A\neq \varnothing$ be a subset of $\BBR$. Assume that $A$ is
bounded above. Prove that $\sup (A) = \sup (\closure{A})$.
\end{pro}

\begin{pro}
Demonstrate that the only subsets of $\BBR$ which are simultaneously
open and closed in $\BBR$ are $\varnothing$ and $\BBR$. One codifies
this by saying that $\BBR$ is {\em connected}.
\end{pro}
\begin{pro}\label{pro:additive-closed-groups}
 Prove that the closed
additive subgroups of the real numbers are (i) just zero; or (ii)
all integral multiples of a fixed non-zero number (which may be
assumed positive); or (iii) all reals.
\begin{answer}
For the proof of this let $G$ be such a set  (so that $x + y$ is in
$G$ if $x, y$ are, and $G$ is closed), and suppose that we are not
in cases (i) or (ii). Then it is enough to show that $G$ contains
arbitrarily small positive numbers, for then multiples of these will
be dense in $\BBR$ , but $G$  being closed forces $G = \BBR$. To
achieve this let $\mathscr{I} = \inf \{x:x\in G, x > 0\}$. If
$\mathscr{I} = 0$ we are done; but if $\mathscr{I} > 0$ there cannot
be numbers $x\in G$ arbitrarily close to and greater than
$\mathscr{I}$, for then $x - \mathscr{I}$ would run through small
positive members of $G$, in particular smaller than $\mathscr{I}$,
contradicting its definition. This means that $\mathscr{I}$ belongs
itself to $G$, and from there it is easy to see that we are in case
(ii) contrary to the assumption. Hence indeed $\mathscr{I} = 0$, $G
= \BBR$.
\end{answer}
\end{pro}


\begin{pro}
Let $A\subseteqq \BBR$ and $B\subseteqq \BBR$. Prove the following
\begin{multicols}{2}
\begin{enumerate}
\item $\closure{\closure{A}} = \closure{A}$
\item $\interiorone{\interiorone{A}} = \interiorone{A}$
\item $A\subseteqq B \implies \closure{A}\subseteqq \closure{B}$
\item $A\subseteqq B \implies \interiorone{A}\subseteqq \interiorone{B}$
\item $\closure{A\cup B} =\closure{A}\cup \closure{B}$
\item $\closure{A\cap B} \subseteqq\closure{A}\cap \closure{B}$
\item $\interiorone{A}\cup \interiorone{B} \subseteqq \interior{A\cup B}$
\item $\interior{A\cap B} =\interiorone{A}\cap \interiorone{B}$
\end{enumerate}
\end{multicols}
\end{pro}
\end{multicols}

\section{$\closure{\closure{\BBR}}$}


\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} The algebraic
    rules introduced here will simplify some computations and
    statements in subsequent chapters.
\end{minipage}}
    \end{center}
Geometrically, each real number can be viewed as a point on a
straight line. We make the convention that we orient the real line
with $0$ as the origin, the positive numbers increasing towards the
right from $0$ and the negative numbers decreasing towards the left
of $0$, as in figure \ref{fig:the_real_line}.


\vspace{1cm}
\begin{figure}[h]
$$\psset{unit=.5}
\renewcommand{\pshlabel}[1]{{\tiny #1}}
\psaxes[yAxis=false](0, 0)(-7,0)(7, 0)
\psline[linestyle=dotted]{->}(7,0)(11,0)
\psline[linestyle=dotted]{->}(-7,0)(-11,0)
\uput[dr](11,0){+\infty}\uput[dl](-11,0){-\infty}
$$\vspace{1cm} \footnotesize\hangcaption{The Real Line.} \label{fig:the_real_line}
\end{figure}


We append the object $+\infty$, which is larger than any real
number, and the object $-\infty$, which is smaller than any real
number. Letting $x\in\BBR$, we make the following conventions.
\begin{equation} (+\infty) + (+\infty) = +\infty  \end{equation}
\begin{equation} (-\infty) + (-\infty) = -\infty  \end{equation}
\begin{equation} x + (+\infty) = +\infty  \end{equation}
\begin{equation} x + (-\infty) = -\infty  \end{equation}
\begin{equation} x(+\infty) = +\infty\ \ \ \mathrm{if} \ x> 0  \end{equation}
\begin{equation} x(+\infty) = -\infty\ \ \ \mathrm{if} \ x< 0    \end{equation}
\begin{equation} x(-\infty) = -\infty\ \ \ \mathrm{if} \ x> 0  \end{equation}
\begin{equation} x(-\infty) = +\infty\ \ \ \mathrm{if} \ x< 0    \end{equation}
\begin{equation} \label{eq:1/big}\dfrac{x}{\pm \infty} = 0    \end{equation}
Observe that we leave the following undefined: $$\dfrac{\pm
\infty}{\pm \infty}, \ \ \ \ (+\infty) + (-\infty), \ \ \ 0(\pm
\infty).
$$

\begin{df}
We denote by $\closure{\closure{\BBR}}=\lcrc{-\infty}{+\infty}$ the
set of real numbers with the two symbols $-\infty$ and
$+\infty$ appended, obeying the algebraic rules above. Observe that
then every set in $\closure{\closure{\BBR}}$ has a supremum (it may as well be
$+\infty$ if the set is not bounded above by finite numbers) and an infimum
(which may be $-\infty$).
\end{df}



\section{Lebesgue Measure}

\begin{df}Let $(a, b)\in\BBR^2$.
The {\em measure} of the open interval $\loro{a}{b}$ is $b-a$. We
denote this by $\meas{\loro{a}{b}} = b-a$. If $G = \bigcup _{k=1}
^\infty \loro{a_k}{b_k}$ is a union of disjoint, bounded, open
intervals, then $\meas{G} = \sum _{k=1} ^\infty (b_k-a_k)$.
\end{df}

\begin{df}
Let $E\subseteqq \BBR$ be a bounded set. The {\em outer measure of
$E$} is defined and denoted by $$\outermeas{E} = \inf _{\substack{E
\subseteqq O\\ O\ \mathrm{open}}} \meas{O}.
$$
\end{df}
\begin{df}
A set  $E\subseteqq \BBR$ is said to be {\em Lebesgue  measurable}
if $\forall \varepsilon > 0$, $\exists G\supseteqq E$ open such that
$\outermeas{G\setminus E}<\varepsilon$. In this case $\meas{E} =
\outermeas{E}$.
\end{df}
\section{The Cantor Set}
\begin{df}[The Cantor Set]
The Cantor set $C$ is the canonical example of an uncountable set of
measure zero. We construct $C$ as follows.

Begin with the unit interval $C_0 = \lcrc{0}{1}$, and remove the
middle third open segment $R_1 := \loro{\frac{1}{3}}{\frac{2}{3}}$.
Define $C_1$ as
\begin{equation}
C_1 := C_0 \setminus R_1 = \lcrc{0}{\frac{1}{3}} \bigcup
\lcrc{\frac{2}{3}}{1}
\end{equation}
Iterate this process on each remaining segment, removing the open
set
\begin{equation}
R_2 := \loro{\frac{1}{9}}{\frac{2}{9}} \bigcup
\loro{\frac{7}{9}}{\frac{8}{9}}
\end{equation}
to form the four-interval set
\begin{equation}
C_2 := C_1 \setminus R_2 = \lcrc{0}{\frac{1}{9}} \bigcup
\lcrc{\frac{2}{9}}{\frac{1}{3}} \bigcup
\lcrc{\frac{2}{3}}{\frac{7}{9}} \bigcup \lcrc{\frac{8}{9}}{1}
\end{equation}
Continue the process, forming $C_3, C_4, \ldots$ Note that $C_k$ has
$2^k$ pieces.

At each step, the endpoints of each closed segment will remain in
the set. See figure \ref{fig:cantor-set}.

The \emph{Cantor set} is defined as
\begin{equation}
C := \bigcap_{k=1}^{\infty} C_k = C_0 \setminus
\bigcup_{n=1}^{\infty}R_n
\end{equation}
\label{df:cantor-set}
\end{df}
\vspace{1cm}
\begin{figure}[h]
$$\psset{unit=1pc}
\begin{array}{ll}
\uput[l](-16,0){C_0} & \rput(-13.5,0){\psline{|-|}(0,0)(27,0) \uput[d](0,0){0}\uput[d](27,0){1}}  \\
\uput[l](-16,0){C_1} &  \rput(-13.5,0){\psline{|-|}(0,0)(9,0) \psline{|-|}(18,0)(27,0)  \uput[d](0,0){0}\uput[d](27,0){1}\uput[d](9,0){\frac{1}{3}}\uput[d](18,0){\frac{2}{3}}}\\
\uput[l](-16,0){C_2} &  \rput(-13.5,0){\psline{|-|}(0,0)(3,0)\psline{|-|}(6,0)(9,0)  \psline{|-|}(18,0)(21,0)\psline{|-|}(24,0)(27,0)  \uput[d](0,0){0}\uput[d](27,0){1}\uput[d](9,0){\frac{1}{3}}\uput[d](18,0){\frac{2}{3}}\uput[d](3,0){\frac{1}{9}}\uput[d](6,0){\frac{2}{9}}\uput[d](21,0){\frac{7}{9}}\uput[d](24,0){\frac{8}{9}}}\\
\uput[l](-16,0){\vdots} & \rput(0,0){\vdots}
\end{array}$$
\vspace{1cm} \hangcaption{Construction of the Cantor
Set.}\label{fig:cantor-set}
\end{figure}


\begin{thm}[Cardinality of the Cantor Set]The Cantor Set is uncountable.
\end{thm}
\begin{pf}
Starting with the two pieces of $C_1$, we mark the sinistral segment
``0'' and the dextral segment ``1''. We then continue to $C_2$, and
consider only the leftmost pair. Again, mark the segments ``0'' and
``1'', and do the same for the rightmost pair. Successively then,
mark the $2^{k-1}$ leftmost segments of $C_k$ ``0'' and the
$2^{k-1}$  rightmost segments ``1.'' The elements of the Cantor Set
are those with infinite binary expansions. Since there are uncountably
many such expansions, the Cantor Set is uncountable.\end{pf}
\begin{thm}[Measure of the Cantor Set]
The Cantor Set has (Lebesgue) measure $0$.
\end{thm}
\begin{pf}Using the notation of Definition \ref{df:cantor-set}, observe that
\begin{align}
\mu(R_1) &= \frac{2}{3} - \frac{1}{3} = \frac{1}{3}\\
\mu(R_2) &= \left(\frac{2}{9} - \frac{1}{9}\right) + \left(\frac{8}{9} - \frac{7}{9}\right) = \frac{2}{9}\\
&\ \vdots\\
\mu(R_k) &= \frac{2^{k-1}}{3^k}
\end{align}
Since  the $R$'s are disjoint, the measure of their union is the sum
of their  measures. Taking the limit as $k \to \infty$,
\begin{equation}
\mu\left(\bigcup_{n=1}^{\infty}R_n\right) = \sum_{n=1}^{\infty}
\frac{2^{n-1}}{3^n} = 1.
\end{equation}
Since clearly $\mu(C_0) = 1$,  we then have
\begin{equation}
\mu(C) = \mu\left(C_0 \setminus \bigcup_{n=1}^{\infty}R_n\right) =
\mu(C_0) - \sum_{n=1}^{\infty} \frac{2^{n-1}}{3^n} = 1-1 = 0.
\end{equation}
\end{pf}
\begin{thm}The Cantor set is closed and its interior is empty.
\end{thm}
\begin{pf}Each of  $C_0,
C_1, C_2, \ldots$, is closed, being the union of a finite number of
closed intervals. Thus the Cantor Set is closed, as it is the
intersection of  closed sets.

\bigskip

Now, let $I$ be an open interval. Since the numbers of the form
$\dfrac{m}{3^n}$, $(m, n)\in \BBZ\times\BBN$ are dense in the reals, there
exists a rational number $\dfrac{m}{3^n}\in I$. Expressed in
ternary, this rational number has a finite expansion. If this
expansion contains the digit ``1'', then this number does not belong
to Cantor Set, and we are done. If not, since $I$ is open, there
must exist a number $k
> n$ such that $\dfrac{m}{3^n} + \dfrac{1}{3^k} \in I$. By construction, the last digit of the
ternary expansion of this number is also ``1'', and hence this
number does not belong to the Cantor Set either.\end{pf}





\chapter{Sequences}




\section{Limit of a Sequence}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} The {\em limit}
    concept is at the centre of calculus. We deal with discrete
    quantities first, that is, with limits of sequences.
\end{minipage}}
    \end{center}



\begin{df}
A {\em (numerical) sequence} is a function $a: \BBN \rightarrow
\BBR$. We usually denote $a(n)$ by $a_n$.\footnote{It is customary
to start at $n=1$ rather than $n=0$. We won't be too fussy about
such complications, but we will be careful to write sense.}
\end{df}
\begin{rem}
 We will use the notation
$\seq{a_n}{n=k}{l}$ to denote the sequence $a_k, a_{k+1}, \ldots,
a_l$. For example
$$\seq{a_n}{n=0}{10} = \{a_0, a_1, a_2, \ldots ,a_{10}\},  $$
$$\seq{b_n}{n=4}{6} = \{b_4, b_5, b_{6}\},  $$
$$\seq{\left(1+\dfrac{1}{n}\right)^n}{n=1}{+\infty} = \{2, \dfrac{9}{4},\dfrac{64}{27}, \ldots ,\},  $$etc.
\end{rem}


\begin{exa}
The {\em Harmonic sequence} is
$$1, \quad \dfrac{1}{2},\quad  \dfrac{1}{3}, \quad  \ldots,    $$or
$a_n = \dfrac{1}{n}$ for $n\geq 1$.
\end{exa}
\begin{df}
A sequence $\seq{a_n}{n=1}{+\infty}$ is {\em bounded} if there
exists a constant $K>0$ such that $\forall n, \absval{a_n}\leq K$.
It is {\em increasing} if for all $n>0$, $a_{n}\leq a_{n+1}$ and
 {\em decreasing} if for all $n\geq 0$, $a_{n}\geq a_{n+1}$.
\end{df}


\section{Convergence of Sequences}
\begin{df} A sequence $\seq{a_n}{n=1}{+\infty}$ is said to
{\em converge} if $$\exists L\in \BBR, \forall \varepsilon
> 0, \quad \exists N>0\quad  \mathrm{such\ that }\quad \forall n\in \BBN, \quad
n\geq N \implies \absval{a_n-L}<\varepsilon .$$ In other words,
eventually\footnote{A good word to use in informal speech
``eventually'' will mean ``for large enough values'' or in the case
at hand $\forall n \geq N$ for some strictly positive integer $N$.}
the differences
$$ \absval{a_n-L},\absval{a_{n+1}-L}, \absval{a_{n+2}-L}, \ldots  $$
remain smaller than an arbitrarily prescribed small quantity. We
denote the fact that the sequence  $\seq{a_n}{n=1}{+\infty}$
converges to $L$ as $n\rightarrow +\infty$ by $$ \lim _{n\rightarrow
+\infty}a_n = L, \quad \mathrm{or\ by}\quad a_n \rightarrow L \quad
\mathrm{as}\quad n\rightarrow +\infty .$$ A sequence that does not
converge is said to {\em diverge}. Thus a sequence diverges if
$$\forall L\in \BBR, \exists \varepsilon >0,  \forall N\in\BBN ,\exists n\in\BBN \quad
\mathrm{such\ that}\quad  n > N \quad \mathrm{and}\quad
\absval{a_n-L}\geq \varepsilon. $$
\end{df}
\begin{rem}Given a sequence $\seq{a_n}{n=1}{+\infty}$ and
$L\in \BBR$,
$$ a_n \rightarrow L \quad \mathrm{as} \quad \ngrows \quad\mathrm{if\ and\ only\ if} \quad \lim \inf a_n = \lim \sup a_n = \lim a_n =L.$$
\end{rem}
\begin{df} A sequence $\seq{b_n}{n=1}{+\infty}$ {\em diverges to plus infinity}
if $\forall M>0, \quad \exists N>0$ such that $\forall n\geq N,
\quad b_n>M$. A sequence $\seq{c_n}{n=1}{+\infty}$ {\em diverges to
minus infinity} if $\forall M>0, \quad \exists N>0$ such that
$\forall n\geq N, \quad c_n<-M$. A sequence that diverges to plus or
minus infinity is said to {\em properly diverge}. Otherwise it is
said to {\em oscillate}.
\end{df}
\begin{df}
Given a sequence $\seq{a_n}{n=1}{+\infty}$, we say that $\lim
_{\ngrows}a_n$ {\em exists} if it is either convergent or properly
divergent.
\end{df}
\begin{exa}
The constant sequence $$1,1,1,1, \ldots   $$converges to $1$.
Similarly, if a sequence is eventually stationary, that is,
constant, it converges to that constant.
\end{exa}
\begin{exa}
Consider the sequence $$1, \dfrac{1}{2}, \dfrac{1}{3}, \ldots,
\frac{1}{n}, \ldots, $$ We claim that $\dfrac{1}{n}\rightarrow 0$ as
$n\rightarrow+\infty$. Suppose we wanted terms that get closer to
$0$ by at least $.00001 = \dfrac{1}{10^5}$. We only need to look at
the $100000$-term of the sequence: $\dfrac{1}{100000} =
\dfrac{1}{10^5}$. Since the terms of the sequence get smaller and
smaller, any term after this one will be within $.00001$ of $0$. We
had to wait a long time---till after the $100000$-th term---but the
sequence eventually did get closer than $.00001$ to $0$. The same
argument works for any distance, no matter how small, so we can
eventually get arbitrarily close to $0$. A rigorous proof is as
follows. If $\varepsilon
> 0$ is no matter how small, we need only to look at the terms
after $N = \floor{ \frac{1}{\varepsilon} + 1}$ to see that, indeed,
if $n > N$, then $$s_n = \frac{1}{n} < \frac{1}{N} =
\frac{1}{\floor{\frac{1}{\varepsilon} + 1} } < \varepsilon.$$ Here
we have used the inequality $$ t - 1 < \floor{t} \leq t, \ \ \forall
t\in \BBR.$$
\end{exa}
\begin{exa}
The sequence $$0, 1,4,9,16, \ldots ,n^2, \ldots$$ diverges to
$+\infty$, as the sequence gets arbitrarily large. A rigorous proof
is as follows. If $M
> 0$ is no matter how large, then the terms after $N = \floor{
\sqrt{M}} + 1$ satisfy ($n
> N$)
$$t_n = n^2 > N^2  = (\floor{ \sqrt{M}} + 1)^2 > M. $$
\end{exa}

\begin{exa}
The sequence $$1,-1,1,-1,1,-1,\ldots, (-1)^n,\ldots$$ has no limit
(diverges), as it bounces back and forth from $-1$ to $+1$
infinitely many times.
\end{exa}
\begin{exa}
The sequence $$0, -1,2,-3,4,-5,\ldots, (-1)^nn, \ldots , $$ has no
limit (diverges), as it is unbounded and alternates back and forth between
positive and negative values.
\end{exa}

\bigskip

We will now see some properties of limits of sequences.
\begin{thm}[Uniqueness of Limits]
If $a_n\rightarrow L$ and $a_n\rightarrow L'$ as $n\rightarrow
+\infty$ then $L=L'.$
\end{thm}
\begin{pf}The statement only makes sense if both $L$ and $L'$ are
finite, so assume so. If $L\neq L'$, take $\varepsilon =
\dfrac{|L-L'|}{2}>0$ in the definition of convergence. Now
$$\lim _{n\rightarrow +\infty} a_n = L \implies \exists N_1> 0, \quad \forall n\geq N_1 \absval{a_n-L}<\varepsilon, $$
$$\lim _{n\rightarrow +\infty} a_n = L' \implies \exists N_2> 0, \quad \forall n\geq N_2 \absval{a_n-L'}<\varepsilon.
$$If $n>\max (N_1, N_2)$, then by the Triangle Inequality (Theorem \ref{tri_ineq})
 $$ \absval{L-L'}\leq \absval{L-a_n}+\absval{a_n-L'}< 2\varepsilon = \absval{L-L'},
 $$a contradiction, so $L=L'$.
\end{pf}
\begin{thm}\label{thm:conve-seq-bounded-be}
Every convergent sequence is bounded.
\end{thm}
\begin{pf}
Let $\seq{a_n}{n=1}{+\infty}$ converge to $L$. Using  $\varepsilon =
1$ in the definition of convergence, $\exists N>0$ such that $$
n\geq N \implies \absval{a_n-L}<1 \implies L-1<a_n<L+1,
$$hence $\absval{a_n}<\absval{L}+1$ for all $n\geq N$. Since only
finitely many terms precede $a_N$, the whole sequence is bounded by
$\max\left(\absval{a_1}, \ldots , \absval{a_{N-1}}, \absval{L}+1\right)$.
\end{pf}

\begin{figure}[h]
$$ \psset{unit=1pc} \psline[linewidth=1.5pt]{<->}(-10,0)(10,0)
\rput(-7,0){|}  \rput(-3,0){|}  \rput(0,0){|}  \rput(2.9,0){|}
\rput(4,0){|}  \rput(5,0){|}\rput(6.5,0){|}\uput[d](-7,0){x_0}
\uput[d](-3,0){x_1}\uput[d](0,0){{\tiny x_2}}\uput[d](2.9,0){\ddots
}\uput[d](4,0){{\tiny
x_n}}\uput[d](5,0){\ddots}\uput[d](6.5,0){{\tiny s}}
$$  \footnotesize\hangcaption{Theorem \ref{thm:conve_seq}.}
\label{fig:thm_conve_seq}
\end{figure}



When is it guaranteed that a sequence of real numbers has a limit?
We have the following result.
\begin{thm}\label{thm:bounded-increasing-seqs-convergent-be}Every bounded increasing sequence $\{a_n\}_{n = 0} ^{+\infty}$ of real numbers
converges to its supremum. Similarly, every bounded decreasing
sequence of real numbers converges to its infimum.
\end{thm}
\begin{pf}
The idea of the proof is sketched in figure \ref{fig:thm_conve_seq}.
By virtue of Axiom \ref{axi:completeness-of-R}, the sequence has a
supremum $s$. Every term of the sequence satisfies $a_n \leq s$. We
claim that eventually all the terms of the sequence are closer to
$s$ than a preassigned small distance $\varepsilon > 0$. Since $s -
\varepsilon$ is not an upper bound for the sequence, there must be a
term of the sequence, say $a_{n_0}$ with $s - \varepsilon \leq
a_{n_0}$ by virtue of the Approximation Property Theorem
\ref{thm:approx-sup-inf}. Since the sequence is increasing, we then
have

$$ s-\varepsilon \leq a_{n_0} \leq a_{n_0+1} \leq a_{n_0+2} \leq \ldots \leq s,   $$
which means that after the $n_0$-th term, we get to within
$\varepsilon$ of $s$.

\bigskip
To obtain the second half of the theorem, we simply apply the first
half to the sequence $\{-a_n\}_{n=0} ^{+\infty}$.  \end{pf}

\begin{thm}[Order Properties of Sequences]\label{thm:order-prop-seq}
Let $\seq{a_n}{n=1}{+\infty}$ be a sequence of real numbers
converging to the real number $L$. Then
\begin{enumerate}
\item If $a<L$ then eventually $a<a_n$.
\item If $L<b$ then eventually $a_n<b$.
\item If $a<L<b$ then eventually $a<a_n<b$.
\item If eventually $a_n\geq a$ then $L\geq a$.
\item If eventually $a_n\leq b$ then $L\leq b$.
\item If eventually $a\leq a_n\leq b$ then $a\leq L\leq b$.

\end{enumerate}

\end{thm}
\begin{pf}
We apply the definition of convergence repeatedly.
\begin{enumerate}
\item Taking $\varepsilon = L-a$ in the definition of convergence, $\exists N_1>0$ such that
$$\forall n \geq N_1, \quad  \absval{a_n-L}<L-a \implies  \forall n \geq N_1, \quad  a-L< a_n-L<L-a
\implies  \forall n \geq N_1, \quad  a< a_n, $$ that is, eventually
$a<a_n$.
\item Taking $\varepsilon = b-L$ in the definition of convergence, $\exists N_2>0$ such that
$$\forall n \geq N_2, \quad  \absval{a_n-L}<b-L \implies  \forall n \geq N_2, \quad  L-b<
a_n-L<b-L \implies  \forall n \geq N_2, \quad  a_n<b, $$ that is,
eventually $a_n<b$.
\item It suffices to take $N=\max(N_1,N_2)$ above.
\item If, to the contrary, $L<a$, then by part (2) we will eventually
have $a_n<a$, a contradiction.
\item If, to the contrary, $L>b$, then by part (1) we will eventually
have $a_n>b$, a contradiction.
\item If either $L<a$ or $b<L$ we would obtain a contradiction to
parts (4) or (5).
\end{enumerate}

\end{pf}
\begin{thm}[Sandwich Theorem] Let $\seq{a_n}{n=1}{+\infty}$,
$\seq{u_n}{n=1}{+\infty}$, $\seq{v_n}{n=1}{+\infty}$ be sequences of
real numbers such that eventually $$u_n \leq a_n \leq v_n.  $$ If
for $L\in \BBR$, $u_n \rightarrow L$ and $v_n\rightarrow L$ then
$a_n \rightarrow L$.
\end{thm}
\begin{pf}
For all $\varepsilon >0$ there are $N_1>0$, $N_2>0$ such that
$$\forall n\geq \max (N_1, N_2), \quad \absval{u_n-L}<\varepsilon, \quad \absval{v_n-L}<\varepsilon  \implies -\varepsilon < u_n-L < \varepsilon,
\quad -\varepsilon < v_n-L < \varepsilon.  $$ Thus for such $n$,
$$-\varepsilon < u_n -L \leq a_n - L \leq v_n - L < \varepsilon, \implies -\varepsilon < a_n-L < \varepsilon \implies \absval{a_n-L}<\varepsilon, $$
from where $\seq{a_n}{n=1}{+\infty}$ converges to $L$.
\end{pf}
\begin{thm} Let $\seq{a_n}{n=1}{+\infty}$ be a sequence of real numbers
such that $a_n\rightarrow L$. Then $\absval{a_n}\rightarrow
\absval{L}$.
\end{thm}
\begin{pf}
From Corollary \ref{cor:triangle-ineq}, we have  the inequality
$\absval{|a_n|-|L|}\leq \absval{a_n-L}$ from where the result
follows.
\end{pf}
\begin{thm}\label{thm:abs-val-conver-seq} Let $\seq{a_n}{n=1}{+\infty}$ be a sequence of real numbers
such that $a_n\rightarrow 0$, and let  $\seq{b_n}{n=1}{+\infty}$ be
a bounded sequence. Then $a_nb_n\rightarrow 0$.
\end{thm}
\begin{pf}Eventually $\absval{a_n}<\varepsilon$.
Assume that eventually $|b_n|\leq U$. Then $$\absval{a_nb_n} \leq
U\absval{a_n}<U\varepsilon ,
$$from where the result follows.
\end{pf}


\begin{thm}\label{thm:recip-conv-seq}
If $b_n \rightarrow l\neq 0$ then $b_n$ is eventually different from
$0$ and $\dfrac{1}{b_n}\rightarrow \dfrac{1}{l}$.
\end{thm}
\begin{pf}
By Theorem \ref{thm:abs-val-conver-seq}, $\absval{b_n}\rightarrow
\absval{l}$. Using $\varepsilon = \dfrac{|l|}{2}>0$ in the
definition of convergence, we have that eventually
$$\absval{|b_n|-|l|}<\dfrac{|l|}{2}\implies |l|-\dfrac{|l|}{2} <\absval{b_n} <
|l|+\dfrac{|l|}{2} \implies \dfrac{|l|}{2}<\absval{b_n},
$$that is, eventually $|b_n|$ is strictly positive and so
$\dfrac{1}{b_n}$ makes sense. Also, eventually,
$\dfrac{1}{|b_n|}<\dfrac{2}{|l|}$. Now, for sufficiently large $n$,
$$\absval{\dfrac{1}{b_n}-\dfrac{1}{l}} = \absval{\dfrac{l-b_n}{|b_n||l|}} = \dfrac{\absval{b_n-l}}{\absval{b_n}\absval{l}}<\dfrac{2\varepsilon}{\absval{l}\absval{l}},  $$
from where the result follows.\end{pf}
\begin{thm}[Algebraic Properties of Sequences]\label{thm:algebra-of-seq-limits}
Let $k\in \BBR$. If $\seq{a_n}{n=1}{+\infty}$ converges to $L$ and
$\seq{b_n}{n=1}{+\infty}$ converges to $L'$ then
$$ \lim _{n\rightarrow +\infty} (ka_n+ b_n ) = kL+ L', \qquad \lim _{n\rightarrow +\infty} (a_nb_n ) = LL'.  $$
Moreover, if $L'\neq 0$ then $$ \lim _{n\rightarrow +\infty}
\left(\dfrac{a_n}{b_n} \right) = \dfrac{L}{L'}. $$
\end{thm}
\begin{pf} The trick in all these proofs is the following
observation: If one multiplies a bounded quantity by an arbitrarily
small quantity, one gets an arbitrarily small quantity. Hence one
considers the absolute value of the difference of the terms of the
sequence from the expected limit.

\bigskip

Given $\varepsilon>0$ there are $N_1>0$ and $N_2>0$ such that
$\absval{a_n-L}<\varepsilon$ and $\absval{b_n-L'}<\varepsilon$. Then
$$\absval{(ka_n+b_n)-(kL+L')} = \absval{(ka_n-kL)+(b_n-L')} \leq \absval{k}\absval{a_n-L}+\absval{b_n-L'}<\varepsilon (\absval{k}+1),
$$and so the sinistral side is arbitrarily close to $0$,
establishing the first assertion.

\bigskip

For the product, observe that by Theorem
\ref{thm:conve-seq-bounded-be} there exists a  constant $K>0$ such
that $\absval{b_n}<K$. Hence
$$ \absval{a_nb_n-LL'} = \absval{(a_n-L)b_n + L(b_n-L')}\leq \absval{a_n-L}\absval{b_n}+\absval{L}\absval{b_n-L'}<\varepsilon K + \absval{L}\varepsilon = \varepsilon (K+\absval{L}),$$
and again, the sinistral side is made arbitrarily close to $0$.

\bigskip

Finally, if $L'\neq 0$ then by Theorem \ref{thm:recip-conv-seq},
$b_n$ is eventually $\neq 0$ and $\dfrac{1}{b_n}\rightarrow
\dfrac{1}{L'}$. We now simply apply the result we obtained for
products, giving
$$\dfrac{a_n}{b_n} = a_n\cdot\dfrac{1}{b_n}\rightarrow L\left(\dfrac{1}{L'}\right) = \dfrac{L}{L'}.  $$

\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
If  $\forall n >0, \quad a_n>0$ and $\seq{a_n}{n=1}{+\infty}$
converges to $L$ must it be the case that $L>0$?
\begin{answer}No. Take $a_n = \dfrac{1}{n}$. Then $a_n > 0$ always, but $L=0$.
\end{answer}
\end{pro}
\begin{pro}
Prove that if $a_n \rightarrow +\infty $ and if
$\seq{b_n}{n=1}{+\infty}$ is bounded, then $a_n+b_n \rightarrow
+\infty$.
\end{pro}
\begin{pro}
Prove that if $a_n \rightarrow +\infty $ and  $b_n \rightarrow
+\infty $, then $a_n+b_n \rightarrow +\infty$.
\end{pro}
\begin{pro}
Prove that if $a_n \rightarrow +\infty $ and if there exists $K>0$
such that  eventually $b_n\geq K$, then $a_nb_n \rightarrow
+\infty$.
\end{pro}
\begin{pro}
Prove that if $a_n \rightarrow +\infty $ and  $b_n \rightarrow
+\infty $, then $a_nb_n \rightarrow +\infty$.
\end{pro}
\begin{pro}
Prove that if $a_n \rightarrow +\infty $ and if
$\seq{b_n}{n=1}{+\infty}$ is bounded, then $a_n+b_n \rightarrow
+\infty$.
\end{pro}
\begin{pro}
Prove that if $a_n \rightarrow +\infty $ then
$\dfrac{1}{a_n}\rightarrow 0$.
\end{pro}

\begin{pro}
Prove that if $a_n \rightarrow 0 $ and if eventually $a_n>0$, then
$\dfrac{1}{a_n}\rightarrow +\infty$.
\end{pro}
\begin{pro}
Prove that $\sum _{i=1} ^n \dfrac{n}{n^2+i} \rightarrow 1$ as
$\ngrows$.
\begin{answer}
We have for $n>1$,$$\dfrac{n^2}{n^2+n} =
\underbrace{\dfrac{n}{n^2+n}+\cdots + \dfrac{n}{n^2+n}}_{n\
\mathrm{times}}< \sum _{i=1} ^n
\dfrac{n}{n^2+i}<\underbrace{\dfrac{n}{n^2+1}+ \cdots +
\dfrac{n}{n^2+1}}_{n\ \mathrm{times}} =\dfrac{n^2}{n^2+1}, $$and the
result follows by the Sandwich Theorem since each of the sequences
on the extremes converges to $1$.
\end{answer}
\end{pro}
\begin{pro}
Prove that $\dfrac{1}{(n!)^{1/n}} \rightarrow 0$.
\begin{answer}
Evidently $n! \leq n^n$. By problem \ref{pro:factorial-n^n/2}, if
$n>2$ then $n^{n/2}\leq n!$. Thus $$\dfrac{1}{n}\leq
\dfrac{1}{(n!)^{1/n}}\leq \dfrac{1}{n^{1/2}}$$and the result follows
by the Sandwich Theorem.
\end{answer}
\end{pro}
\begin{pro}
Prove that $\dfrac{2^n}{n!}\rightarrow 0$.
\begin{answer}
For $n \geq 2$ we have
$$ \dfrac{2^n}{n!} = \dfrac{2}{1}\cdot \dfrac{2}{2}\cdot \dfrac{2}{3}\cdots \dfrac{2}{n} \leq 2\cdot 1 \cdot 1 \cdots 1 \cdot \dfrac{2}{n} = \dfrac{4}{n} \rightarrow 0.$$
\end{answer}
\end{pro}
\begin{pro}
Let $x_1 , x_2 , \ldots$ be a {\em bounded} sequence of real
numbers, and put $s_n = x_1 + x_2 + \cdots + x_n$. Suppose that
$\dfrac{s_{n^2}}{n^2}\rightarrow 0$. Prove that $\dfrac{s_n}{n}
\rightarrow 0$.
\begin{answer}  There is a positive integer $m$ with $m^2 \leq n < (m +
1)^2$. Consider $$ \absval{\dfrac{s_{m^2}}{m^2} - \dfrac{s_{n}}{n}}.
$$\end{answer}
\end{pro}
\begin{pro}
Prove rigorously that the sequence $\seq{\sin n}{n=0}{+\infty}$ is
divergent.
\begin{answer}
Since $-1\leq \sin n \leq 1$, any possible limit must be finite. By
way of contradiction assume that $\sin n \rightarrow a$ as
$\ngrows$. Then
$$\lim _{\ngrows} \sin n = a \implies  \lim _{\ngrows} \sin (n+2) = a,  $$
whence $$\lim _{\ngrows} (\sin (n+2)-\sin n) = a-a=0.  $$Now,
$$\sin (n+2)-\sin n=2(\sin 1)\cos (n+1) \implies \cos (n+1)\rightarrow 0, \quad \mathrm{as}\ \ngrows.  $$
From $$\cos  (n+1) = \cos n \cos 1 -\sin n\sin 1 $$we obtain
$$\sin n = \dfrac{1}{\sin 1}\left(\cos n\cos 1 - \cos (n+1)\right) \rightarrow \dfrac{1}{\sin 1}\left(0\cdot \cos 1 - 0\right) = 0,
$$and so $a=0$. But then
$$ 1 = \sin ^2 n + \cos ^2n\rightarrow 0^2 + 0^2 = 0, $$a
contradiction.
\end{answer}
\end{pro}
\begin{pro}
Prove that $(n!)^{1/n}\rightarrow +\infty$ as $\ngrows$.
\begin{answer}
By problem \ref{pro:factorial-n^n/2}, $(n!)^{1/n}>\sqrt{n}$ for
$n\geq 3$. Hence, for all $M>0$, as long as $n>M^2$ we will have
$$ (n!)^{1/n}>\sqrt{n}>M, $$giving the result.
\end{answer}
\end{pro}
\begin{pro}
A sequence of real numbers $a_1, a_2, \ldots $ satisfies, for all
$m, n$, the inequality
$$ \absval{a_m+a_n-a_{m+n}} \leq \dfrac{1}{m+n}.
$$Prove that this sequence is an arithmetic progression.
\end{pro}
\begin{pro}
Prove rigorously that $\sqrt{n+1}-\sqrt{n}\rightarrow 0$ as
$\ngrows$.
\begin{answer}
We have
$$\sqrt{n+1}-\sqrt{n} = \dfrac{n+1-n}{\sqrt{n+1}+\sqrt{n}} = \dfrac{1}{\sqrt{n+1}+\sqrt{n}}<\dfrac{1}{2\sqrt{n}}.  $$
Hence, as long as $\dfrac{1}{2\sqrt{n}}< \varepsilon$ that is, as
long as  $n>\dfrac{1}{4\varepsilon ^2} $ we will have
$$\absval{\sqrt{n+1}-\sqrt{n}} < \dfrac{1}{2\sqrt{n}}<\varepsilon
.$$
\end{answer}
\end{pro}
\begin{pro}
Prove that the sequence $H_n = 1 + \dfrac{1}{2}+\dfrac{1}{3}+\cdots
+ \dfrac{1}{n}$ diverges to $+\infty$.
\begin{answer}
Write $$ \sum_{n=1}^{2^M} \frac{1}{n} = \sum_{m=1}^M
\sum_{n=2^{m-1}+1}^{2^m} \frac{1}{n}. $$ Since $1/n \geq 1/N$ when
$n \leq N$, we gather that $$ \sum_{n=2^{m-1}+1}^{2^m} \frac{1}{n}
\geq \sum_{n=2^{m-1}+1}^{2^m} 2^{-m} = (2^m - 2^{m-1}) 2^{-m} =
\frac{1}{2}.$$ Thus $$ \sum_{n=1}^{2^M} \frac{1}{n} \geq \frac{M}{2}
$$ and the sequence can be made arbitrarily large.

\end{answer}
\end{pro}

\begin{pro}
Find
$$ \lim _{K\rightarrow +\infty} \sum _{n=1} ^K
\dfrac{\sqrt{(n-1)!}}{(1+\sqrt{1})(1+\sqrt{2})(1+\sqrt{3}) \cdots
(1+\sqrt{n})}. $$
\begin{answer}
Observe that for $n \geq 2$,
$$\begin{array}{l}\dfrac{\sqrt{(n-1)!}}{(1+\sqrt{1})(1+\sqrt{2})(1+\sqrt{3}) \cdots
(1+\sqrt{n-1})} -
\dfrac{\sqrt{(n)!}}{(1+\sqrt{1})(1+\sqrt{2})(1+\sqrt{3}) \cdots
(1+\sqrt{n})} \\ =
\dfrac{\sqrt{(n-1)!}}{(1+\sqrt{1})(1+\sqrt{2})(1+\sqrt{3}) \cdots
(1+\sqrt{n-1})}\left(1-\dfrac{\sqrt{n}}{1+\sqrt{n}}\right)
\\
 =  \dfrac{\sqrt{(n-1)!}}{(1+\sqrt{1})(1+\sqrt{2})(1+\sqrt{3})
\cdots (1+\sqrt{n})}.  \end{array}$$Therefore
$$\sum _{n=1} ^K
\dfrac{\sqrt{(n-1)!}}{(1+\sqrt{1})(1+\sqrt{2})(1+\sqrt{3}) \cdots
(1+\sqrt{n})}=1 -\frac{\sqrt{K!}}{(1+\sqrt{1})(1+\sqrt{2})\cdots
(1+\sqrt{K})}.$$ Now prove that
$u_K=\frac{\sqrt{K!}}{(1+\sqrt{1})(1+\sqrt{2})\cdots (1+\sqrt{K})}$
decreases to $0$.
\end{answer}

\end{pro}

\begin{pro}
What reasonable meaning can be given to
$$ \sqrt{1+\sqrt{1+\sqrt{1+\sqrt{\cdots}}}}\qquad ? $$
\begin{answer} Put $x_1 = 1, \quad x_{n+1} = \sqrt{1+x_n}, n\geq 1$. We claim
that the sequence $\seq{x_n}{n=1}{+\infty}$ is increasing and
bounded above. By Theorem
\ref{thm:bounded-increasing-seqs-convergent-be} the sequence must
have a limit $L$. To prove that the sequence is increasing consider
$x_{n+1}-x_n$ (fill in this gap). To prove that the sequence is
bounded, we claim that for all $n\geq 1$,  $x_n <
 4$. This is clearly true for $n=1$. So assume that $x_n<4$.
 Then $$x_{n+1} = \sqrt{1+x_n} < \sqrt{1+4}=\sqrt{5}<4,  $$
and so the assertion follows by induction.

\bigskip

Since we have shewn that $L$ exists we now may compute
$$L= \lim _{\ngrows} x_{n+1} =  \lim _{\ngrows} \sqrt{1+x_n} = \sqrt{1+L}\implies L=\sqrt{1+L} \implies L^2-L-1=0 \implies L= \dfrac{1+\sqrt{5}}{2},
$$where we have chosen the positive root as the sequence is clearly
strictly positive.
\end{answer}
\end{pro}
\begin{pro}
Prove that $$\dfrac{1+2+\cdots + n}{n^2}\rightarrow \dfrac{1}{2},
\quad \mathrm{as}\ \ngrows.$$
\begin{answer}
By Theorem \ref{thm:sum-of-first-n-integers}, $1+2+\cdots + n =
\dfrac{n^2+n}{2}$, and the desired result follows.
\end{answer}
\end{pro}
\begin{pro}
Calculate the following limits:
\begin{enumerate}
\item $\lim _{\ngrows}\left(\dfrac{1^2}{n^3}+ \dfrac{2^2}{n^3}+ \cdots +
\dfrac{(n-1)^2}{n^3}\right)$,
\item $\lim _{\ngrows}\left(\dfrac{1}{1\cdot 2}+ \dfrac{1}{2\cdot 3}+ \cdots +
\dfrac{1}{n(n+1)}\right)$,
\item $\lim _{\ngrows}\left(\dfrac{1}{1\cdot 2\cdot 3}+ \dfrac{1}{2\cdot 3\cdot 4}+ \cdots +
\dfrac{1}{n(n+1)(n+2)}\right)$,\end{enumerate}
\begin{answer}
$\dfrac{1}{3}$; $1$; $\dfrac{1}{4}$.
\end{answer}
\end{pro}
\begin{pro}
What reasonable meaning can be given to
$$ \dfrac{1}{1+\dfrac{1}{1+\dfrac{1}{1+\dfrac{1}{\vdots}}}} \qquad ? $$
\begin{answer} Put $x_1 = 1, \quad x_{n+1} = \dfrac{1}{1+x_n}, n\geq 1$. Note
that this sequence is not monotonic: its terms oscillate. Instead,
prove by induction that $\dfrac{1}{2}\leq x_n \leq 1$ for all
$n\geq 1$, and that the subsequence of even-indexed terms is
increasing while that of odd-indexed terms is decreasing (fill in
this gap). By Theorem
\ref{thm:bounded-increasing-seqs-convergent-be} each subsequence
converges, and one checks that both limits satisfy the same
equation below, so the whole sequence has a limit $L$.

 \bigskip
Since we have shewn that $L$ exists we now may compute
$$L= \lim _{\ngrows} x_{n+1} =  \lim _{\ngrows} \dfrac{1}{1+x_n} = \dfrac{1}{1+L}\implies L=\dfrac{1}{1+L} \implies L^2+L-1=0 \implies L= \dfrac{\sqrt{5}-1}{2},
$$where we have chosen the positive root as the sequence is clearly
strictly positive.
\end{answer}
\end{pro}
\begin{pro}
Let $K\in\BBN\setminus \{0\}$, and let $a_1, \ldots , a_K, \lambda
_1 , \ldots , \lambda _K$ be strictly positive real numbers. Prove
that
$$ \lim _{\ngrows} \left(\sum _{k=1} ^K \lambda _k a_k ^n\right)^{1/n} = \max _{1\leq k \leq K} a_k, \qquad
 \lim _{\ngrows} \left(\sum _{k=1} ^K \lambda _k a_k ^{-n}\right)^{-1/n} = \min _{1\leq k \leq K} a_k. $$
\end{pro}
\begin{pro}
Prove that if $\seq{\dfrac{a_n}{b_n}}{n=1}{+\infty}$ is a monotonic
sequence, then the  $\seq{\dfrac{a_1+a_2+\cdots +
a_n}{b_1+b_2+\cdots + b_n}}{n=1}{+\infty}$ is also monotonic in the
same sense.
\begin{answer}
Assume that $\seq{\dfrac{a_n}{b_n}}{n=1}{+\infty}$  is increasing.
Then
$$\dfrac{a_1}{b_1}\leq \dfrac{a_2}{b_2} \leq \cdots \leq \dfrac{a_n}{b_n} \leq \dfrac{a_{n+1}}{b_{n+1}}.  $$
Using Theorem \ref{thm:min-max-fractions},
$$ \dfrac{a_1+a_2+\cdots +
a_n}{b_1+b_2+\cdots + b_n}\leq \dfrac{a_n}{b_n}  \leq
\dfrac{a_{n+1}}{b_{n+1}} \implies \dfrac{a_1+a_2+\cdots +
a_n}{b_1+b_2+\cdots + b_n} \leq \dfrac{a_1+a_2+\cdots +
a_{n+1}}{b_1+b_2+\cdots + b_{n+1}} \leq \dfrac{a_{n+1}}{b_{n+1}},
$$proving that $\seq{\dfrac{a_1+a_2+\cdots +
a_n}{b_1+b_2+\cdots + b_n}}{n=1}{+\infty}$ is also increasing. If
$\seq{\dfrac{a_n}{b_n}}{n=1}{+\infty}$  were decreasing,
$\seq{-\dfrac{a_n}{b_n}}{n=1}{+\infty}$ is increasing and we apply
what we just have proved.
\end{answer}
\end{pro}

\begin{pro}
Let $a, b, c$ be real numbers such that $b^2-4ac<0$. Let
$\seq{X_n}{n=1}{+\infty}$, $\seq{Y_n}{n=1}{+\infty}$ be sequences of
real numbers such that
$$ aX^2 _n + bX_nY_n + cY_n ^2 \rightarrow 0, \quad \mathrm{as}\ \ngrows.
$$Prove that $X_n\rightarrow 0$ and $Y_n \rightarrow 0$ as
$\ngrows$.
\end{pro}
\begin{pro}[Gram's Product]Prove that $$ \lim _{\ngrows}\prod _{k=2} ^n \dfrac{k^3-1}{k^3+1} =\dfrac{2}{3}.$$
\begin{answer}
We have
$$\prod _{k=2} ^n \dfrac{k^3-1}{k^3+1} = \prod _{k=2} ^n \dfrac{k-1}{k+1}  \prod _{k=2} ^n \dfrac{k^2+k+1}{k^2-k+1}.  $$
Now
$$\prod _{k=2} ^n \dfrac{k-1}{k+1}  = \dfrac{(n-1)!}{\frac{(n+1)!}{2}} = \dfrac{2}{n(n+1)}.  $$
By observing that  $(k+1)^2-(k+1)+1 = k^2+k+1$, we gather that
$$ \prod _{k=2} ^n \dfrac{k^2+k+1}{k^2-k+1} = \dfrac{3^2+3+1}{2^2-2+1}\cdot \dfrac{4^2+4+1}{3^2+3+1}\cdot \dfrac{5^2+5+1}{4^2+4+1}
\cdots \dfrac{n^2+n+1}{(n-1)^2+(n-1)+1} =\dfrac{n^2+n+1}{3}.  $$
Thus
$$ \prod _{k=2} ^n \dfrac{k^3-1}{k^3+1}  = \dfrac{2}{3} \cdot \dfrac{n^2+n+1}{n(n+1)} \rightarrow
\dfrac{2}{3},$$as $\ngrows$.
\end{answer}
\end{pro}
\begin{pro}
Prove that the sequence $\seq{x_n}{n=1}{+\infty}$ with $x_n = 1 +
\dfrac{1}{2^2} + \dfrac{1}{3^2} + \cdots +\dfrac{1}{n^2}$ satisfies
$x_n \leq  2-\dfrac{1}{n}$ for $n\geq 1$. Hence deduce that it
converges.
\begin{answer}
Clearly $x_n <x_n +\dfrac{1}{(n+1)^2}= x_{n+1}$, and so the sequence
is strictly increasing. By shewing that $x_n < 2-\dfrac{1}{n}<2$ we
will be shewing that it is bounded above, and hence convergent by
Theorem \ref{thm:bounded-increasing-seqs-convergent-be}. For $n=1$,
$x_1 = 1 = 2-\dfrac{1}{1}$ and so the assertion is true. Assume that
$x_n < 2-\dfrac{1}{n}$. Then
$$x_{n+1} = x_n +\dfrac{1}{(n+1)^2}< 2-\dfrac{1}{n}+\dfrac{1}{(n+1)^2} = 2+ \dfrac{n-(n+1)^2}{n(n+1)^2} =
 2- \dfrac{n^2+n+1}{n(n+1)^2}<2-\dfrac{n^2+n}{n(n+1)^2} = 2-\dfrac{1}{n+1},
 $$and the claimed inequality follows by induction. We will prove
 later on a result of Euler:
 $$1 +
\dfrac{1}{2^2} + \dfrac{1}{3^2} + \cdots +\dfrac{1}{n^2}+ \cdots =
\dfrac{\pi^2}{6}.
$$
\end{answer}
\end{pro}
\begin{pro}
Prove the convergence of the sequence $x_n = \sum _{k=1} ^n
\dfrac{1}{n+k}$, $n \geq 1$.
\end{pro}
\begin{pro}
Prove the convergence of the sequence, $x_1=a$, $x_2=b$,  $x_{n+1} =
\dfrac{x_{n}+x_{n-1}}{2}$, $n \geq 2$ and $(a, b)\in\BBR^2$, $a\neq
b$. Also, find its limit.
\end{pro}

\begin{pro}
Prove the convergence of the sequence, $x_1=a$,  $x_{n+1} =
\dfrac{1}{2}\left(x_n+\dfrac{b}{x_n}\right)$, $n \geq 1$ and $(a,
b)\in\BBR^2$, $a>0, b>0$. Also, find its limit.
\end{pro}

\begin{pro}
Prove the convergence of the sequence, $x_1=a$,  $x_{n+1} =
\dfrac{1}{2}\left(x_n+\dfrac{b}{x_n}\right)$, $n \geq 1$ and $(a,
b)\in\BBR^2$, $a<0, b>0$. Also, find its limit.
\end{pro}
\begin{pro}
Let $(a, b)\in \BBR^2$, $a>b>0$. Set $a_1 = \dfrac{a+b}{2}$, $b_1 =
\sqrt{ab}$. If for $n\geq 1$, $$a_{n+1} = \dfrac{a_n+b_n}{2}, \qquad b_{n+1}
= \sqrt{a_nb_n},
$$prove that
\begin{enumerate}
\item $\seq{a_n}{n=1}{+\infty}$ is monotonically decreasing,
\item $\seq{b_n}{n=1}{+\infty}$ is monotonically increasing,
\item both sequences converge,
\item their limits are equal.
\end{enumerate}
\end{pro}
\end{multicols}

\section{Classical Limits of Sequences}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}}
In this section we will obtain various classical limits. In
particular, we define the constant $e$ and obtain a few interesting
results about it.
\end{minipage}}
    \end{center}

 \begin{thm}\label{thm:geom-sequence-to-zero-goes}
Let $r\in\BBR$ be fixed. If $\absval{r}<1$ then $r^n \rightarrow 0$
as $n\rightarrow +\infty$. If $\absval{r}>1$ then $r^n \rightarrow
+\infty$ as $n\rightarrow +\infty$.
\end{thm}
\begin{pf}
Taking $x = \absval{\frac{1}{r}} - 1$ in Bernoulli's Inequality
(Theorem \ref{thm:bernoulli}), we find
$$\left|\frac{1}{r}\right|^n > 1 + n\left(\left|\frac{1}{r}\right| - 1\right)
> n\left(\left|\frac{1}{r}\right| - 1\right).
$$Therefore
$$|r|^n < \frac{|r|}{n(1 - |r|)} \rightarrow 0,$$as $n \rightarrow
+\infty$, since $\dfrac{1}{n}\rightarrow 0$ as $\ngrows$.

\bigskip


If $|r| > 1$, again by Bernoulli's Inequality
$$|r|^n = (1 + |r| - 1)^n > 1 + n(|r| - 1),$$ and the dextral
side can be made arbitrarily large.\end{pf}

\begin{thm}\label{thm:geometric-sum}
Let $|r|<1$. Then
$$1+r+r^2 + \cdots + r^n \rightarrow \dfrac{1}{1-r},\quad  \mathrm{as}\quad \ngrows.  $$
\end{thm}
\begin{pf}
If $S_n = 1+r+r^2 + \cdots + r^n $ then $rS_n = r+r^2+r^3+ \cdots
+ r^{n+1}$ and $$S_n-rS_n = 1-r^{n+1} \implies S_n =
\dfrac{1-r^{n+1}}{1-r}.
$$Then apply Theorem \ref{thm:geom-sequence-to-zero-goes}.
\end{pf}

\begin{rem}
An estimating trick that we will use often is the following. If $0 <
 r<1$ then the truncated sum is smaller than the infinite sum and
so, for all positive integers $k$:
$$1+r+r^2 + \cdots + r^k < 1+r+r^2 + \cdots + r^k + \cdots = \dfrac{1}{1-r}.  $$
\end{rem}
\begin{thm}\label{thm:root-n-of-a-to-1-goes}
Let $a\in \BBR$, $a>0$, be fixed. Then $a^{1/n} \rightarrow 1$ as
$n\rightarrow +\infty$.
\end{thm}


\begin{pf}
If $a>1$ then $a^{1/n}>1$ and by Bernoulli's Inequality,
$$a = (1+(a^{1/n}-1))^n >1+n(a^{1/n}-1) \implies 0\leq  a^{1/n}-1
<\dfrac{a-1}{n},$$whence $a^{1/n}-1\rightarrow 0$ as $\ngrows$.

\bigskip

If $0 < a < 1$ then $b=\dfrac{1}{a}>1$ and so by what we just
proved,
$$b^{1/n}\rightarrow 1 \implies \dfrac{1}{a^{1/n}}\rightarrow 1 \implies a^{1/n}\rightarrow 1,  $$
proving the theorem.\end{pf}
\begin{thm}\label{thm:exps-are-faster-than-powers}
Let $a\in\BBR$, $a>1$, $k\in\BBN\setminus\{0\}$, be fixed. Then
$\dfrac{a^n}{n^k} \rightarrow +\infty$ as $n\rightarrow +\infty$.
\end{thm}
\begin{pf}
Observe that $a^{1/k}>1$. We have, using  the Binomial Theorem,
$$\left(a^{1/k}\right)^n = \left(1+(a^{1/k}-1)\right)^n  = \sum _{i=0} ^n \binom{n}{i}(a^{1/k}-1)^i.$$Since each term of the above expansion is
$\geq 0$, we gather that  $$ \left(a^{1/k}\right)^n \geq
\dfrac{n(n-1)}{2}(a^{1/k}-1)^2 \implies
\dfrac{\left(a^{1/k}\right)^n }{n} \geq
\dfrac{(n-1)}{2}(a^{1/k}-1)^2 \implies \dfrac{\left(a^{1/k}\right)^n
}{n}\rightarrow +\infty \implies \dfrac{a^n}{n^k} \rightarrow
+\infty ,$$as desired.
\end{pf}
\begin{rem}
In particular $\dfrac{2^n}{n}\rightarrow +\infty$ as $\ngrows$.
\end{rem}
\begin{thm}\label{thm:factorials-are-fasters-than-exps}
Let $a\in\BBR$ be fixed. Then $\dfrac{a^n}{n!} \rightarrow 0$ as
$n\rightarrow +\infty$.
\end{thm}
\begin{pf}Put $N=\floor{\absval{a}}+1$ and let $n\geq N$. Then
$$\absval{\dfrac{a^n}{n!}} = \left(\dfrac{|a|}{1}\cdot \dfrac{|a|}{2}\cdots \dfrac{|a|}{N}\right)
\left(\dfrac{|a|}{N+1}\cdot \dfrac{|a|}{N+2}\cdots
\dfrac{|a|}{n}\right)\leq \left(\dfrac{|a|^N}{N!}\right)\left(1\cdot
1 \cdots 1\cdot\dfrac{|a|}{n}\right) \rightarrow 0,
$$as $\ngrows$.
\end{pf}

\begin{thm}\label{thm:e}
The sequence $$e_n = \left(1 + \frac{1}{n}\right)^n, n = 1, 2,
\ldots
$$ is a bounded increasing sequence, and hence it converges to a
limit, which we call $e$. Also, for all strictly positive integers
$n$, $\left(1 + \frac{1}{n}\right)^n <e$.
\end{thm}
\begin{pf}
By Theorem \ref{thm:ineq_bin} $$ \dfrac{b^{n + 1} - a^{n + 1}}{b -
a} \leq (n + 1)b^{n}\implies b^n[(n + 1)a - nb] < a^{n + 1}.
$$Putting $a = 1 + \dfrac{1}{n + 1}$, $b = 1 + \dfrac{1}{n}$ we obtain
$$ e_n = \left(1 + \frac{1}{n}\right)^n < \left(1 + \frac{1}{n+1}\right)^{n + 1} = e_{n + 1},   $$
whence the sequence $e_n, n = 1, 2, \ldots$ increases. Again, by
putting $a = 1$, $b = 1 + \dfrac{1}{2n}$ we obtain $$ \left(1 +
\frac{1}{2n}\right)^n  < 2  \implies  \left(1 +
\frac{1}{2n}\right)^{2n} <4 \implies e_{2n} < 4. $$ Since $e_n <
e_{2n} < 4$ for all $n$, the sequence is bounded above. In view of
Theorem \ref{thm:bounded-increasing-seqs-convergent-be} the sequence
converges to a limit. We call this limit $e$. It follows also from
this proof and from Theorem \ref{thm:order-prop-seq} that for all
strictly positive integers $n$, $\left(1 + \frac{1}{n}\right)^n <e$.
\end{pf}
\begin{rem}
Another way of obtaining $ \left(1 + \frac{1}{n}\right)^{n} <\left(1
+ \frac{1}{n+1}\right)^{n + 1} $ is as follows. Using the AM-GM
Inequality with $x_1 = 1, x_2=\cdots =x_{n+1}=1+\dfrac{1}{n}$ we
have
$$ \left(1+\dfrac{1}{n}\right)^{n/(n+1)} < \dfrac{1+n\left(1+\dfrac{1}{n}\right)}{n+1} \implies
\left(1+\dfrac{1}{n}\right)^{n/(n+1)} < \quad \dfrac{n+2}{n+1} \quad
= \left(1+\dfrac{1}{n+1}\right)
$$ from where the desired inequality is obtained.
\end{rem}

\begin{thm}
The sequence $\seq{\left(1 +
\frac{1}{n}\right)^{n+1}}{n=1}{+\infty}$ is strictly decreasing and
$\left(1 + \frac{1}{n}\right)^{n+1}\rightarrow e$. Also, for all
strictly positive integers $n$, $\left(1 + \frac{1}{n}\right)^{n+1}
>e$.
\end{thm}
\begin{pf}
By Theorem \ref{thm:ineq_bin} $$ \dfrac{b^{n + 1} - a^{n + 1}}{b -
a} \geq (n + 1)a^{n}.
$$Putting $a = 1 + \dfrac{1}{n + 1}$, $b = 1 + \dfrac{1}{n}$ we obtain
$$ \left(1 +
\frac{1}{n}\right)^{n+1}>\left(1 +
\frac{1}{n+1}\right)^{n+2}\left(\dfrac{n^3+4n^2+4n+1}{n(n+2)^2}\right).
$$The result will follow as long as $\left(\dfrac{n^3+4n^2+4n+1}{n(n+2)^2}\right)> 1$. But
$$n(n+2)^2=n(n^2+4n+4)=n^3+4n^2+4n< n^3+4n^2+4n+1 \implies  \dfrac{n^3+4n^2+4n+1}{n(n+2)^2}> 1.$$
Thus the sequence is a strictly decreasing sequence of
real numbers. Putting $a=1$, $b=1+\dfrac{1}{n}$ in $ \dfrac{b^{n +
1} - a^{n + 1}}{b - a} \geq (n + 1)a^{n}$ we get
$$ \left(1+\dfrac{1}{n}\right)^{n+1} > 1+ \dfrac{n+1}{n}>2, $$
 so the sequence is bounded below. In view of Theorem
\ref{thm:bounded-increasing-seqs-convergent-be} the sequence
converges to a limit $L$. To see that $L=e$ observe that
$$\left(1 + \frac{1}{n}\right)^{n+1} = \left(1 + \frac{1}{n}\right)^{n}\left(1 + \frac{1}{n}\right)\rightarrow e\cdot 1 = e.  $$
It follows also from this proof and from Theorem
\ref{thm:order-prop-seq} that for all strictly positive integers
$n$, $\left(1 + \frac{1}{n}\right)^{n+1} >e$.
 \end{pf}
\begin{rem}
The inequality
$\left(1+\dfrac{1}{n+1}\right)^{n+2}<\left(1+\dfrac{1}{n}\right)^{n+1}$
can be obtained by the Harmonic Mean-Geometric Mean Inequality by
putting $x_1=1, x_2=x_3=\cdots =x_{n+2} = 1+\dfrac{1}{n}$
$$ \dfrac{n+2}{\frac{1}{x_1}+\frac{1}{x_2}+\cdots +\frac{1}{x_{n+2}}}\leq (x_1x_2\cdots x_{n+2})^{1/(n+2)} \implies
\dfrac{n+2}{1+(n+1)\left(\dfrac{n}{n+1}\right)} <
\left(1+\dfrac{1}{n}\right)^{(n+1)/(n+2)}.$$
\end{rem}
\begin{thm}\label{thm:2<e<3} $2<e<3$.
\end{thm}
\begin{pf}
By the Binomial Theorem
$$\left(1 + \frac{1}{n}\right)^{n} = \sum _{k=0} ^n \binom{n}{k}\cdot \dfrac{1}{n^k}.
$$ Now, for $2 \leq k \leq n$,
$$ \binom{n}{k}\cdot \dfrac{1}{n^k}=\dfrac{1}{k!}\cdot \dfrac{n(n-1)(n-2)\cdots (n-k+1)}{n^k}=
\dfrac{1}{k!}\cdot (1)\cdot
\left(1-\dfrac{1}{n}\right)\left(1-\dfrac{2}{n}\right)\cdots
\left(1-\dfrac{k-1}{n}\right)\leq \dfrac{1}{2\cdot 3\cdots k}\leq
\dfrac{1}{2^{k-1}}.
$$
Thus
$$\left(1 + \frac{1}{n}\right)^{n} = \sum _{k=0} ^n \binom{n}{k}\cdot
\dfrac{1}{n^k}\leq 1 +1 + \dfrac{1}{2}+ \dfrac{1}{4}+\cdots +
\dfrac{1}{2^{n-1}}<1 +1 + \dfrac{1}{2}+ \dfrac{1}{4}+\cdots +
\dfrac{1}{2^{n-1}}+ \cdots <1+2=3,
$$by Theorem \ref{thm:geometric-sum}  (with $r=\frac{1}{2}$), and so the dextral
inequality is proved. The sinistral inequality follows from Theorem
\ref{thm:e}.
\end{pf}
\begin{rem} $e=  2.718281828459045235360287471352\ldots.$
\end{rem}
\begin{thm}\label{thm:e-as-a-sum} $e=\lim _{n\rightarrow +\infty} \left(1 + \dfrac{1}{1!}+ \dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots + \dfrac{1}{n!}\right)$.
\end{thm}
\begin{pf}
Put $y_k = 1 + \dfrac{1}{1!}+ \dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots +
\dfrac{1}{k!}$. Clearly $y_{k+1}>y_k$ so that
$\seq{y_k}{k=1}{+\infty}$ is an increasing sequence. We will prove
that it is bounded above with supremum $e$. By the Binomial Theorem
$$ \left(1+\dfrac{1}{n}\right)^n= \sum _{j=0} ^n \binom{n}{j}\cdot \dfrac{1}{n^j} =
 1 + \binom{n}{1}\dfrac{1}{n} + \cdots + \binom{n}{k}\dfrac{1}{n^k} + \cdots + \binom{n}{n}\dfrac{1}{n^n}
\geq  1 + \binom{n}{1}\dfrac{1}{n} + \cdots +
\binom{n}{k}\dfrac{1}{n^k},  $$for $0 < k <n$. Now let $j$ be fixed,
$0 < j < n$. Taking limits as $\ngrows$,
$$ \binom{n}{j}\cdot \dfrac{1}{n^j}=\dfrac{1}{j!}\cdot \dfrac{n(n-1)(n-2)\cdots (n-j+1)}{n^j}=
\dfrac{1}{j!}\cdot (1)\cdot
\left(1-\dfrac{1}{n}\right)\left(1-\dfrac{2}{n}\right)\cdots
\left(1-\dfrac{j-1}{n}\right) \implies \lim _{\ngrows }
\binom{n}{j}\cdot \dfrac{1}{n^j} = \dfrac{1}{j!}.
$$Hence, taking limits as $\ngrows$,
$$ \left(1+\dfrac{1}{n}\right)^n \geq 1 + \binom{n}{1}\dfrac{1}{n} + \cdots +
\binom{n}{k}\dfrac{1}{n^k} \implies e\geq  1 + \dfrac{1}{1!}+
\dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots + \dfrac{1}{k!} = y_k, $$ or
renaming,
\begin{equation}\label{equ:e-y-n-rename}
e\geq  1 + \dfrac{1}{1!}+ \dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots +
\dfrac{1}{n!} = y_n.
\end{equation}
Moreover, since $\binom{n}{k}\cdot \dfrac{1}{n^k}=\dfrac{1}{k!}\cdot
(1)\cdot \left(1-\dfrac{1}{n}\right)\left(1-\dfrac{2}{n}\right)\cdots
\left(1-\dfrac{k-1}{n}\right)\leq \dfrac{1}{2\cdot 3\cdots k} =
\dfrac{1}{k!}$, we have
\begin{eqnarray}\notag  \left(1+\dfrac{1}{n}\right)^n & = &1 +
\binom{n}{1}\dfrac{1}{n} + \cdots + \binom{n}{k}\dfrac{1}{n^k} +
\cdots + \binom{n}{n}\dfrac{1}{n^n} \\ \notag & \leq & 1 +
\dfrac{1}{1!}+ \cdots  +\dfrac{1}{k!}+\cdots + \dfrac{1}{n!}\\
& = & y_n. \label{equ:e<y-n}
\end{eqnarray}

In conclusion, from \ref{equ:e-y-n-rename} and \ref{equ:e<y-n} we
get $$\left(1+\dfrac{1}{n}\right)^n\leq y_n \leq e,$$and by taking
limits and using the Sandwich Theorem, we get that $y_n \rightarrow
e$ as $\ngrows$.
\end{pf}
\begin{lem}\label{lem:y_m+n-y_n}Let $n, m$ be strictly positive integers and
let $ 1 + \dfrac{1}{1!}+ \dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots +
\dfrac{1}{n!} = y_n$. Then $y_{m+n}-y_n<\dfrac{1}{n\cdot n!}$.
\end{lem}
\begin{pf}
We have $$\begin{array}{lll}y_{m+n}-y_n & = & \dfrac{1}{(n+1)!}+
\dfrac{1}{(n+2)!}+\dfrac{1}{(n+3)!}+\cdots + \dfrac{1}{(n+m)!}\\
& < & \dfrac{1}{(n+1)!}\left(1+
\dfrac{1}{n+2}+\dfrac{1}{(n+2)^2}+\cdots + \dfrac{1}{(n+2)^{m-1}}\right)\\
& < & \dfrac{1}{(n+1)!}\left(1+
\dfrac{1}{n+2}+\dfrac{1}{(n+2)^2}+\cdots  \right)\\
& = & \dfrac{1}{(n+1)!}\left(\dfrac{1}{1-\dfrac{1}{n+2}}\right)\\
& = & \dfrac{1}{(n+1)!}\cdot \dfrac{n+2}{n+1}.\\
\end{array}$$Here the second inequality follows by using the
estimating trick deduced from Theorem \ref{thm:geometric-sum}.
Observe that this bound is independent of $m$.
\end{pf}
\begin{lem}\label{lem:e-y_n} Let $ 1 + \dfrac{1}{1!}+ \dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots +
\dfrac{1}{n!} = y_n$. Then $0<e-y_n< \dfrac{1}{n!n}$.
\end{lem}
\begin{pf}
From Lemma \ref{lem:y_m+n-y_n}, $$0 < y_{m+n}-y_n<
\dfrac{1}{(n+1)!}\cdot \dfrac{n+2}{n+1}.   $$ Taking the limit as $m
\rightarrow +\infty$ we deduce
 $$0 < e-y_n\leq
\dfrac{1}{(n+1)!}\cdot \dfrac{n+2}{n+1}.   $$ (The first inequality
is strict by Theorem \ref{thm:e-as-a-sum}.) We only need to shew
that for integer $n\geq 1$
$$\dfrac{1}{(n+1)!}\cdot \dfrac{n+2}{n+1}<  \dfrac{1}{n!n}. $$
But working backwards (which we are allowed to do, as all quantities
are strictly positive),
$$\begin{array}{lll}
\dfrac{1}{(n+1)!}\cdot \dfrac{n+2}{n+1}<  \dfrac{1}{n!n}  &
\Leftarrow &  n!n(n+2)<(n+1)!(n+1) \\ & \Leftarrow &
n(n+2)<(n+1)(n+1)\\ & \Leftarrow &
n^2+2n<n^2+2n+1 \\
& \Leftarrow &  0 <1,\end{array}   $$ and the theorem is proved.
\end{pf}
\begin{thm} $e$ is irrational.\label{thm:e-irrational}
\end{thm}
\begin{pf}
Assume $e$ is rational, with $e=\dfrac{p}{q}$, where $p$ and $q$ are
positive integers and the fraction is in lowest terms. Since $qe =
p$, an integer, $q!e$ must also be an integer. Also $q!y_q$ must be
an integer, since
$$q!y_q = q!\left(1 + \dfrac{1}{1!}+ \dfrac{1}{2!}+ \dfrac{1}{3!}+\cdots +
\dfrac{1}{q!}\right).  $$ But by Lemma \ref{lem:e-y_n},
$$0 < e - y_q < \dfrac{1}{q!q} \implies 0 < q!(e-y_q)<\dfrac{1}{q}\leq 1. $$
That is, the integer $q!(e-y_q)$ is strictly between $0$ and $1$, a
contradiction.
\end{pf}


\begin{thm}
The sequence $\seq{n^{1/n}}{n=1}{+\infty}$ is decreasing for $n\geq
3$. Also, $n^{1/n}\rightarrow 1$ as $\ngrows$.
\end{thm}
\begin{pf}
Consider the ratio
$$ \dfrac{(n+1)^{n}}{n^{n+1}} =\left(1+\dfrac{1}{n}\right)^{n}\cdot\dfrac{1}{n}  <\dfrac{e}{n}.   $$
Thus for $n \geq 3,$ $$ \dfrac{(n+1)^{n}}{n^{n+1}}<1 \implies
(n+1)^{1/(n+1)}<n^{1/n}.$$ Hence we have $$
3^{1/3}>4^{1/4}>5^{1/5}>\cdots . $$ Clearly, if  $n>1$ then
$n^{1/n}>1^{1/n}=1$. Also, by the Binomial Theorem, again, if
$n>1$,
$$ \left(1+\sqrt{\dfrac{2}{n}}\right)^n = 1^n + \binom{n}{1}\left(\sqrt{\dfrac{2}{n}}\right)^1
+ \binom{n}{2}\left(\sqrt{\dfrac{2}{n}}\right)^2 + \cdots > 1  +
\binom{n}{2}\left(\sqrt{\dfrac{2}{n}}\right)^2  = 1 +
\dfrac{n(n-1)}{2}\left(\dfrac{2}{n}\right) = n.
$$ We then conclude that
$$ 1 < n^{1/n}<  1+\sqrt{\dfrac{2}{n}},$$and that $n^{1/n}\rightarrow
1$ follows from the Sandwich Theorem.
\end{pf}
\begin{rem}
$2^{1/2} = 4^{1/4}$.
\end{rem}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
What's wrong with the following? Since the product of the limits is
the limit of the product,
$$ e=\lim _{\ngroes}  \left(1+\dfrac{1}{n}\right)^n =
\underbrace{\left(\lim _{\ngroes} 1+\dfrac{1}{n}\right)\cdot
\left(\lim _{\ngroes} 1+\dfrac{1}{n}\right) \cdots \left(\lim
_{\ngroes} 1+\dfrac{1}{n}\right) }_{n\ \mathrm{times}} =
\underbrace{1\cdot 1 \cdots 1}_{n\ \mathrm{times}} =1.$$
\begin{answer}
The product rule for limits only applies to a finite number of
factors. Here the number of factors grows with $n$.
\end{answer}
\end{pro}
\begin{pro}
Demonstrate that for all strictly positive integers $n$:
$$ \cos \dfrac{\pi}{2^{n+1}} = \dfrac{1}{2}\underbrace{\sqrt{2+\sqrt{2+\sqrt{2+\cdots +\sqrt{2} } } }}_{n\ \mathrm{radicands}}, $$
$$ \sin \dfrac{\pi}{2^{n+1}} = \dfrac{1}{2}\underbrace{\sqrt{2-\sqrt{2+\sqrt{2+\cdots +\sqrt{2} } } }}_{n\ \mathrm{radicands}}. $$
Hence deduce {\em Vi\`{e}te's Formula for $\pi$:}
$$ \pi = \lim _{\ngroes} 2^n\underbrace{\sqrt{2-\sqrt{2+\sqrt{2+\cdots +\sqrt{2} } } }}_{n\ \mathrm{radicands}}. $$

\end{pro}

\begin{pro}\label{pro:converging-to-log2}
Prove that the sequence $\seq{\sum _{k=n}
^{2n}\dfrac{1}{k}}{n=1}{+\infty}$ converges to $\log 2$.

\begin{answer}
From Theorem \ref{thm:e}, and since $x\mapsto \log x$ is increasing,
$$  \left(1+\dfrac{1}{k+1}\right)^{k+1} < e < \left(1+\dfrac{1}{k}\right)^{k+1}
\implies (k+1)\log \left(1+\dfrac{1}{k+1}\right) <1 < (k+1)\log
\left(1+\dfrac{1}{k}\right).
$$Rearranging,
$$ \log \dfrac{k+2}{k+1}<\dfrac{1}{k+1}< \log \dfrac{k+1}{k}. $$
Summing from $k=n-1$ to $k=2n-1$,
$$\begin{array}{lll} \sum _{k=n-1} ^{2n-1}\log \dfrac{k+2}{k+1}< \sum _{k=n-1} ^{2n-1}\dfrac{1}{k+1}< \sum _{k=n-1} ^{2n-1} \log \dfrac{k+1}{k}
& \implies &  \log \dfrac{2n+1}{n}< \dfrac{1}{n}+\dfrac{1}{n+1} +
\cdots + \dfrac{1}{2n}<\log \dfrac{2n}{n-1}\\

& \implies & \log
\left(2+\dfrac{1}{n}\right)<\dfrac{1}{n}+\dfrac{1}{n+1} + \cdots +
\dfrac{1}{2n} <\log \left(2+\dfrac{2}{n-1}\right)\end{array}$$and
the result follows from the Sandwich Theorem.
\end{answer}
\end{pro}
\begin{pro}
Prove that the sequence $\seq{1 - \frac{1}{2} + \frac{1}{3} -
\frac{1}{4} + \cdots + \frac{1}{2n - 1} -
\frac{1}{2n}}{n=1}{+\infty}$ converges to $\log 2$.
\begin{answer}
Observe that
$${\everymath{\displaystyle}\begin{array}{lcl}
\left( 1 + \frac{1}{2} + \frac{1}{3} + \frac{1}{4} + \cdots + \frac{1}{2n - 1} + \frac{1}{2n}\right) & & \\
\qquad - 2\left(\frac{1}{2} + \frac{1}{4} + \frac{1}{6} + \cdots + \frac{1}{2n}\right) & & \\
& = &
\left( 1 + \frac{1}{2} + \frac{1}{3} + \frac{1}{4} + \cdots + \frac{1}{2n - 1} + \frac{1}{2n}\right)  \\
& & \qquad - 2\cdot\frac{1}{2}\left(1 + \frac{1}{2} + \frac{1}{3} +  \frac{1}{4} + \cdots + \frac{1}{n}\right)       \\
& = &
\left( 1 + \frac{1}{2} + \frac{1}{3} + \frac{1}{4} + \cdots + \frac{1}{2n - 1} + \frac{1}{2n}\right)  \\
& & \qquad - \left(1 + \frac{1}{2} + \frac{1}{3} +  \frac{1}{4} + \cdots + \frac{1}{n}\right)      \\
& = & \frac{1}{n + 1} + \frac{1}{n + 2} + \cdots + \frac{1}{2n},
\end{array}}$$and use the result of problem
\ref{pro:converging-to-log2}.
\end{answer}
\end{pro}


\begin{pro}
Let $n$ be a strictly positive integer and let $x_n$ denote the
unique real solution in $[0,1]$ of the equation $x^n+x-1=0$. Prove that $x_n
\rightarrow 1$ as $\ngroes$.
\end{pro}
\begin{pro}
Let $$a_n = \sqrt{n+ \sqrt{(n-1)+ \sqrt{(n-2)+\cdots +
\sqrt{2+\sqrt{1}}}}},
$$for $n\geq 1$. Prove that $a_n-\sqrt{n}\rightarrow \dfrac{1}{2}$.
\end{pro}
\begin{pro}
Prove that $e$ is not a quadratic irrational.
\begin{answer}

We begin by looking at the Taylor series for $e^x$:
\begin{equation*}
e^x=\sum_{k=0}^{\infty}\frac{x^k}{k!}.
\end{equation*}

This converges for every $x\in\mathbb{R}$, so
$e=\sum_{k=0}^{\infty}\frac{1}{k!}$ and
$e^{-1}=\sum_{k=0}^{\infty}(-1)^k\frac{1}{k!}$. Arguing by
contradiction, assume $ae^2+be+c=0$ for integers $a$, $b$ and $c$.
That is the same as $ae+b+ce^{-1}=0$.

Fix $n>\absval{a}+\absval{c}$, then $a,c\mid n!$ and $\forall k\le
n$, $k!\mid n!\;$. Consider
\begin{align*}
0=n!(ae+b+ce^{-1})&=an!\sum_{k=0}^{\infty}\frac{1}{k!}+b+ cn!\sum_{k=0}^{\infty}(-1)^k\frac{1}{k!}\\
&=b+\sum_{k=0}^n (a+c(-1)^k)\frac{n!}{k!}+\sum_{k=n+1}^\infty
(a+c(-1)^k)\frac{n!}{k!}
\end{align*}
Since $k!\mid n!$ for $k\le n$, the first two terms are integers. So
the third term should be an integer. However,
\begin{align*}
\absval{\sum_{k=n+1}^\infty (a+c(-1)^k)\frac{n!}{k!}}&\le (\absval{a}+\absval{c})\sum_{k=n+1}^\infty \frac{n!}{k!}\\
&=(\absval{a}+\absval{c})\sum_{k=n+1}^\infty \frac{1}{(n+1)(n+2)\dotsb k}\\
&\le (\absval{a}+\absval{c})\sum_{k=n+1}^\infty (n+1)^{n-k}\\
&=(\absval{a}+\absval{c})\sum_{t=1}^\infty (n+1)^{-t}\\
&=(\absval{a}+\absval{c})\frac{1}{n}
\end{align*}
is less than $1$ by our assumption that $n>\absval{a}+\absval{c}$.
Since there is only one integer which is less than $1$ in absolute
value, this means that $\sum_{k=n+1}^\infty
(a+c(-1)^k)\frac{1}{k!}=0$ for every sufficiently large $n$ which is
not the case because
\begin{equation*}
\sum_{k=n+1}^\infty (a+c(-1)^k)\frac{1}{k!}-\sum_{k=n+2}^\infty
(a+c(-1)^k)\frac{1}{k!}=(a+c(-1)^{n+1})\frac{1}{(n+1)!}
\end{equation*}
is not identically zero. The contradiction completes the proof.
\end{answer}
\end{pro}
\begin{pro}
Find $\lim _{\ngrows} \prod _{k=1} ^n \left(1+\dfrac{k}{n}\right)$.
\end{pro}
\begin{pro}
\label{pro:quadratic_integers} A {\em quadratic integer} is any
number $x$ that satisfies an equation
$$x^2 + mx + n = 0, \ \ (m,n)\in \BBZ^2.$$
 Prove that
the real quadratic integers are dense in the reals.
\begin{answer}
Apply Problem \ref{pro:additive-closed-groups}. We can apply this to
the stated problem by observing that for a fixed $d$, a positive
integer without square factors, the numbers $a + b\sqrt{d}$ are
quadratic integers if $a, b$ are rational integers, and that the set
of such numbers is an additive group of reals. Clearly the closure
of this group (it, together with its set of limit points) is a group
too, for if $x_n \rightarrow x$ and $y_n \rightarrow y$ then
$x_n+y_n \rightarrow x+y$. The new group is not of form (i) or (ii),
hence must be all reals, and the proof (of a slightly stronger
theorem) is complete.
\end{answer}

\end{pro}

\end{multicols}
\section{Averages of Sequences}
\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} In this section
    we will examine some classical results that allow us to compute
    more complicated limits. Had we the language of matrices, most
    results here could be deduced from a classical result of
    Toeplitz. Since we don't, we will develop ad hoc methods which
    are interesting  by themselves.
\end{minipage}}
    \end{center}

We start with the following discrete analogues of L'H\^{o}pital's
Rule.
\begin{thm}Let $\seq{x_n}{n=1}{+\infty}$,
$\seq{y_n}{n=1}{+\infty}$, be two sequences of real numbers such
that $x_n \rightarrow 0$, $y_n\rightarrow 0$. Suppose, moreover,
that the $y_n$ are eventually strictly decreasing. Then
$$\lim _{\ngrows}\dfrac{x_n-x_{n-1}}{y_n-y_{n-1}} =  \lim _{\ngrows} \dfrac{x_n}{y_n},  $$
provided the sinistral limit exists (be it finite or $+\infty$).
\end{thm}
\begin{pf}
Assume first that
$\dfrac{x_{n-1}-x_n}{y_{n-1}-y_n}=\dfrac{x_n-x_{n-1}}{y_n-y_{n-1}}
\rightarrow L$, a finite real number. Then, given $\varepsilon >0$
we can find $N>0$ such that for $n> N$,
$$ L-\varepsilon <\dfrac{x_{n-1}-x_{n}}{y_{n-1}-y_{n}} <L+\varepsilon, \qquad y_n < y_{n-1}.
$$Thus $(L-\varepsilon)(y_{n-1}-y_{n})   < x_{n-1}-x_{n}   <
(L+\varepsilon)(y_{n-1}-y_{n}),$ and repeating this inequality for
$n+1, n+2, \ldots , n+m$,
$$\begin{array}{lllll} (L-\varepsilon)(y_{n}-y_{n+1}) &  < & x_{n}-x_{n+1}  & < & (L+\varepsilon)(y_{n}-y_{n+1}), \\
 (L-\varepsilon)(y_{n+1}-y_{n+2}) &  < & x_{n+1}-x_{n+2}
& < & (L+\varepsilon)(y_{n+1}-y_{n+2}), \\
& & \vdots & & \\
 (L-\varepsilon)(y_{m+n-1}-y_{m+n}) & < & x_{m+n-1}-x_{m+n}
& < & (L+\varepsilon)(y_{m+n-1}-y_{m+n}).
\end{array} $$
Adding columnwise,
$$(L-\varepsilon)(y_{n}-y_{m+n})  <  x_{n}-x_{m+n}
 <  (L+\varepsilon)(y_{n}-y_{m+n}). $$
Letting $m\rightarrow +\infty$, and since the $y_n$ are strictly
positive,
$$(L-\varepsilon)y_{n}  <  x_{n}
 <  (L+\varepsilon)y_{n} \implies L-\varepsilon  < \dfrac{x_{n}}{y_{n}}
 <  L+\varepsilon \implies \dfrac{x_{n}}{y_{n}} \rightarrow L $$as
 $\ngrows$.

\bigskip

If $\dfrac{x_{n-1}-x_{n}}{y_{n-1}-y_{n}} $ diverges to $+\infty$
then for all $M>0$ we can find $N'>0$ such that for all $n\geq N'$,
$$\dfrac{x_{n-1}-x_{n}}{y_{n-1}-y_{n}} >M \implies x_{n-1}-x_{n}>M(y_{n-1}-y_{n}).  $$
Reasoning as above, for positive integers $m\geq 0$, $$
x_{n}-x_{m+n}>M(y_{n}-y_{m+n}).
$$Taking the limit as $m\rightarrow +\infty$,
$$ x_n \geq My_n \implies \dfrac{x_n}{y_n}\geq M \implies \dfrac{x_n}{y_n}\rightarrow +\infty .  $$
\end{pf}
\begin{thm}[Stolz's Theorem]\label{thm:stolz} Let $\seq{a_n}{n=1}{+\infty}$,
$\seq{b_n}{n=1}{+\infty}$, be two sequences of real numbers. Suppose
that $\seq{b_n}{n=1}{+\infty}$ is strictly increasing for
sufficiently large $n$ and that $b_n\rightarrow +\infty$ as
$\ngrows$. Then
$$\lim _{\ngrows}\dfrac{a_n-a_{n-1}}{b_n-b_{n-1}} =  \lim _{\ngrows} \dfrac{a_n}{b_n},  $$
provided the sinistral side exists (be it finite or infinite).
\end{thm}
\begin{pf}
Assume first that $\dfrac{a_n-a_{n-1}}{b_n-b_{n-1}}\rightarrow L$,
finite. Then  for every $\varepsilon > 0$ there is $N>0$  such that
$(\forall) n \geq N$,
$$ L-\varepsilon < \dfrac{a_{n+1}-a_n}{b_{n+1}-b_n} < L + \varepsilon , \qquad b_{n+1}>b_n.$$
This means that
$$ (L-\varepsilon)(b_{n+1}-b_n) < a_{n+1}-a_n < (L+\varepsilon)(b_{n+1}-b_n) $$
By iterating the above relation for $N+1, N+2, \ldots , m+N$ we
obtain
$$\begin{array}{lllll} (L-\varepsilon)(b_{N+1}-b_N) &  < & a_{N+1}-a_N  & < & (L+\varepsilon)(b_{N+1}-b_N), \\
 (L-\varepsilon)(b_{N+2}-b_{N+1}) &  < & a_{N+2}-a_{N+1}
& < & (L+\varepsilon)(b_{N+2}-b_{N+1}), \\
& & \vdots & & \\
 (L-\varepsilon)(b_{m+N}-b_{m+N-1}) & < & a_{m+N}-a_{m+N-1}
& < & (L+\varepsilon)(b_{m+N}-b_{m+N-1}).
\end{array} $$
Adding columnwise,
$$ (L-\varepsilon)(b_{m+N}-b_N) < a_{m+N}-a_N < (L+\varepsilon)(b_{m+N}-b_N) \implies \absval{\dfrac{a_{m+N}-a_N}{b_{m+N}-b_N}-L}<\varepsilon . $$
Now,
$$\dfrac{a_{m+N}}{b_{m+N}} -L =\dfrac{a_{N}-Lb_N}{b_{m+N}} + \left(1-\dfrac{b_{N}}{b_{m+N}}\right)\left(\dfrac{a_{m+N}-a_N}{b_{m+N}-b_N}-L\right),    $$
so by the Triangle Inequality
$$\absval{\dfrac{a_{m+N}}{b_{m+N}} -L} \leq \absval{\dfrac{a_{N}-Lb_N}{b_{m+N}}} + \absval{1-\dfrac{b_{N}}{b_{m+N}}}
\absval{\dfrac{a_{m+N}-a_N}{b_{m+N}-b_N}-L}.  $$Since $N$ is fixed,
$\dfrac{a_{N}-Lb_N}{b_{m+N}} \rightarrow 0$ and
$\dfrac{b_{N}}{b_{m+N}}\rightarrow 0$ as $m\rightarrow +\infty$. Thus
the dextral side is arbitrarily small, proving that
$\dfrac{a_m}{b_m}\rightarrow L$ as $m\rightarrow +\infty$.


\bigskip

Assume now that $\dfrac{a_n-a_{n-1}}{b_n-b_{n-1}}\rightarrow
+\infty$. For sufficiently large $n$ thus $a_n-a_{n-1}>b_n-b_{n-1}$.
Thus the $a_n$ are eventually increasing and $a_n\rightarrow
+\infty$ as $\ngrows$. Applying the results above to the
$\dfrac{b_n}{a_n}$ we obtain
$$ \lim _{\ngrows} \dfrac{b_n}{a_n} =   \lim _{\ngrows}
\dfrac{b_n-b_{n-1}}{a_n-a_{n-1}}=0$$ and so $ \lim _{\ngrows}
\dfrac{a_n}{b_n}= +\infty$ too. \end{pf}


\begin{thm}[Ces\`{a}ro]\label{thm:cesaro}
If a sequence of real numbers converges to a number, then its
sequence of arithmetic means converges to the same number, that is,
if $x_n \rightarrow a$ then $\dfrac{x_1+x_2+\cdots +
x_n}{n}\rightarrow a$.
\end{thm}
\begin{f-pf}
Let $a_n= x_1+x_2+\cdots + x_n$ and $b_n=n$ in Stolz's Theorem.
\end{f-pf}
\begin{s-pf}It is instructive to give an ad hoc proof of this
result.
Given $\varepsilon > 0$ there exists $N>0$ such that if $n\geq N$
then $\absval{x_n-a}<\varepsilon$. Then
$$\absval{\dfrac{x_1+x_2+\cdots +
x_n}{n}-a} = \absval{\dfrac{(x_1-a)+(x_2-a)+ \cdots
+(x_n-a)}{n}}\leq \dfrac{\absval{(x_1-a)}+\absval{(x_2-a)}+ \cdots
+\absval{(x_n-a)}}{n}.
$$Now we run into a slight problem. We can control the differences
$|x_k-a|$ after a certain point, but the early differences need to
be taken care of. To this end we consider the differences $x_k-a$
with $k\leq \floor{\sqrt{n}}$ or $k>\floor{\sqrt{n}}$ where $n$ is so large
that $\floor{\sqrt{n}}\geq N$. We have
$$\begin{array}{lll}\dfrac{\absval{(x_1-a)}+\absval{(x_2-a)}+ \cdots
+\absval{(x_n-a)}}{n} &  = &
\dfrac{\absval{(x_1-a)}+\absval{(x_2-a)}+ \cdots
+\absval{(x_{\floor{\sqrt{n}}}-a)}}{n}\\  & &  +
\dfrac{\absval{(x_{\floor{\sqrt{n}}+1}-a)}+\absval{(x_{\floor{\sqrt{n}}+2}-a)}+ \cdots
+\absval{(x_n-a)}}{n} \\
& < &\dfrac{\floor{\sqrt{n}}\max _{1 \leq k \leq
\floor{\sqrt{n}}}\absval{x_k-a}}{n} +
\dfrac{(n-\floor{\sqrt{n}})\varepsilon}{n}.
 \end{array} $$
These two last quantities tend to $0$ as $n\rightarrow +\infty$,
from where the result follows.
\end{s-pf}
\begin{exa}
Since $n^{1/n}\rightarrow 1$, $\dfrac{1+2^{1/2}+3^{1/3}+\cdots +
n^{1/n}}{n}\rightarrow 1$.
\end{exa}
\begin{exa}
Since $\dfrac{1}{n}\rightarrow 0$,
$\dfrac{1+\dfrac{1}{2}+\dfrac{1}{3}+\cdots +
\dfrac{1}{n}}{n}\rightarrow 0$.
\end{exa}
\begin{exa}
Since $\left(1+\dfrac{1}{n}\right)^n\rightarrow e$,
$\dfrac{\left(1+\dfrac{1}{1}\right)^1+\left(1+\dfrac{1}{2}\right)^2+\left(1+\dfrac{1}{3}\right)^3+\cdots
+ \left(1+\dfrac{1}{n}\right)^n}{n}\rightarrow e$.
\end{exa}
\begin{exa}
The converse of Ces\`{a}ro's Theorem is false. For, the sequence
$a_n= (-1)^n$ oscillates and does not converge. But its sequence of
averages is $b_n = \dfrac{1-1+1-1+\cdots + (-1)^n}{n}  \rightarrow 0
$ as $\ngroes$ since the numerator is either $0$ or $-1$.
\end{exa}
\begin{thm}\label{thm:cesaro-2}
If a sequence of positive real numbers converges to a number, then
its sequence of geometric means converges to the same number, that
is, if  $\forall n>0, \quad x_n \geq 0$ and $x_n \rightarrow a$ then
$(x_1x_2\cdots x_n)^{1/n}\rightarrow a$.
\end{thm}
\begin{pf}The proof mimics Ces\`{a}ro's Theorem \ref{thm:cesaro}.
Since $x_n\rightarrow a$,  for all $\varepsilon >0$ there is $N>0$
such that for all $n \geq N$,
$$\absval{x_n-a}<\varepsilon \implies a-\varepsilon <x_n
<a+\varepsilon.$$ Then
$$\left(\min _{1\leq k \leq \floor{\sqrt{n}}} x_k\right)^{\floor{\sqrt{n}}/n}\left(x_{\floor{\sqrt{n}}+1} \cdots  x_n\right)^{1/n}
\leq  (x_1x_2\cdots x_{\floor{\sqrt{n}}}x_{\floor{\sqrt{n}}+1}
\cdots  x_n)^{1/n} \leq \left( \max _{1\leq k \leq \floor{\sqrt{n}}}
x_k\right)^{\floor{\sqrt{n}}/n}\left(x_{\floor{\sqrt{n}}+1} \cdots
x_n\right)^{1/n}. $$ This gives, for $\floor{\sqrt{n}}\geq N$,
$$\left(\min _{1\leq k \leq \floor{\sqrt{n}}} x_k\right)^{\floor{\sqrt{n}}/n}\left(a-\varepsilon\right)^{(n-\floor{\sqrt{n}})/n} \leq  (x_1x_2\cdots x_{\floor{\sqrt{n}}}x_{\floor{\sqrt{n}}+1}
\cdots  x_n)^{1/n} \leq  \left(\max _{1\leq k \leq \floor{\sqrt{n}}}
x_k\right)^{\floor{\sqrt{n}}/n}\left(a+\varepsilon\right)^{(n-\floor{\sqrt{n}})/n}
.$$ Now, both $\left(\min _{1\leq k \leq \floor{\sqrt{n}}}
x_k\right)^{\floor{\sqrt{n}}/n}$ and $\left(\max _{1\leq k \leq
\floor{\sqrt{n}}} x_k\right)^{\floor{\sqrt{n}}/n}$ converge to $1$
as $n\rightarrow +\infty$ by virtue of Theorem
\ref{thm:root-n-of-a-to-1-goes},  and again by the same theorem,
$$\left(a-\varepsilon\right)^{(n-\floor{\sqrt{n}})/n}= \left(a-\varepsilon\right)\left(\left(a-\varepsilon\right)^{-1}\right)^{\floor{\sqrt{n}}/n}\rightarrow
a-\varepsilon, \qquad
\left(a+\varepsilon\right)^{(n-\floor{\sqrt{n}})/n} =
\left(a+\varepsilon\right)\left(\left(a+\varepsilon\right)^{-1}\right)^{\floor{\sqrt{n}}/n}\rightarrow
a+\varepsilon$$ as $n\rightarrow +\infty$. This gives the result.
\end{pf}

\begin{exa}
Since $e_n  = \left(\dfrac{n+1}{n}\right)^n\rightarrow e$, then by
Theorem \ref{thm:cesaro-2}
$$ \left(e_1e_2\cdots e_n\right)^{1/n} =
\left(\left(\dfrac{2}{1}\right)^1\left(\dfrac{3}{2}\right)^2\left(\dfrac{4}{3}\right)^3\cdots
\left(\dfrac{n+1}{n}\right)^n\right)^{1/n}
=\left(\dfrac{(n+1)^n}{n!}\right)^{1/n}\rightarrow e.$$This gives
$\dfrac{n}{(n!)^{1/n}} =\dfrac{n}{n+1}\cdot
\dfrac{n+1}{(n!)^{1/n}}\rightarrow 1\cdot e=e.$\end{exa}


\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Let $\seq{a_n}{n=1}{+\infty}$ be a sequence of strictly positive real
numbers such that $\dfrac{a_n}{a_{n-1}}\rightarrow a>0$. Prove that
$$\lim _{\ngrows} \dfrac{a_n}{a_{n-1}} = \lim _{\ngrows} \sqrt[n]{a_n}.  $$
\end{pro}
\begin{pro}
Let $x_n\rightarrow a$ and $y_n\rightarrow b$. Prove that
$\dfrac{x_1y_n+x_2y_{n-1}+\cdots + x_ny_1}{n}\rightarrow ab$.
\end{pro}

\begin{pro}
Prove that $\lim _{\ngroes} \left(\binom{2n}{n}\right)^{1/n} = 4$.
\end{pro}
\begin{pro}
Prove that $\lim _{\ngroes} \dfrac{1}{n}\left(n(n+1)\cdots
(n+n)\right)^{1/n} = \dfrac{4}{e}$.
\end{pro}
\begin{pro}
Prove that $\lim _{\ngroes} \dfrac{1}{n}\left(1\cdot 3\cdot 5\cdots
(2n-1)\right)^{1/n} = \dfrac{2}{e}$.
\end{pro}
\begin{pro}
Prove that $\lim _{\ngroes}
\dfrac{1}{n^2}\left(\dfrac{(3n)!}{n!}\right)^{1/n} = \dfrac{27}{e^2}$.

\end{pro}
\end{multicols}















\section{Orders of Infinity}

\begin{center}
    \fcolorbox{blue}{yellow}{
    \begin{minipage}{.90\linewidth}
    \noindent\textcolor{red}{\textbf{Why bother?}} It is clear that
    the sequences $\seq{n}{n=1}{+\infty}$ and
    $\seq{n^2}{n=1}{+\infty}$ both tend to $+\infty$ as $\ngrows$.
    We would like now to refine this statement and compare one with
    the other. In other words, we will examine their speed towards
    $+\infty$.
\end{minipage}}
    \end{center}

\begin{df}
We write $a_n = \boo{b_n}$ if $\exists  C>0$, $\exists N>0$ such
that $\forall n\geq N$  we have $\absval{a_n}\leq C\absval{b_n}$. We
then say that $a_n$ is {\em Big Oh} of $b_n$, or that $a_n$ {\em is
of order at most $b_n$} as $\ngroes$. Observe that this means that
$\absval{\dfrac{a_n}{b_n}}$ is bounded for sufficiently large $n$.
The notation $a_n << b_n$, due to Vinogradov, is often used as a
synonym of $a_n = \boo{b_n}$.
\end{df}
\begin{rem}
A sequence $\seq{a_n}{n=1}{+\infty}$ is bounded if and only if
$a_n<<1$.
\end{rem}
An easy criterion to identify Big Oh relations is the following.

\begin{thm}\label{thm:sufficient-conds-for-big-oh}
If $\lim _{\ngroes} \dfrac{a_n}{b_n} =c\in\BBR$, then $a_n<<b_n$.
\end{thm}
\begin{pf}
By Theorem \ref{thm:conve-seq-bounded-be}, a convergent sequence is
bounded, hence the sequence $\seq{\dfrac{a_n}{b_n}}{n=1}{+\infty}$
is bounded: so for sufficiently large $n$,
$\absval{\dfrac{a_n}{b_n}}<C$ for some constant $C>0$. This proves
the theorem.
\end{pf}
\begin{rem}
The $=$ in the relation $a_n = \boo{b_n}$ is not a true equal sign.
For example $n^2 = \boo{n^3}$ since $\lim _{\ngroes}
\dfrac{n^2}{n^3} = 0$ and so $n^2<<n^3$ by Theorem
\ref{thm:sufficient-conds-for-big-oh}. On the other hand, $\lim
_{\ngroes} \dfrac{n^3}{n^2} = +\infty$ so that for sufficiently
large $n$, and for all $M>0$, $n^3>Mn^2$, meaning that $n^3\neq
\boo{n^2}$. Thus the Big Oh relation is not symmetric.\footnote{One
should more properly write $a_n\in \boo{b_n}$, as $\boo{b_n}$ is the
set of sequences growing to infinity no faster than $b_n$, but one
keeps the $=$ sign for historical reasons.}
\end{rem}
\begin{thm}[Lexicographic Order of Powers]\label{thm:lexico-powers-big-oh} Let $(\alpha,
\beta)\in\BBR^2$ and consider the sequences
$\seq{n^\alpha}{n=1}{+\infty}$ and $\seq{n^\beta}{n=1}{+\infty}$.
Then $n^\alpha << n^\beta \iff \alpha \leq \beta$.
\end{thm}
\begin{pf}
If $\alpha \leq \beta$ then $\lim_{\ngroes}
\dfrac{n^\alpha}{n^\beta} $ is either $1$ (when $\alpha = \beta$) or
$0$ (when $\alpha < \beta$), hence $n^{\alpha}<<n^{\beta}$ by
Theorem \ref{thm:sufficient-conds-for-big-oh}.

\bigskip

If $n^{\alpha}<<n^{\beta}$ then for sufficiently large $n$,
$n^{\alpha}\leq Cn^{\beta}$ for some constant $C>0$. If $\alpha
>\beta$ then this would mean that for all large $n$ we would have $n^{\alpha-\beta}\leq
C$, which is absurd, since for a strictly positive exponent
$\alpha-\beta$, $n^{\alpha-\beta}\rightarrow +\infty$ as
$\ngroes$.\end{pf}
\begin{exa}
As $\ngroes$,
$$ n^{1/10}<<n^{1/3}<<n<<n^{10/9}<<n^2, $$for example.
\end{exa}
\begin{thm}\label{thm:constants-matter-not-big-oh} If $c\in\BBR\setminus\{0\}$ then $\boo{ca_n} = \boo{a_n}$, that is, the set of sequences of order at most $ca_n$ is the same set at those of order at most
$a_n$.\end{thm}
\begin{pf}We prove that  $b_n= \boo{ca_n}\iff b_n = \boo{a_n}$. If $b_n= \boo{ca_n}$ then there are constants $C>0$ and
$N>0$ such that $\absval{b_n} \leq C\absval{ca_n}$ whenever $n \geq
N$. Therefore, $\absval{b_n} \leq C'\absval{a_n}$ whenever $n \geq
N$, where $C' = C \absval{c}$, meaning that $b_n=\boo{a_n}$.
Similarly, if $b_n= \boo{a_n}$ then there are constants $C_1>0$ and
$N_1>0$ such that $\absval{b_n} \leq C_1\absval{a_n}$ whenever $n
\geq N_1$. Since $c\neq 0$ this is equivalent to  $\absval{b_n} \leq
\dfrac{C_1}{\absval{c}}\absval{ca_n}=C''\absval{ca_n}$
whenever $n \geq N_1$, where $C'' = \dfrac{C_1}{\absval{c}}$, meaning that
$b_n=\boo{ca_n}$. Therefore, $\boo{a_n} = \boo{ca_n}$.
\end{pf}
\begin{exa}
As $\ngroes$,
$$ \boo{n^3}=\boo{\dfrac{n^3}{3}}=\boo{4n^3}. $$
\end{exa}

\begin{thm}[Sum Rule]\label{thm:sum-rule-big-oh}Let $a_n = \boo{x_n}$ and $b_n = \boo{y_n}$. Then $a_n + b_n = \boo{\max(\absval{x_n}, \absval{y_n})}$.\end{thm}
\begin{pf} There exist strictly positive constants $C_1, N_1, C_2, N_2$ such that
$$n\geq N_1, \implies \absval{a_n} \leq C_1\absval{x_n}\qquad \mathrm{and} \qquad n\geq N_2, \implies \absval{b_n} \leq C_2\absval{y_n}.$$
Let $N' = \max(N_1, N_2)$. Then for $n \geq N'$,  by the Triangle
inequality $$\absval{a_n + b_n} \leq \absval{a_n} + \absval{b_n}
\leq C_1\absval{x_n} + C_2\absval{y_n}.$$ Let $C' = \max(C_1,C_2)$.
Then
$$ \absval{a_n + b_n}  \leq  C'(\absval{x_n} + \absval{y_n}) \leq 2C' \max(\absval{x_n}, \absval{y_n}),
$$ whence the theorem follows.
\end{pf}


\begin{cor}\label{cor:leading-term-dominates}Let $a_n= k_0n^m+k_1n^{m-1}+k_2n^{m-2}+\cdots + k_{m-1}n+k_m$ be a polynomial of degree $m$ in $n$ with real number coefficients.
Then $a_n=\boo{n^m}$, that is, $a_n$ is of order at most its leading
term.\end{cor}
\begin{pf} By the Sum Rule Theorem \ref{thm:sum-rule-big-oh} the leading term dominates.\end{pf}

\begin{thm}[Transitivity Rule]\label{thm:transitivity-big-oh} If $a_n = O(b_n)$ and $b_n = O(c_n)$, then $a_n = \boo{c_n}$.\end{thm}
\begin{pf} There are strictly positive constants $C_1, C_2, N_1, N_2$  such that
$$n\geq N_1, \implies \absval{a_n} \leq C_1\absval{b_n}\qquad \mathrm{and} \qquad n\geq N_2, \implies \absval{b_n} \leq C_2\absval{c_n}.$$
If $n\geq \max (N_1,N_2)$, then
 $\absval{a_n} \leq C_1\absval{b_n} \leq  C_1C_2\absval{c_n}=C\absval{c_n}$ , with $C=C_1C_2$. This gives  $a_n = \boo{c_n}$.
\end{pf}
\begin{exa}
By Corollary \ref{cor:leading-term-dominates}, $ 5n^4-2n^2+100n-8 =
\boo{5n^4}$. By Theorem \ref{thm:constants-matter-not-big-oh},
$\boo{5n^4}=\boo{n^4}$. Hence $$5n^4-2n^2+100n-8=\boo{n^4}.$$
\end{exa}

\begin{thm}[Multiplication Rule]\label{thm:multiply-big-oh} If $a_n = O(x_n)$ and $b_n = O(y_n)$, then $a_nb_n = \boo{x_ny_n}$.\end{thm}
\begin{pf} There are strictly positive constants $C_1, C_2, N_1, N_2$  such that
$$n\geq N_1, \implies \absval{a_n} \leq C_1\absval{x_n}\qquad \mathrm{and} \qquad n\geq N_2, \implies \absval{b_n} \leq C_2\absval{y_n}.$$
If $n\geq \max (N_1,N_2)$, then
 $\absval{a_nb_n} \leq C_1C_2\absval{x_ny_n} = C\absval{x_ny_n}$, with $C=C_1C_2$. This gives  $a_nb_n = \boo{x_ny_n}$.
\end{pf}

\begin{thm}[Lexicographic Order of Exponentials]\label{thm:lexico-exps-big-oh} Let $(a,
b)\in\BBR^2$, $a>1$, $b>1$, and consider the sequences
$\seq{a^n}{n=1}{+\infty}$ and $\seq{b^n}{n=1}{+\infty}$. Then $a^n
<< b^n \iff a \leq b$.
\end{thm}
\begin{pf}
Put $r=\dfrac{a}{b}$, and use Theorems
\ref{thm:geom-sequence-to-zero-goes} and
\ref{thm:sufficient-conds-for-big-oh}.
\end{pf}
\begin{exa}
$\dfrac{1}{2^n}<< 1 << 2^n << e^n << 3^n$.
\end{exa}
\begin{lem}\label{lem:exps-are-faster-than-powers-big-oh} Let $a\in\BBR$,
$a>1$, $k\in\BBN\setminus \{0\}$. Then $n^k << a^n$.
\end{lem}
\begin{pf}
By Theorem \ref{thm:exps-are-faster-than-powers}, $\lim _{\ngroes}
\dfrac{n^k}{a^n}=0$. Now apply Theorem
\ref{thm:sufficient-conds-for-big-oh}.
\end{pf}
\begin{thm}[``Exponentials are faster than powers'']\label{thm:exps-are-faster-than-powers-big-oh} Let $a\in\BBR$,
$a>1$, $\alpha\in\BBR$. Then $n^\alpha << a^n$.
\end{thm}
\begin{pf}Put $k=\max (1, \floor{\alpha}+1)$. Then by Theorem
\ref{thm:lexico-powers-big-oh}, $n^\alpha << n^k$. By Lemma
\ref{lem:exps-are-faster-than-powers-big-oh}, $n^k<<a^n$, and by the
Transitivity of Big Oh (Theorem \ref{thm:transitivity-big-oh}),
$n^\alpha << n^k << a^n$.
\end{pf}
\begin{exa}
$$ n^{100}<<e^n. $$
\end{exa}
\begin{thm}[``Logarithms are slower than powers'']\label{thm:logs-are-slower-than-powers-big-oh} Let
$(\alpha ,\beta) \in\BBR^2$, $\alpha >0$. Then $ (\log n)^{\beta}<<
n^\alpha$.
\end{thm}
\begin{pf}If $\beta \leq 0$, then $(\log n)^\beta <<1$ and the assertion is evident, so assume $\beta >0$.
For $x>0$, $\log x < x$. Putting $x=n^{\alpha /\beta}$, we get
$$ \log n^{\alpha /\beta} < n^{\alpha/\beta}  \implies \log n < \dfrac{\beta n^{\alpha/ \beta}}{\alpha}
\implies (\log n)^\beta < \dfrac{\beta^\beta n^\alpha}{\alpha^\beta
},$$ whence $(\log n)^\beta << n^\alpha$.
\end{pf}

By the Multiplication Rule (Theorem \ref{thm:multiply-big-oh}) and
Theorems \ref{thm:lexico-powers-big-oh},
\ref{thm:exps-are-faster-than-powers-big-oh},
\ref{thm:logs-are-slower-than-powers-big-oh}, in order to compare
two expressions of the type $a^nn^b(\log n)^c$ and $u^nn^{v}(\log
n)^{w}$ we simply look at the lexicographic order of the exponents,
keeping in mind that logarithms are slower than powers, which are
slower than exponentials.

\begin{exa}
In increasing order of growth we have
$$\dfrac{1}{e^n}  << {\dfrac{1}{2^n}} <<{\dfrac{1}{n^2}} <<
{\dfrac{1}{\log n}} << {1} <<{(\log\log n)^{10}} <<{\sqrt{\log n}}
<<{\dfrac{n}{\log n}}<< {n} <<{n\log n} <<{e^n}.
$$
\end{exa}
\begin{exa}
Decide which one grows faster as $\ngroes$: $n^{\log n}$ or $(\log
n)^n$.
\end{exa}
\begin{solu}
Since $n^{\log n} = e^{(\log n)^2}$ and $(\log n)^n=e^{n\log\log
n}$, and since $(\log n)^2<<n\log\log n$, we conclude that $n^{\log
n}<<(\log n)^n$.
\end{solu}

\bigskip

We now define two more fairly common symbols in asymptotic analysis.
\begin{df}
We write $a_n = \soo{b_n}$ if $\dfrac{a_n}{b_n}\rightarrow 0$ as
$n\rightarrow +\infty$, and say that $a_n$ is {\em small oh} of
$b_n$, or that $a_n$ {\em grows slower} than $b_n$ as $\ngroes$.
\end{df}
\begin{df}
A sequence $\seq{a_n}{n=1}{+\infty}$ is said to be {\em
infinitesimal} if $a_n=\soo{1}$, that is, if $a_n\rightarrow 0$ as
$\ngroes$.
\end{df}
\begin{rem}
We know from above that for $a>1$ $\lim _{\ngroes}
\dfrac{n^\alpha}{a^n} = 0$, and so $n^\alpha = \soo{a^n}$. Also, for
$\gamma >0$, $\lim _{\ngroes} \dfrac{(\log n)^\beta}{n^\gamma} = 0$,
and so $(\log n)^\beta = \soo{n^\gamma}$.
\end{rem}

\begin{df} We write $a_n \sim b_n$ if $\dfrac{a_n}{b_n}\rightarrow 1$ as
$n\rightarrow +\infty$, and say that $a_n$ is {\em asymptotic} to
$b_n$.
\end{df}
Asymptotic sequences are thus those that grow at the same rate as
the index increases. \vspace{2cm}
\begin{figure}[h]
$$\psset{unit=1pc} \pscircle(-2,0){4}\pscircle(2,0){4}
\pscircle(0,0){1}\rput(0,0){f\sim g}
\pscircle(-3.8,-1){1.7}\rput(-3.8,-1){f=\soo{g}}
\pscircle(3.8,-1){1.7}\rput(3.8,-1){g=\soo{f}}
\rput(-3.3,2.5){f=\boo{g}}\rput(3.3,2.5){g=\boo{f}}
$$\vspace{1cm}\footnotesize\hangcaption{Diagram of $O$ relations.} \label{fig:O-relations}
\end{figure}

\begin{exa}
The sequences $\seq{n^2-n\sin n}{n=1}{+\infty}$,
 $\seq{n^2 + n-1}{n=1}{+\infty}$ are asymptotic since
 $$ \dfrac{n^2-n\sin n}{n^2+n-1} = \dfrac{1-\dfrac{\sin n}{n}}{1+\dfrac{1}{n}-\dfrac{1}{n^2}}\rightarrow 1,
 $$as $\ngrows$.
\end{exa}
\begin{thm}Let $\seq{a_n}{n=1}{+\infty}$ and
$\seq{b_n}{n=1}{+\infty}$ be two properly diverging sequences. Then
$a_n\sim b_n \iff a_n = b_n(1+\soo{1})$.
\end{thm}
\begin{pf}Since the limit is $1>0$, either both diverge to $+\infty$ or both to
$-\infty$. Assume the former, and so, eventually, $b_n$ will be
strictly positive. Now,
$$ \begin{array}{lll}\lim _{\ngroes} \dfrac{a_n}{b_n} = 1 & \iff &  \forall \varepsilon >0, \exists N>0, 1-\varepsilon<\dfrac{a_n}{b_n}<
1+\varepsilon\\
&  \iff &  b_n - b_n\varepsilon < a_n < b_n + b_n\varepsilon\\
&  \iff &  \absval{a_n-b_n}< b_n\varepsilon\\
&  \iff & a_n-b_n = \soo{b_n}.
\end{array}
$$
\end{pf}


The relationship between the three symbols is displayed in figure
\ref{fig:O-relations}.




\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}
Prove that $e^n<<n!$.
\end{pro}

\begin{pro}Prove that $\boo{\boo{a_n}}=\boo{a_n}$.
\end{pro}

\begin{pro} Let $k\in\BBR$ be a constant. Prove that $k+\boo{a_n} = \boo{k+a_n} = \boo{a_n}$.

\end{pro}
\begin{pro} Let $k\in\BBR$, $k>0$, be a constant. Prove that $(a_n+b_n)^k <<a_n ^k + b_n ^k$.

\end{pro}
\begin{pro}
For a sequence of real numbers $\seq{a_n}{n=1}{+\infty}$ it is known
that $a_n = \boo{n^2}$ and $a_n=\soo{n^2}$. Which of the two
statements conveys more information?
\begin{answer}
$a_n=\soo{n^2}$ does, since this says that $\lim
_{\ngroes}\dfrac{a_n}{n^2}=0$, whereas $a_n=\boo{n^2}$ says that
$\dfrac{a_n}{n^2}$ is bounded by some positive constant.
\end{answer}
\end{pro}
\begin{pro}
True or false: $a_n = \boo{n} \implies a_n= \soo{n}$.
\begin{answer}
False. Take $a_n=2n$, for example. Then $a_n<<n$,
$\dfrac{a_n}{n}=2$, and so $\dfrac{a_n}{n}\nrightarrow 0$.
\end{answer}
\end{pro}
\begin{pro}
True or false: $a_n = \soo{n} \implies a_n= \boo{n}$.
\begin{answer}
True. $\dfrac{a_n}{n}\rightarrow 0$ and so by Theorem
\ref{thm:sufficient-conds-for-big-oh}, $a_n<<n$.
\end{answer}
\end{pro}
\begin{pro}
True or false: $a_n = \soo{n^2} \implies a_n= \boo{n}$.
\begin{answer}
False. Take $a_n =n^{3/2}$. Then $\dfrac{a_n}{n^2}\rightarrow 0$ but
$a_n\neq \boo{n}$.
\end{answer}
\end{pro}
\begin{pro}
True or false: $a_n = \soo{n} \implies a_n= \boo{n^2}$.
\begin{answer}
True. $\dfrac{a_n}{n}\rightarrow 0$ and so by Theorem
\ref{thm:sufficient-conds-for-big-oh}, $a_n<<n$. Since $n<<n^2$, the
assertion follows by transitivity.
\end{answer}
\end{pro}
\end{multicols}




\section{Cauchy Sequences}
\begin{df}
A sequence of real numbers $\seq{a_n}{n=1}{+\infty}$ is called  a
\emph{Cauchy Sequence} if $$\forall \varepsilon > 0, \quad \exists
N>0, \quad \mathrm{such\ that} \quad \forall n,m \geq N \quad
\absval{a_{n}-a_{m}}< \varepsilon .
$$

\end{df}

\begin{thm}\label{thm:Cauchy-are-bounded}
 Cauchy sequences are bounded. \end{thm}\begin{pf} Let $ \seq{a_n}{n=1}{+\infty}
$ be Cauchy.  Take $ N>0 $ such that for all $ n\geq N $,
$\absval{a_{n}-a_{N}}<1$ . Then $ a_{n}$ is bounded by $$  \max
\left( |a_{1}|, \absval{a_{2}}, \ldots, \absval{a_{N}}\right)+1.$$
\end{pf}

\begin{lem}
\label{thm:Cauchy-seq-convers-if-a-subseq-does} If a Cauchy sequence
of real numbers has a convergent subsequence, then the parent
sequence converges, and it does so to the same limit as the
subsequence.
\end{lem}

\begin{pf}Let $\seq{a_n}{n=1}{+\infty}$ be a Cauchy sequence of real
numbers, and suppose that its subsequence
$\seq{a_{n_k}}{k=1}{+\infty}$ converges to the real number $a$.
Given $\varepsilon
> 0 $, take $ N>0$ sufficiently large such that
$$\forall m,n, n_k \geq N,\quad  \absval{a_{n}-a_{m}}< \varepsilon, \qquad
\mathrm{and}\quad \absval{a_{n_k}-a} < \varepsilon .$$ By the
Triangle Inequality,$$ \absval{a_{n}-a} \leq \absval{a_{n}-a_{n_k}}
+ \absval{a_{n_k}-a} < \varepsilon + \varepsilon = 2 \varepsilon,
$$whence $a_n\rightarrow a$.\end{pf}

\begin{thm}[General Principle of Convergence]
A sequence of real numbers converges if and only if it is Cauchy.
\end{thm}

\begin{pf}
\begin{enumerate}
\item[$ (\Rightarrow) $] If $ a_{n} \to a $,
given $ \varepsilon>0 $, choose $ N >0$ such that $
\absval{a_{n}-a}< \varepsilon $ for all $ n \geq N $.

Then if $ m,n \geq N $,
$$
\absval{a_{n}-a_{m}} \leq \absval{a_{n}-a} + \absval{a_{m}-a} \leq
\varepsilon + \varepsilon = 2 \varepsilon.
$$

Since $ 2 \varepsilon > 0$ can be made arbitrarily small, $ a_{n} $
is Cauchy.

\item[$ (\Leftarrow) $]
Suppose $ a_{n} $ is Cauchy. By virtue of Theorem
\ref{thm:Cauchy-are-bounded} it is bounded, say that for all $n>0$,
$a_{n} \in \lcrc{\alpha}{\beta}$. Put
$$
\mathscr{S} = \{ s: a_{n} \geq s \text{ for infinitely many $n$} \}.
$$As $ \alpha \in \mathscr{S} $,  $ \mathscr{S} \neq \varnothing
$.  $ \mathscr{S} $ is
 bounded above by $ \beta$.
 By the Completeness Axiom, $ \mathscr{S} $ has a supremum, $ a = \sup \mathscr{S} $.
Given $ \varepsilon > 0 $, $ a - \varepsilon < a $ and so
 there is $ s \in \mathscr{S} $ such that $a - \varepsilon < s$.
By definition of $ \mathscr{S}$, there are infinitely many $ n $
with $a_{n}\geq s
>
 a-\varepsilon$.
 $ a+ \varepsilon > a $, so that $a+ \varepsilon \notin \mathscr{S}$ and so there
 are only finitely many $ n $ for which $a_{n} \geq a+ \varepsilon$.
Thus there are infinitely many $ n $ with  $a_{n} \in (a -
\varepsilon, a+\varepsilon)$.

Choose $ N>0 $ such that $\absval{a_{n}-a_{m}} < \varepsilon$ for
all $ m,n \geq N $. We can find $ m \geq N $ with $a_{m}\in
(a-\varepsilon, a + \varepsilon)$ ie $\absval{a_{m}-a}<\varepsilon$.
Then if $ n \geq N $,
$$
\absval{a_{n}-a} \leq \absval{a_{n}-a_{m}} + \absval{a_{m}-a} <
\varepsilon + \varepsilon = 2 \varepsilon
$$

As $ 2 \varepsilon $ can be made arbitrarily small this shews $
a_{n} \to a $.
\end{enumerate}
\end{pf}



\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\end{multicols}



\section{Topology of sequences. Limit Superior and Limit Inferior}

\begin{thm}\label{thm:dense-and-sequences}A set $X\subseteqq \BBR$ is dense in $\BBR$ if and only
if for every $x\in \BBR$ there is a sequence
$\seq{x_n}{n=1}{+\infty}$ of elements of $X\setminus \{x\}$ that
converges to $x$.
\end{thm}
\begin{pf}
\begin{description}
\item[$\implies$] For each  positive integer $n$, since $X$ is
dense in $\BBR$, there exists $x_n\in X\setminus \{x\}$ such that
$\absval{x_n -x}< \dfrac{1}{2^n}$. But then $x_n\to x$ as $\ngroes$.
\item[$\Leftarrow$] Let $x\in \BBR $ and let $\seq{x_n}{n=1}{+\infty}$ be a sequence of elements of $X\setminus \{x\}$ that
converges to $x$. Then $\forall \varepsilon > 0$, $\exists N\in
\BBN$ such that $\forall n\geq N$, $\absval{x_n-x}<\varepsilon$. But
then we have found elements of $X\setminus \{x\}$ which are
arbitrarily close to $x$, meaning that $X$ is dense in $\BBR$.

\end{description}
\end{pf}



\begin{thm}\label{thm:accu-points-and-sequences}
Let $X\subseteqq \BBR$. A point $x\in \BBR$ is an accumulation point
of $X$ if and only if there exists a sequence of elements of
$X\setminus \{x\}$ converging to $x$.
\end{thm}
\begin{pf}
\begin{description}
\item[$\implies$]If  $x$ is an accumulation point of $X$, every closed interval
$I_n := [x-1/n; x+1/n]$, $n\in\BBN$, satisfies $I_n \cap (X\setminus
\{x\}) \neq \varnothing$, thus $\forall n\in \BBN$, $\exists x_n \in
I_n \cap (X\setminus \{x\})$. Since $\absval{x_n-x}<\dfrac{1}{n}$,
we conclude that  $\lim x_n = x$.

\item[$\Leftarrow$]Suppose now that  $\seq{x_n}{1}{+\infty}$ is an infinite sequence of points of $X\setminus \{x\}$ converging to
$x$. If $x\notin \acc{X}$, then $x\notin\acc{x_1,x_2,\ldots}$. Thus
there is a neighbourhood of $x$, $\N{x}$, such that $\N{x}\cap
\{x_1,x_2,\ldots\}=\varnothing$. Thus there is an $\varepsilon > 0$ such that $]x
- \varepsilon; x + \varepsilon[ \subseteqq \N{x}$. For this
$\varepsilon$ it is then true for none of the $x_n$ that
$\absval{ x_n - x } < \varepsilon$, contradicting the fact that
$\lim _{\ngroes} x_n = x$.
\end{description}
\end{pf}

\begin{df}
Given a sequence $\seq{a_n}{1}{+\infty}$, the new sequence
$$b_k = \inf _{n\geq k} a_n = \inf \{a_k, a_{k+1}, a_{k+2}, \ldots \}, \quad k \geq
1,
$$satisfies $b_k\leq b_{k+1}$, that is, it is increasing, and hence
it converges to its supremum. We then put
$$ \liminf _{n\rightarrow +\infty} a_n =\sup _{n\geq 1}\inf _{k\geq n} a_k.
$$Similarly, the  new sequence
$$c_k = \sup _{n\geq k} a_n = \sup \{a_k, a_{k+1}, a_{k+2}, \ldots \}, \quad k \geq
1,
$$satisfies $c_k\geq c_{k+1}$, that is, it is decreasing, and hence
it converges to its infimum. We then put
$$ \limsup _{n\rightarrow +\infty} a_n =\inf _{n\geq 1}\sup _{k\geq n} a_k.
$$
\end{df}
We now prove the following theorem for future reference.
\begin{thm}\label{thm:ratio-is-inferior-to-root}
For any sequence $\seq{a_n}{n=0}{+\infty}$ of strictly positive real
numbers
$$ \lim  _{\ngroes}\inf \dfrac{a_{n+1}}{a_n}\leq \lim   _{\ngroes}\inf \sqrt[n]{a_n}\leq \lim  _{\ngroes}\sup \sqrt[n]{a_n}  \leq \lim  _{\ngroes}\sup  \dfrac{a_{n+1}}{a_n}. $$
\end{thm}
\begin{pf}
We will prove the last inequality. The first is quite similar, and
the two middle ones are obvious.


Put $r= \lim  _{\ngroes}\sup  \dfrac{a_{n+1}}{a_n}$. If $r=+\infty$
then there is nothing to prove. For $r<+\infty$ choose $r'>r$. There
is $N\in \BBN$ such that $$\forall n \geq N, \qquad
\dfrac{a_{n+1}}{a_n}\leq r'.$$ Hence,
$$a_{N+1} \leq r'a_N, \quad  a_{N+2} \leq r'a_{N+1}, \quad  a_{N+3} \leq r'a_{N+2}, \ldots \quad  a_{N+t} \leq r'a_{N+t-1}, $$
and so, upon multiplication and cancelling,
$$a_{N+t}\leq a_N (r')^t,  $$and putting $n=N+t$,
$$a_n\leq a_N(r')^{-N} (r')^n \implies \sqrt[n]{a_n} \leq r' \sqrt[n]{a_N(r')^{-N}} \implies \lim _{\ngroes} \sup \sqrt[n]{a_n} \leq r',  $$
since  $a_N(r')^{-N}$ is a fixed real number (does not depend on
$n$), and so, $\sqrt[n]{a_N(r')^{-N}} \to 1$ by Theorem
\ref{thm:root-n-of-a-to-1-goes}.

\end{pf}








 The following theorem is an easy exercise left to the reader.
\begin{thm}
Let $\seq{a_n}{1}{+\infty}$ be a sequence of real numbers. Then
\begin{enumerate}
\item if $\limsup _{n\rightarrow +\infty} a_n = +\infty$, then  $\seq{a_n}{1}{+\infty}$ has a subsequence converging to $+\infty$.
\item if $\limsup _{n\rightarrow +\infty}a_n = -\infty$,  then  $\lim _{n\rightarrow +\infty} a_n = -\infty$.
\item if $\limsup _{n\rightarrow +\infty} a_n = a\in\BBR $, then
$$
    \forall\,\epsilon>0, \; \exists\, n_0\ \hbox{such\ that } a_n < a + \epsilon\hbox{ whenever } n \geq n_0
$$and also, there are infinitely many $a_n$ such that  $a - \epsilon < a_n$.
\item if $\liminf _{n\rightarrow +\infty} a_n = -\infty$,  then  $\seq{a_n}{1}{+\infty}$ has a subsequence converging to
$-\infty$.
\item if $\liminf _{n\rightarrow +\infty} a_n = +\infty$,  then  $\lim _{n\rightarrow +\infty}a_n = +\infty$.
\item if $\liminf _{n\rightarrow +\infty} a_n = a\in\BBR $, then
$$
    \forall\,\epsilon>0, \; \exists\, n_0\ \hbox{ such \ that  } a - \epsilon < a_n \hbox{ whenever } n \geq n_0
$$
and there are infinitely many $a_n$ such that $a_n < a + \epsilon$.
\item $\liminf _{n\rightarrow +\infty} a_n \leq \limsup _{n\rightarrow +\infty} a_n$  is always verified, and furthermore,
$\liminf _{n\rightarrow +\infty} a_n =  \limsup _{n\rightarrow
+\infty} a_n$ if and only if $\lim _{n\rightarrow +\infty} a_n$
exists, in which case $\liminf _{n\rightarrow +\infty} a_n =\lim
_{n\rightarrow +\infty} a_n = \limsup _{n\rightarrow +\infty} a_n$.
\end{enumerate}

\end{thm}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Identify the set of accumulation points of the set
$\{\sqrt{a}-\sqrt{b}: (a, b)\in \BBN^2\}$.
\end{pro}
\begin{pro}
Consider the following enumeration of the proper fractions
$$ \dfrac{0}{1}, \dfrac{1}{1}, \dfrac{0}{2},\dfrac{1}{2},\dfrac{2}{2},\dfrac{0}{3},\dfrac{1}{3},\dfrac{2}{3}, \dfrac{3}{3}\ldots .$$
Clearly,  the fraction $\dfrac{a}{b}$ in this enumeration occupies
the $a+ \dfrac{b(b+1)}{2}$-th place. For each integer $k\geq 1$,
cover the $k$-th fraction $\dfrac{a}{b}$ by an interval of length
$2^{-k}$ centred at $\dfrac{a}{b}$. Shew that the point
$\dfrac{\sqrt{2}}{2}$ does not belong to any interval in the cover.
\end{pro}

\end{multicols}


\chapter{Series}
\section{Convergence and Divergence of Series}
\begin{df}Let $\{a_n\} _{n=1} ^{+\infty}$ be a sequence of real
numbers. A {\em series} is the sum of a sequence. We write
$$s_n= a_1 + a_2 + \cdots + a_n = \sum _{k =1} ^n a_k.  $$
Here $s_n$ is the $n$-th {\em partial sum}. Observe in particular
that $$ a_n = s_n - s_{n-1}. $$
\end{df}
\begin{df}
If the sequence $\{s_n\} _{n=1} ^{+\infty}$ has a finite limit $S$,
we say that the series converges to $S$ and write $$\sum _{k =1}
^{+\infty} a_k  = \lim _{\ngroes} s_n = S. $$Otherwise we say that
the series {\em diverges}.
\end{df}
Observe that $\sum _{n=1} ^{+\infty} a_n$  converges  to $S$ if
$\forall \varepsilon > 0$, $\exists N$ such that $\forall n \geq N$,
$$\absval{\left(\sum _{k \leq n} a_k\right) -S} =|s_n-S|  < \varepsilon .$$
Now, since $$\left(\sum _{k \leq n} a_k\right) -S = \left(\sum _{k
\leq n} a_k\right) - \left(\sum _{k \geq 1} a_k\right) =-\sum _{k
> n} a_k, $$we see that a series converges if and only if its
``tail'' can be made arbitrarily small. Hence, the reader should
notice that adding or deleting a finite amount of terms to a series
does not affect its convergence or divergence. Furthermore, since
the sequence of partial sums of a convergent  series must be a
Cauchy sequence we deduce that a series is convergent if and only if
$\forall \varepsilon > 0$, $\exists N>0$ such that $\forall m\geq N,
n \geq N$, $m \leq n$, \begin{equation}\absval{s_n-s_m} =
\absval{\sum _{k=m+1} ^n a_k}<\varepsilon .\label{eq:Cauchy-seq-ser}
\end{equation}



\begin{thm}[$n$-th Term Test for Divergence]
If $\sum _{n=1} ^\infty a_n$ converges, then $a_n\rightarrow 0$ as
$\ngroes$. \label{thm:n-th-term-test}\end{thm}
\begin{pf}
Put $s_n = \sum _{k=1} ^n a_k$. Then
$$ \lim
_{\ngroes} s_n = S \implies a_n = s_n -s_{n-1} \rightarrow S-S = 0.
$$
\end{pf}


In general, the problem of determining whether a series converges or
diverges requires some work and it will be dealt with in the
subsequent sections. We continue here with some other examples.




\begin{exa}
The series $\sum _{n =1} ^{+\infty} \left(1+\dfrac{2}{n}\right)^n$
diverges, since its $n$-th term $\left(1+\dfrac{2}{n}\right)^n
\rightarrow e^2$.
\end{exa}
\begin{exa}\label{exa:harmonic-series-diverges} We will prove that the {\em harmonic series} $\sum _{n=1} ^{+\infty}
\dfrac{1}{n}$ diverges, even though $\dfrac{1}{n}\rightarrow 0$ as
$\ngroes$. Thus the condition in Theorem \ref{thm:n-th-term-test}
though necessary for convergence is not sufficient. The divergence
of the harmonic series was first demonstrated by Nicole d'Oresme
(ca. 1323-1382), but his proof was mislaid for several centuries.
The result was proved again by Pietro Mengoli in 1647, by Johann
Bernoulli in 1687, and by Jakob Bernoulli shortly thereafter. Write
the partial sums in dyadic blocks,
$$  \sum_{n=1}^{2^M} \frac{1}{n} = \sum_{m=1}^M \sum_{n=2^{m-1}+1}^{2^m} \frac{1}{n}. $$
As $1/n \ge 1/N$ when $n \le N$, we deduce that
$$  \sum_{n=2^{m-1}+1}^{2^m} \frac{1}{n} \ge \sum_{n=2^{m-1}+1}^{2^m} 2^{-m} = (2^m - 2^{m-1}) 2^{-m} = \frac{1}{2} $$
Hence,
$$  \sum_{n=1}^{2^M} \frac{1}{n} \ge \frac{M}{2} $$
so the series diverges in the limit $M \to +\infty$.
\end{exa}

The following theorem says that linear combinations of convergent
series converge.
\begin{thm}\label{thm:line-comb-conv}
Let $\sum _{n=1} ^{+\infty} a_n = A$ and $\sum _{n=1} ^{+\infty}
b_n=B$ be convergent series and let $\gamma \in \BBR$ be a real
number. Then the series \mbox{$\sum _{n=1} ^{+\infty} (a_n + \gamma
b_n)$} converges to $A+\gamma B$.
\end{thm}
\begin{pf}
For all $\varepsilon > 0$ there exist $N, N'$
 such that for all $n \geq \max (N,N')$,
$$ \absval{\sum _{k \leq n}
a_k-A} < \dfrac{\varepsilon}{2} , \qquad \absval{\sum _{k \leq n}
b_k-B} < \dfrac{\varepsilon}{2(|\gamma| + 1)}.
$$
Hence, by the triangle inequality and by the obvious inequality
$\dfrac{|\gamma|}{|\gamma |+1}\leq 1$, we have
$$\absval{\left(\sum _{k \leq n} a_k+ \gamma b_k \right) - (A+\gamma
B)} \leq \absval{\sum _{k \leq n} a_k - A} + | \gamma |\absval{\sum
_{k \leq n} b_k -B} \leq  \dfrac{\varepsilon}{2}+ |\gamma
|\dfrac{\varepsilon}{2(|\gamma| + 1)} <
\dfrac{\varepsilon}{2}+\dfrac{\varepsilon}{2} = \varepsilon .
$$

 \end{pf}


\begin{df}
A {\em geometric series} with common ratio $r$ and first term $a$ is
one of the form
$$a + ar + ar^2 + ar^3 + \cdots = \sum _{n=0} ^{+\infty} ar^{n}.  $$
\end{df}
By Theorem \ref{thm:geometric-sum}, if $|r|<1$ then the series
converges and we have
$$a + ar + ar^2 + ar^3 + \cdots = \sum _{n=0} ^{+\infty} ar^{n} = \dfrac{a}{1-r}.  $$
\begin{exa}
 A fly starts at the origin and goes $1$ unit up, $1/2$ unit
right, $1/4$ unit down, $1/8$ unit left, $1/16$ unit up, etc., {\em
ad infinitum.} In what coordinates does it end up?
\end{exa}
\begin{solu}
Its $x$ coordinate is
$$\frac{1}{2} - \frac{1}{8} + \frac{1}{32} - \cdots
= \frac{\frac{1}{2}}{1 - \frac{-1}{4}} = \frac{2}{5}.$$ Its $y$
coordinate is
$$1 - \frac{1}{4} + \frac{1}{16} - \cdots = \frac{1}{1 - \frac{-1}{4}} = \frac{4}{5}.$$Therefore, the
fly ends up in $$\left(\frac{2}{5}, \frac{4}{5}\right).$$ Here we
have used the fact the sum of an infinite geometric progression with
common ratio $r$, with $|r|<1$ and first term $a$ is
$$ a + ar + ar^2 + ar^3 + \cdots = \dfrac{a}{1-r}.  $$
\end{solu}

\begin{df}A {\em telescoping sum} is a sum where adjacent terms cancel
out. That is, $\sum _{n=0} ^Na_n$ is a telescoping sum if we can
write $a_n = b_{n+1}-b_n$ and then
$$\sum _{n=0} ^Na_n = a_0 + a_1 + \cdots + a_N =
(b_{1}-b_0) + (b_{2}-b_1) + \cdots + (b_{N+1}-b_N) = b_{N+1}-b_0.
$$

\end{df}

\begin{exa}\label{exa:simple-telescope}
We have
$$\sum _{n=1} ^N \dfrac{1}{n(n+1)} = \sum _{n=1} ^N \left(\dfrac{1}{n}-\dfrac{1}{n+1}\right) = \left(\dfrac{1}{1}-\dfrac{1}{2}\right)
+ \left(\dfrac{1}{2}-\dfrac{1}{3}\right) + \cdots +
\left(\dfrac{1}{N}-\dfrac{1}{N+1}\right) = 1-\dfrac{1}{N+1}.
$$Thus
$$ \sum _{n=1} ^{+\infty} \dfrac{1}{n(n+1)} =\lim _{N\to+\infty} \sum _{n=1} ^N \dfrac{1}{n(n+1)} = \lim _{N\to+\infty}\left(1-\dfrac{1}{N+1}\right) =1. $$
\end{exa}
\begin{exa}
We have
$$\begin{array}{lll}\sum _{n=1} ^N \dfrac{1}{n(n+1)(n+2)} & = &
\dfrac{1}{2}\sum _{n=1} ^N
\left(\dfrac{1}{n(n+1)}-\dfrac{1}{(n+1)(n+2)}\right) =
\dfrac{1}{2}\left(\left(\dfrac{1}{1\cdot 2}-\dfrac{1}{2\cdot
3}\right) + \left(\dfrac{1}{2\cdot 3}-\dfrac{1}{3\cdot 4}\right) +
\cdots + \left(\dfrac{1}{N(N+1)}-\dfrac{1}{(N+1)(N+2)}\right)\right)
\\ & = & \dfrac{1}{2}\left(\dfrac{1}{2}-\dfrac{1}{(N+1)(N+2)}\right).
\end{array}$$Thus
$$ \sum _{n=1} ^{+\infty} \dfrac{1}{n(n+1)(n+2)} =\lim _{N\to+\infty} \sum _{n=1} ^N \dfrac{1}{n(n+1)(n+2)} =
\lim
_{N\to+\infty}\dfrac{1}{2}\left(\dfrac{1}{2}-\dfrac{1}{(N+1)(N+2)}\right)
=\dfrac{1}{4}. $$
\end{exa}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro} Find the sum of
$\sum _{n=3} ^\infty \dfrac{2^n}{e^{n+1}} $. \begin{answer} This is
a geometric series with common ratio $|r|=\frac{2}{e} <1$, so it
converges. We have
$$ \sum _{n=3} ^\infty \dfrac{2^n}{e^{n+1}} = \dfrac{2^3}{e^4} + \dfrac{2^4}{e^5} + \cdots = \dfrac{\frac{2^3}{e^4}}{1 - \frac{2}{e}} = \dfrac{8}{e^4-2e^3}. $$
\end{answer}
\end{pro}

\begin{pro}
Find the sum of the series $\sum _{n=2} ^{+\infty}
\dfrac{1}{4n^2-1}$.
\begin{answer}Observe that
$$\dfrac{1}{4n^2-1} = \dfrac{1}{2(2n-1)} -\dfrac{1}{2(2n+1)}.$$ Hence
$$\sum _{n=2} ^{+\infty}
\dfrac{1}{4n^2-1} = \left(\dfrac{1}{2(3)} -\dfrac{1}{2(5)}\right) +
\left(\dfrac{1}{2(5)} -\dfrac{1}{2(7)}\right) +
\left(\dfrac{1}{2(7)} -\dfrac{1}{2(9)}\right) + \cdots
=\dfrac{1}{2(3)}  = \dfrac{1}{6}.
$$
\end{answer}
\end{pro}
\begin{pro}
Find the exact numerical value of the sum $\sum _{n=0}
^{+\infty}\arctan \dfrac{1}{n^2+n+1}$.
\begin{answer}
Since $\tan (x-y) = \dfrac{\tan x - \tan y}{1+\tan x\tan y}$,
observe that $\arctan \dfrac{1}{n^2+n+1} = \arctan (n+1) - \arctan
n$. Hence the series, which starts at $n=0$, telescopes to $\lim _{\ngroes}\arctan
(n+1)-\arctan 0 = \dfrac{\pi}{2} .$
\end{answer}
\end{pro}
\begin{pro}
Find the exact numerical value of the infinite sum
$$ \sum _{n=1} ^{+\infty}
\dfrac{\sqrt{(n-1)!}}{(1+\sqrt{1})\cdots (1+\sqrt{n})}.$$
\end{pro}

\begin{pro} Shew that
$$\sum _{k = 1} ^n \frac{k}{k^4 + k^2 + 1} = \frac{1}{2}\cdot\frac{n^2 + n}{n^2 + n +
1},$$and thus prove that $\sum _{k = 1} ^n \frac{k}{k^4 + k^2 + 1}$
converges.
\end{pro}
\begin{pro}
Let $b(n)$ denote the number of ones in the binary expansion of the
positive integer  $n$, for example $b(3) = b(11_2)=2$. Prove that
$\sum _{n=1} ^{+\infty} \dfrac{b(n)}{n(n+1)} = \log 4$.
\end{pro}
\begin{pro}
Find
$$1 + \dfrac{1}{2} +\dfrac{1}{3} + \dfrac{1}{6}+\dfrac{1}{8} + \dfrac{1}{9}+\dfrac{1}{12} +\dfrac{1}{16}+\dfrac{1}{18}+\cdots ,$$
which is the sum of the reciprocals of all positive integers of the
form $2^n3^m$ for integers $n \geq 0, m\geq 0$.
\begin{answer}
By unique factorisation of the integers, the desired sum is
$$\left(1+\dfrac{1}{2} + \dfrac{1}{2^2}+\dfrac{1}{2^3}+\cdots \right)\left(1+\dfrac{1}{3} + \dfrac{1}{3^2}+\dfrac{1}{3^3}+\cdots \right)
= \dfrac{1}{1-\dfrac{1}{2}}\cdot\dfrac{1}{1-\dfrac{1}{3}} = 3.    $$

\end{answer}
\end{pro}
\begin{pro}
The {\em Fibonacci Numbers} $f_n$ are defined recursively as
follows:
$$f_0 =0,\quad f_1=1, \quad f_{n+2} = f_n + f_{n+1}, \quad n\geq 0.   $$
Prove that $\sum _{n=1} ^{+\infty} \dfrac{f_n}{3^n}=\dfrac{3}{5}$.
\end{pro}
\begin{pro}
Let $\sum _{n \geq 0} a_n $ be a convergent series and let $\sum _{n
\geq 0} b_n $ be a divergent series. Prove that $\sum _{n \geq 0}
(a_n+ b_n)$ diverges.
\begin{answer}Since the sum of two convergent series is convergent by Theorem \ref{thm:line-comb-conv}, if $\sum _{n \geq 0}
(a_n+ b_n)$ then from the  identity $b_n = (a_n+b_n)-a_n$ we would
deduce that $\sum _{n \geq 0} b_n $  converges, a contradiction.
\end{answer}
\end{pro}
\begin{pro}
Prove that if $\sum _{n\geq 1} a_n$ is a series of positive terms
and that its partial sums are bounded, then  $\sum _{n\geq 1} a_n$
converges. Shew that this is not necessarily true if  $\sum _{n\geq
1} a_n$  is not a series of positive terms.
\begin{answer}Put $s_N = \sum _{1 \leq n \leq N} a_n$. There is a
positive constant $M $ such that $\forall N>0$, $s_N\leq M$. Observe
that because the terms are positive
$$s_{N+1} = s_N + a_{N+1} \geq s_N,  $$and so the sequence
$\seq{s_N}{N=1}{+\infty}$ is a monotonically increasing bounded
above sequence and so it converges by Theorem
\ref{thm:bounded-increasing-seqs-convergent-be}.


This is not necessarily true if the series does not have positive
terms. For example, the series $\sum _{n\geq 1} (-1)^{n+1}$ has
bounded partial sums, in fact they are either $1$  or $0$. But the
sequence of partial sums then is
$$ 1,0,1,0,1,0, \ldots $$which does not converge.

\end{answer}
\end{pro}
\end{multicols}
\section{Convergence and Divergence of Series of Positive Terms}
We have several tools to establish convergence and divergence of
series of positive terms. We will start with some simple comparison
tests.
\begin{thm}[Direct Comparison Test] \label{thm:direct-comparison}
Let $\seq{a_n}{n=0}{+\infty}$, $\seq{b_n}{n=0}{+\infty}$,
$\seq{c_n}{n=0}{+\infty}$, be  sequences of positive real numbers.
Suppose that eventually $a_n \leq b_n$, that is, that $\exists N
\geq 0$ such that $\forall n \geq N$ there holds $a_n\leq b_n$. If
$\sum _{n \geq 0} b_n $ converges, then $\sum _{n \geq 0} a_n$
converges.



If eventually $a_n \geq c_n$, and  $\sum _{n \geq 0} c_n $ diverges,
then $\sum _{n \geq 0} a_n$ also must diverge.
\end{thm}
\begin{pf}The theorem is clear from the inequalities
$$ \sum _{n \geq N} a_n  \leq \sum _{n \geq N} b_n , \qquad  \sum _{n \geq N} a_n  \geq \sum _{n \geq N} c_n. $$
If $\sum _{n \geq 0} b_n $ converges, then its tail can be made as
small as we please, and so the tail of  $\sum _{n \geq 0} a_n $ can
be made as small as we please. Similarly if  $\sum _{n \geq 0} c_n $
diverges, because it is a series of positive terms, its tail grows
without bound and so the tail of $\sum _{n \geq 0} a_n $ grows
without bound.
\end{pf}

\begin{rem} Call a divergent series of positive terms a ``giant'' and a
converging  series of positive terms a ``midget.'' The comparison
tests say that if a series is bigger than a giant it must be a
giant, and if a series is smaller than a midget, it must be a
midget.
\end{rem}

\begin{exa}
From example \ref{exa:simple-telescope}, $\sum _{n \geq 1}
\dfrac{1}{n(n+1)}$ converges. Since for $n\geq 1$,
$$n(n+1)<(n+1)^2\implies \dfrac{1}{(n+1)^2} < \dfrac{1}{n(n+1)},$$we
deduce that the series
$$\sum _{n \geq 1} \dfrac{1}{(n+1)^2} =  \sum _{n \geq 2} \dfrac{1}{n^2} $$
converges. Since adding a finite amount of terms to a series does
not affect convergence, we deduce that $1+ \sum _{n \geq 2}
\dfrac{1}{n^2}  = \sum _{n \geq 1} \dfrac{1}{n^2} $ converges.
\end{exa}
\begin{exa}
$\sum _{n=1} ^{+\infty} \dfrac{1}{n^n}$ converges. For $n \geq 2$ we
have $\dfrac{1}{n^n} \leq \dfrac{1}{n^2}$ and the series converges
by direct comparison with $\sum _{n=1} ^{+\infty} \dfrac{1}{n^2}$.
\end{exa}
\begin{exa}
From example \ref{exa:harmonic-series-diverges}, $\sum _{n\geq
1}\dfrac{1}{n}$ diverges. Since for $n\geq 1$, $\log n < n$, we
deduce that $\sum _{n\geq 2} \dfrac{1}{\log n}$ diverges. Notice
that here we start the sum at $n=2$ since the logarithm vanishes at
$n=1$.
\end{exa}

\begin{exa} Prove that $$ \sum _{\stackrel{p}{p \ {\rm prime} }}
\dfrac{1}{p}$$diverges.\end{exa} \begin{solu} We will prove this by
contradiction. Let $p_1=2$, $p_2=3$, $p_3=5$, \ldots be the sequence
of primes in ascending order and  assume that the series converges.
Then there exists an integer $K$ such that
$$ \sum _{m \geq K+1} \dfrac{1}{p_m}< \dfrac{1}{2}. $$ Let
$P = p_1p_2 \cdots p_K$ and consider the numbers $1+nP$ for
$n=1,2,3,\ldots$. None of these numbers has a prime divisor in the
set $\{p_1,p_2, \ldots , p_K\}$ and hence all the prime divisors of
the $1+nP$ belong to the set $\{p_{K+1}, p_{K+2}, \ldots \}$. This
means that for each $t\geq 1$, $$ \sum _{n=1} ^t  \dfrac{1}{1+nP}
\leq \sum _{s\geq 1} \left(\sum _{m \geq K+1}
\dfrac{1}{p_m}\right)^s \leq  \sum _{s\geq 1}\dfrac{1}{2^s} = 1,
$$that is, $ \sum _{n=1} ^t  \dfrac{1}{1+nP}$, a series of positive
terms, has bounded partial sums and so it converges. But since $1+nP
\sim nP$ as $n\to +\infty$ and $\dfrac{1}{P}\sum _{n\geq
1}\dfrac{1}{n}$ diverges, we obtain a contradiction.
\end{solu}

Since the convergent behaviour of a series depends of its tail, the
following asymptotic comparison tests should be clear, and its proof
follows the same line of reasoning as Theorem
\ref{thm:direct-comparison}.

\begin{thm}[Asymptotic Comparison Test] \label{thm:asymp-comparison}
Let $\seq{a_n}{n=0}{+\infty}$, $\seq{b_n}{n=0}{+\infty}$,
$\seq{c_n}{n=0}{+\infty}$, be  sequences of  real numbers which are
eventually positive. Suppose that  $a_n <<b_n$, and that $c_n <<
a_n$. Then both $\sum _{n \geq 0} a_n$  and $\sum _{n \geq 0} b_n$
 converge together, and both $\sum _{n \geq 0} a_n$  and $\sum _{n \geq 0} c_n$
 diverge together. Moreover, if $\seq{b_n}{n=0}{+\infty}$ is
 eventually a strictly positive sequence and $a_n \sim b_n$, then $\sum _{n \geq 0} a_n$  and $\sum _{n \geq 0} b_n$
 converge or diverge together.
 \end{thm}

In order to effectively use the comparison tests we must have a
ready catalogue of series whose convergence or divergence we know.
In the subsequent lines we will develop such a catalogue. We start
with the following consequence of the comparison tests.


\begin{thm}[Cauchy Condensation Test]Let $\seq{a_n}{n=0}{+\infty}$ be a sequence of
positive real numbers which is monotonically decreasing. Then
$\sum_{n=0}^{\infty} a_n$ converges if and only if the sum
$\sum_{n=0}^{\infty} 2^{n}a_{2^{n}}$ converges.
\label{thm:condensed-cauchy}
\end{thm}
\begin{pf}
Since the sequence $\seq{a_n}{n=0}{+\infty}$ is monotonically
decreasing and positive,
$$ \sum _{k = 2^n} ^{2^{n+1}-1}a_{2^{n+1}-1}\leq \sum _{k = 2^n} ^{2^{n+1}-1}a_k  \leq \sum _{k = 2^n} ^{2^{n+1}-1}a_{2^n}
\implies 2^na_{2^{n+1}-1} \leq \sum _{k = 2^n} ^{2^{n+1}-1}a_k \leq
2^na_{2^n}.
$$
The second inequality yields
$$\sum _{n=0} ^{2^{N+1}-1}a_n = \sum _{n=0} ^{N} \sum _{k = 2^n} ^{2^{n+1}-1}a_k \leq
\sum _{n=0} ^{N}2^na_{2^n} \implies \lim _{N\to +\infty}\sum _{n=0}
^{2^{N+1}-1}a_n\leq \lim _{N\to +\infty}\sum _{n=0} ^{N}2^na_{2^n}.
$$Thus if $\sum _{n=0} ^{+\infty}2^na_{2^n}$ converges so does $\sum
_{n=0} ^{+\infty}a_n$.

\bigskip
The first inequality yields
$$\begin{array}{lll}  2^na_{2^{n+1}-1} \leq \sum _{k = 2^n} ^{2^{n+1}-1}a_k  & \implies &
(2^{n+1}-1)a_{2^{n+1}-1} \leq 2\sum _{k = 2^n}
^{2^{n+1}-1}a_k-a_{2^{n+1}-1}\\ & \implies &  \sum _{n=0}
^{N}(2^{n+1}-1)a_{2^{n+1}-1} \leq 2\sum _{n=0} ^{N}\sum _{k = 2^n}
^{2^{n+1}-1}a_k-\sum _{n=0} ^{N}a_{2^{n+1}-1} = 2\sum _{n=0}
^{2^{N+1}-1} a_n - \sum _{n=0} ^{N}a_{2^{n+1}-1}.
\end{array}$$
Thus if $\sum _{n=0} ^{+\infty}a_n$ converges, the partial sums of
$\sum _{n=0} ^{+\infty}(2^{n+1}-1)a_{2^{n+1}-1}$ are bounded, and since the
sequence is decreasing, $a_{2^{n+1}}\leq a_{2^{n+1}-1}$, so the partial sums
of $\sum _{n=0} ^{+\infty}2^na_{2^n}$ are also bounded, and the latter series
converges.
\end{pf}
As an application of Cauchy's Test, we obtain the following
important result.

\begin{thm}[$p$-series Test] If $p>1$ then $\zeta (p) = \sum _{n=1} ^{+\infty}
\dfrac{1}{n^p}$ converges, but diverges when $p \leq 1$.
\label{thm:p-series-test}
\end{thm}
\begin{pf}
If $p\leq 0$, divergence follows from Theorem
\ref{thm:n-th-term-test}. If $p > 0$, then using the fact that
$x\mapsto x^p$ is monotonically increasing, we may use Theorem
\ref{thm:condensed-cauchy}. Since
$$ \sum _{k\geq 0} \dfrac{2^k}{2^{pk}} = \sum _{k \geq 0} \left(2^{(1-p)}\right)^k $$
is a geometric series with ratio $2^{1-p}$, it converges by Theorem
\ref{thm:geometric-sum} when $$2^{1-p}< 1 \implies (1-p)\log _2 2 <
\log _2 1 \implies 1-p < 0 \implies p>1,
$$and diverges for $p<1$. The case $p=1$ has been shewn to diverge
in example \ref{exa:harmonic-series-diverges}.
\end{pf}
\begin{exa}
Since $\sqrt{2}>1$, the series $\sum _{n=1} ^{+\infty}
\dfrac{1}{n^{\sqrt{2}}}$ converges.
\end{exa}
\begin{exa}
Since $$ \dfrac{n^{\sqrt{2}}+(\log\log n)^{2007}}{n^3 + n(\log n)^5
+ 1}\sim \dfrac{n^{\sqrt{2}}}{n^3}=\dfrac{1}{n^{3-\sqrt{2}}}
$$and $3-\sqrt{2}>1$, the series \mbox{$\sum _{n\geq 1} \dfrac{n^{\sqrt{2}}+(\log\log n)^{2007}}{n^3 + n(\log n)^5
+ 1}$} converges.
\end{exa}

\begin{cor}[De Morgan's Logarithmic Scale] If $p>1$ then  all of
$$\sum _{n=1} ^{+\infty} \dfrac{1}{n^p}; \quad \sum _{n\geq e}
^{+\infty} \dfrac{1}{n(\log n)^p}; \quad \sum _{n\geq e^e}
^{+\infty} \dfrac{1}{n(\log n)(\log\log n)^p};  \quad \sum _{n\geq
e^{e^e}} ^{+\infty} \dfrac{1}{n(\log n)(\log\log n)(\log\log\log
n)^p}; \quad \ldots
$$ converge, but diverge when $p \leq 1$.
\label{cor:demorgan-log-scale}
\end{cor}
\begin{pf}
The theorem is proved inductively by successive applications of
Cauchy's Condensation Test. We will prove how the case for $\sum
_{n\geq e} ^{+\infty} \dfrac{1}{n(\log n)^p}$ follows from the case
$\sum _{n=1} ^{+\infty} \dfrac{1}{n^p}$ and leave the rest to the
reader.  We see that
$$ \sum _{k \geq 1} \dfrac{2^k}{2^k(\log 2^k)^p} = \dfrac{1}{(\log 2)^p}\sum _{k \geq 1} \dfrac{1}{k^p},  $$
and so this case follows from Theorem \ref{thm:p-series-test}.
\end{pf}
\begin{exa}
Determine whether $\sum _{n=4} ^{+\infty} \dfrac{(\log
n)^{100}}{n^{3/2}\log \log n}$ converges.
\end{exa}
\begin{solu}Since $(\log n)^{100}<< n^{1/4} $, eventually $\dfrac{(\log
n)^{100}}{n^{1/4}}<1$. We have  $\dfrac{(\log n)^{100}}{n^{3/2}\log
\log n} = \dfrac{(\log n)^{100}}{n^{1/4}}\cdot
\dfrac{1}{n^{5/4}\log\log n}\leq \dfrac{1}{n^{5/4}\log\log n}$ eventually, and since $\sum _{n=4} ^{+\infty}
\dfrac{1}{n^{5/4}\log\log n}< +\infty$, we have $\sum _{n=4}
^{+\infty}\dfrac{(\log n)^{100}}{n^{3/2}\log \log n} <+\infty$, that
is, the series converges.
\end{solu}
The reader should be aware that the value of the exponent in
Theorems \ref{thm:p-series-test} and \ref{cor:demorgan-log-scale} is
fixed. The following examples should dissuade him from believing
that ``having an exponent higher than $1$'' implies convergence.
\begin{exa}
Test $\dis{\sum _{n = 1} ^\infty \dfrac{1}{n^{1+1/n}}}$ for
convergence by comparing it to a suitable $p$-series. Use the direct
comparison test.
\end{exa}
\begin{solu}
By induction $n<2^n \implies n^{1/n}<2$ and so $n^{1+1/n}<2n
\implies \dfrac{1}{2n}< \dfrac{1}{n^{1+1/n}}$. So the series
diverges by direct comparison to $\dis{\sum _{n = 1} ^\infty
\dfrac{1}{2n}}$.

\end{solu}
\begin{exa}
Test $\dis{\sum _{n = 2} ^\infty \dfrac{1}{n^{1+1/\log  n}}}$ for
convergence by comparing it to a suitable $p$-series. Use the direct
comparison test
\end{exa}
\begin{solu}
We have $n = e^{\log  n} \implies n^{\frac{1}{\log  n}} =e$ and so
$n^{1+1/\log  n} =en, \qquad n>1$. So the series diverges by direct
comparison to $\dis{\sum _{n = 2} ^\infty \dfrac{1}{en}}$.
\end{solu}
\begin{exa}
Test $\dis{\sum _{n = 2} ^\infty \dfrac{1}{n^{1+1/\log \log  n}}}$
for convergence by comparing it to a suitable $p$-series. Use the
direct comparison test.
\end{exa}
\begin{solu}
By considering the monotonicity of $f(x) = e^x -\dfrac{x^2}{2}$ (see
Theorem \ref{thm:x^2<exp(x)}) or otherwise, we can prove that
$e^x>\dfrac{x^2}{2}$ for $x>0$. Now,
$$ n^{1/\log \log  n} = e^{\log  n^{1/\log \log  n}} = e^{\frac{\log
n}{\log \log n}}
> \dfrac{(\log  n)^2}{2(\log \log  n)^2}.
$$This gives
$$\dfrac{2(\log \log  n)^2}{n(\log  n)^2}> \dfrac{1}{n^{1+\frac{1}{\log \log  n}}}.
$$Now, $$\sum _{n=2} ^{+\infty}\dfrac{2(\log \log  n)^2}{n(\log  n)^2} $$
can be shewn to converge by comparing to a series in the De Morgan
logarithmic scale.
\end{solu}
\begin{exa}
Prove that the series $\sum _{n=1} ^{+\infty} \dfrac{1}{n^{1.8+\sin
n}}$ diverges.
\end{exa}
\begin{solu}
For $k\in \BBZ$, the interval $I_k
=\lcrc{(2k+\frac{4}{3})\pi}{(2k+\frac{5}{3})\pi}$ has length
$\dfrac{\pi}{3}>1$ and  $x\in I_k \implies \sin x \leq
-\dfrac{\sqrt{3}}{2}$. The gap between $I_k$ and $I_{k+1}$ is
$\dfrac{5\pi}{3}<6$. Hence, among any seven consecutive integers,
at least one must fall into some $I_k$ and for this value of $n$ we must
have $1.8+\sin n \leq 1.8-\dfrac{\sqrt{3}}{2}<1$. This means that
$$\sum _{n=1} ^{+\infty} \dfrac{1}{n^{1.8+\sin
n}}  = \sum _{m=0} ^{+\infty} \sum _{n=7m+1}
^{7m+7}\dfrac{1}{n^{1.8+\sin n}}  \geq   \sum _{m=0} ^{+\infty}
\dfrac{1}{7m+7}, $$and since the rightmost series diverges, the
original series diverges by the direct comparison test.

\end{solu}
The following result puts the harmonic series at the ``frontier'' of
convergence and divergence for series with monotonically decreasing
positive terms.
\begin{thm}[Pringsheim's Theorem]
Let $\sum _{n \geq 1} a_n$ be a converging series of positive terms
of monotonically decreasing terms. Then $a_n =\soo{\dfrac{1}{n}}$.
\end{thm}
\begin{pf}Since the series converges, its sequence of partial sums
is a Cauchy sequence and by \ref{eq:Cauchy-seq-ser}, given
$\varepsilon > 0$, $\exists m>0$, such that $\forall n \geq m$,
$$  \sum _{k=m+1} ^{n} a_k < \varepsilon . $$  Because the series
decreases monotonically, each of $a_{m+1}, a_{m+2}, \ldots , a_{n}$
is at least $a_n$ and  thus
$$ (n-m)a_{n} \leq \sum _{k=m+1} ^{n} a_k < \varepsilon.$$
Again, since the series converges, $a_n \to 0$ as $\ngroes$, so we may
choose $n$ large enough so that $a_n < \dfrac{\varepsilon}{m}$. In
this case
$$ (n-m)a_{n}<\varepsilon \implies na_n < \varepsilon + ma_n < 2\varepsilon \implies a_n < \dfrac{ 2\varepsilon}{n},$$
which proves the theorem.
\end{pf}






The disadvantage of the comparison tests is that in order to test for
convergence, we must appeal to the behaviour of an auxiliary series.
The next few tests provide a way of testing the series against its
own terms.

\begin{thm}[Root Test]
Let $\sum _{n=1} ^{+\infty} a_n$  be  a series of positive terms.
Put $r=\lim \sup (a_n)^{1/n}$. Then the series converges if $r <1$
and
 diverges if $r >1$. The test is
 inconclusive if $r =1$.
\end{thm}
\begin{pf}
If $r<1$ choose $r'$ with  $r<r'<1$. Then there exists $N\in \BBN$
such that $$\forall n\geq N, \quad \sqrt[n]{a_n}\leq r' \implies a_n
\leq (r')^n.
$$But then $\sum _{n=0} ^{+\infty} a_n$ converges by direct
comparison to the converging geometric series $\sum _{n=0}
^{+\infty} (r')^n$.



If $r>1$ then there is a sequence $\seq{n_k}{k=1}{+\infty}$ of
positive integers such that $$ \sqrt[n_k]{a_{n_k}} \to r.  $$ This
means that $a_n$ will be $>1$ for infinitely many values of $n$, and
so, the condition $a_n \to 0$ necessary for convergence, does not
hold.


By considering $\sum _{n=1} ^{+\infty} \dfrac{1}{n}$, which
diverges, and $\sum _{n=1} ^{+\infty} \dfrac{1}{n^2}$, which
converges, one sees that $r=1$ may occur both for divergent and
for convergent series, so no conclusion can be drawn when $r=1$.
\end{pf}

\begin{thm}[Ratio Test]
Let $\sum _{n=1} ^{+\infty} a_n$  be  a series of strictly positive
terms. Put $r=\lim \sup \dfrac{a_{n+1}}{a_n} $. Then the series
converges if $r <1$ and
 diverges if $\lim \inf \dfrac{a_{n+1}}{a_n} >1$. The test is
 inconclusive if these limits equal $1$.
\end{thm}
\begin{pf}
If $r<1$, choose $r'$ with $r<r'<1$. Then there exists $N\in \BBN$ such that $$a_{N+1} \leq
r'a_{N}, \quad a_{N+2} \leq r'a_{N+1}, \quad a_{N+3} \leq r'a_{N+2},
\ldots \quad  a_{N+t} \leq r'a_{N+t-1}.
$$Multiplying all these inequalities together,
$$a_{N+t}\leq a_N (r')^{t}.   $$
Putting $N+t =n$ we deduce that
$$a_n \leq a_N (r')^{-N}(r')^{n}.   $$
Since $ a_N (r')^{-N}$ is a constant and $0<r'<1$, we may use direct comparison
between $\sum _{n=1} ^{+\infty} a_n$ and the converging geometric
series $a_N (r')^{-N}\sum _{n=1} ^{+\infty}(r')^{n}$, concluding that
$\sum _{n=1} ^{+\infty} a_n$ converges.



If $\lim \inf \dfrac{a_{n+1}}{a_n}>1$, then there exists $N\in\BBN$
such that $a_{n+1}\geq a_n \geq a_N>0$ for all $n \geq N$.  This
means that the condition $a_n \to 0$, necessary for convergence,
does not hold.


By considering $\sum _{n=1} ^{+\infty} \dfrac{1}{n}$, which
diverges, and $\sum _{n=1} ^{+\infty} \dfrac{1}{n^2}$, which
converges, one sees that these limits may equal $1$ both for
divergent and for convergent series.
\end{pf}

\begin{rem}
The root test is more general than the ratio test, as can be seen
from Theorem \ref{thm:ratio-is-inferior-to-root}.
\end{rem}

\begin{exa}
Since $$ \dfrac{\frac{(n+1)!}{(n+1)^{n+1}}}{\frac{n!}{n^n}} =
\dfrac{1}{\left(1+\dfrac{1}{n}\right)^n} \rightarrow
\dfrac{1}{e}<1$$the series  $\sum _{n=1} ^{+\infty} \dfrac{n!}{n^n}$
converges.
\end{exa}
\begin{exa}
Since $$ \left(\dfrac{(n!)^n}{n^{n^2}}\right)^{1/n} =
\dfrac{n!}{n^n} \rightarrow 0$$the series $\sum _{n=1} ^{+\infty}
\dfrac{(n!)^n}{n^{n^2}}$ converges.
\end{exa}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}True or False:  If the infinite series $\dsum _{n=1} ^{+\infty} a_n$  of strictly positive terms, converges,
then $\dsum _{n=1} ^{+\infty} a_n ^2$ must necessarily  converge.
\begin{answer}True. For, we must have $a_n\to 0$ and so eventually
$0 < a_n \leq 1 $. This means that eventually $a_n ^2 \leq a_n$ and
the series of squares converges by direct comparison to the original
series.
\end{answer}
\end{pro}
\begin{pro}True or False: If the infinite series $\dsum _{n=1} ^{+\infty} a_n$ of strictly positive terms converges,
then $\dsum _{n=1} ^{+\infty} \sin (a_n)$ must necessarily converge.
\begin{answer}True. Since $a_n\to 0$, we must have $\sin a_n \sim a_n$
and so the series converges by asymptotic comparison to the original
series. (Recall that $\lim _{x\to 0} \dfrac{\sin x}{x}=1$.)
\end{answer}
\end{pro}
\begin{pro}True or False: If the infinite series $\dsum _{n=1} ^{+\infty} a_n$ of strictly positive terms converges,
then $\dsum _{n=1} ^{+\infty} \tan (a_n)$ must necessarily converge.
\begin{answer}True. Since $a_n\to 0$, we must have $\tan a_n \sim a_n$
and so the series converges by asymptotic comparison to the original
series. (Recall that $\lim _{x\to 0} \dfrac{\tan x}{x}=1$.)
\end{answer}
\end{pro}
\begin{pro}True or False: If the infinite series $\dsum _{n=1} ^{+\infty} a_n$ converges,
then $\dsum _{n=1} ^{+\infty} \cos (a_n)$ must necessarily converge.
\begin{answer}False. Since $a_n\to 0$, we must have $\cos a_n \to 1$
and so the series diverges by the $n$-th Term Test.
\end{answer}
\end{pro}
\begin{pro} Use the comparison tests to shew that if $a_n
> 0$ and $\dis{\sum _{n = 1} ^\infty a_n}$ converges, then
$\dis{\sum _{n = 1} ^\infty \frac{a_n}{n}}$ converges. \\
\begin{answer}
Only the fact that $\dfrac{a_n}{n} \leq a_n$ is needed here.
\end{answer}
\end{pro}
\begin{pro}Give an example of a series converging to $1$ with $n$-th term
$a_n>0$ satisfying $a_n <<\dfrac{1}{n^2}$. (That is, the $n$-th term
goes to zero faster than the reciprocal of a square.)
\begin{answer}
Take $a_n = \dfrac{1}{2^n}$. Then $a_n << \dfrac{1}{n^2}$ and $\dsum
_{n=1} ^{+\infty}\dfrac{1}{2^n}=1$.
\end{answer}
\end{pro}
\begin{pro}Give an example of a  converging series of strictly positive terms $\dsum _{n=1} ^{+\infty}
a_n$ such that $\dsum _{n=1} ^{+\infty} (a_n)^{1/n}$ also converges.
\begin{answer}
Take $a_n = \dfrac{1}{2^{n^2}}$ or $a_n=\dfrac{1}{n^{2n}}$.
\end{answer}
\end{pro}

\begin{pro} Give an example of a  converging series of strictly positive terms $\dsum _{n=1} ^{+\infty}
a_n$ such that $\dsum _{n=1} ^{+\infty} (a_n)^{1/n}$ diverges.
\begin{answer}
Take $a_n = \dfrac{1}{2^n}$ or $a_n=\dfrac{1}{n^n}$.
\end{answer}
\end{pro}

\begin{pro}Give an example of a converging series of strictly positive
terms $a_n$ such that $\lim _{n\to +\infty} (a_n)^{1/n}$ does not
exist.
\begin{answer}
For even $n\geq 0$ take $a_n = \dfrac{1}{2^n}$ and for odd $n\geq 1$
take $a_n = \dfrac{1}{3^n}$. Then $$ \sum _{n=0} ^{+\infty} a_n =
\sum _{n=0} ^{+\infty} \dfrac{1}{2^{2n}} + \sum _{n=1} ^{+\infty}
\dfrac{1}{3^{2n-1}}, $$ and both series on the right are geometric
convergent series. However if $n$ is even, $ (a_n)^{1/n} =
\dfrac{1}{2}$ and if $n$ is odd $ (a_n)^{1/n} = \dfrac{1}{3}$
meaning that $\lim _{\ngrows} (a_n)^{1/n}$ does not exist.
\end{answer}
\end{pro}

\begin{pro}Test $\dis{\sum _{n = 1} ^\infty \frac{3^n}{n^{2n}}}$ using both direct comparison
and the root test. \\
\begin{answer}
By the root test $$a_n ^{1/n}=\left(\frac{3^n}{n^{2n}}\right)^{1/n}
= \dfrac{3}{n}\rightarrow 0 <1,
$$and the series converges. By direct comparison, for $n \geq 3$ we
have $$ \dfrac{3^n}{n^{2n}} = \dfrac{3^n}{n^n}\cdot \dfrac{1}{n^n}
\leq 1\cdot \dfrac{1}{n^n} \leq \dfrac{1}{n^3},
$$and the series converges by direct comparison to $\dis{\sum _{n = 1} ^\infty
\frac{1}{n^3}}$.
\end{answer}
\end{pro}
\begin{pro}Let $\mathscr{S}$ be the set of positive integers none of
whose digits in its decimal representation is a $0$:
$$\mathscr{S} = \{1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,19,21,\cdots\}.  $$
Prove that the series $\sum _{n\in \mathscr{S}}\dfrac{1}{n}$
converges.
\begin{answer}
We divide the sum into decimal blocks. There are $9^{k+1}$ integers
with $k+1$ digits in the interval $[10^k; 10^{k+1}[$ that do not have a $0$
in their decimal representation. Since each such integer is at least $10^k$,
$$\sum _{n\in \mathscr{S}}\dfrac{1}{n} = \sum _{k=0} ^{+\infty} \sum _{n\in [10^k; 10^{k+1}[\cap \mathscr{S}} \dfrac{1}{n}
\leq  \sum _{k=0} ^{+\infty}   9^{k+1}\left(\dfrac{1}{10^k}\right) =
90.$$

\end{answer}
\end{pro}
\begin{pro}
Let $d(n)$ be the number of strictly positive divisors of the
integer $n$. Prove that $d(n)\leq 2\sqrt{n}$. Use this to prove that
$$ \sum _{n\geq 1} \dfrac{d(n)}{n^2} $$converges.
\end{pro}

\begin{pro}
Let $p_n$ be the $n$-th prime. Thus $p_1=2$, $p_2=3$, $p_3=5$, etc.
Put $a_1=p_1$ and $a_{n+1} = p_1p_2\cdots p_n+1$ for $n\geq 1$. Find
$$\sum _{n=1} ^{+\infty}\dfrac{1}{a_n} . $$
\end{pro}
\begin{pro}
Determine whether $\sum _{n\geq 2}a_n$ converges, when $a_n$ is
given as below.
\begin{multicols}{2}
\begin{enumerate}


\item $\left(1+\dfrac 1n\right)^n - e$.

\item $\cosh^\alpha n - \sinh^\alpha n$.

\item \mbox{$\log \dfrac{(n^3+1)^2}{(n^2+1)^3} $.}

\item $\sqrt[n]{n+1} - \sqrt[n]{n}$.

\item $\arccos\left(\dfrac {n^3+1}{n^3+2} \right)$.


\item $\dfrac {a^n}{1+a^{2n}}$.


\item $\dfrac{2\cdot 4\cdot 6\cdots(2n)}{n^n}$.

\item $\dfrac{1!+2!+\dots+n!}{(n+2)!}$.

\item $\dfrac{1!-2!+\dots\pm n!}{(n+1)!}$.

\item $\dfrac{(\log n)^n}{n^{\log n}}$.

\item $\dfrac1{(\log n)^{\log n}}$.

\end{enumerate}
\end{multicols}
\begin{answer}
\noindent
\begin{multicols}{2}
\begin{enumerate}

\item
   $a_n \sim -\dfrac e{2n} \implies $ diverges.

\item $a_n \sim \dfrac \alpha{2^{\alpha-1}}e^{n(\alpha-2)} \implies $
             converges  iff $\alpha < 2$.

\item $a_n \sim -\dfrac 3{n^2} \implies $ converges.

\item $a_n \sim \dfrac 1{n^2} \implies $ converges.

\item $a_n \sim \sqrt{\dfrac 2{n^3}} \implies $ converges.

\item converges iff $|a| \ne 1$.

\item Converges.

\item $a_n \le \dfrac{(n-1)(n-1)!+n!}{(n+2)!} \le \dfrac 2{(n+1)(n+2)} \implies $
             converges.




\item Converges.

\item  $a_n \not\longrightarrow 0\implies $ diverges.

\item $a_n= \dfrac1{n^{\log\log n}} \implies $ converges.


\end{enumerate}
\end{multicols}
\end{answer}

\end{pro}


\end{multicols}

\section{Summation by Parts}
In this section we consider series whose terms have arbitrary signs.
We first need the following result.
\begin{thm}[Summation by Parts]
Let $A_n = \sum _{0\leq k \leq n} a_k$,\  $A_{-1}=0$. Then for $ p
\leq q$,
$$ \sum _{p \leq k \leq q} a_kb_k =   \sum _{p \leq k \leq q-1}A_k(b_{k}-b_{k+1}) +A_qb_q-A_{p-1}b_p. $$

\end{thm}
\begin{pf}Changing a subindex,
$$\begin{array}{lll}
\sum _{p \leq k \leq q} a_kb_k  & = & \sum _{p \leq k \leq q}
(A_k-A_{k-1})b_k\\ &  = & \sum _{p \leq k \leq q}A_kb_k-\sum _{p
\leq k \leq q}A_{k-1}b_k \\
& = & \sum _{p \leq k \leq q-1}A_kb_k-\sum _{p \leq k \leq
q-1}A_{k}b_{k+1} +A_{q}b_q -A_{p-1}b_p.
\end{array}$$
giving the result.
\end{pf}
\begin{rem}
An alternative and more symmetric formulation will be given once we
introduce Riemann-Stieltjes integration.
\end{rem}
We now obtain a convergence test.
\begin{thm}[Dirichlet's Test] The series $\sum _{k\geq 0}a_kb_k$
converges if
\begin{enumerate}
\item the partial sums $A_n = \sum _{k=0} ^n a_k$ are bounded
\item $b_{n}\geq b_{n+1}$
\item $b_n\to 0$ as $\ngroes$
\end{enumerate}
\end{thm}
\begin{pf}
Let $M>0$ be such that $\absval{A_n}\leq M$ for all $n$. Since the
$b_k$ decrease and $b_n \to 0$, each $b_k \geq 0$. Given
$\varepsilon>0$, choose $N$ so large that $b_N \leq
\dfrac{\varepsilon}{2M}$. By summation by parts, for $N\leq p \leq q$,
$$\absval{\sum _{p \leq k \leq q} a_kb_k} \leq \sum _{p \leq k \leq q-1}\absval{A_k}(b_{k}-b_{k+1}) +\absval{A_q}b_q+\absval{A_{p-1}}b_p
\leq M(b_p-b_q)+Mb_q+Mb_p = 2Mb_p \leq \varepsilon .$$
Hence the partial sums of $\sum _{k\geq 0}a_kb_k$ form a Cauchy
sequence, and the series converges.
\end{pf}


\section{Alternating Series}
A series of the form $\sum _{n=1} ^{+\infty} (-1)^na_n$ where
all the $a_n\geq 0$ is called an {\em alternating series.}
\begin{thm}[Leibniz's Alternating Series Test] The alternating series $\sum _{n=1} ^{+\infty}
(-1)^na_n$ converges if all the following conditions are met
\begin{itemize}
\item the $a_n$ eventually decrease, that is, $a_{n+1} \leq a_n$ for
all $n\geq N$.
\item $a_n\rightarrow 0$
\end{itemize}

\end{thm}
\begin{exa}
The series $\sum _{n=1} ^{+\infty} (-1)^{n+1}\dfrac{1}{n}$ converges
by Leibniz's Test. In fact, one can prove that it equals $\log 2$.
\end{exa}
\section{Absolute Convergence}
If $\sum _{n=1} ^{+\infty} |a_n|$ converges then $\sum _{n=1}
^{+\infty} a_n$ converges. The converse is not true.
\begin{exa}
Since $\Big|\dfrac{\sin n}{n^2}\Big| \leq \dfrac{1}{n^2}$, $\sum
_{n=1} ^{+\infty} \Big|\dfrac{\sin n}{n^2}\Big|$ converges by the
comparison test. Thus $\sum _{n=1} ^{+\infty} \dfrac{\sin n}{n^2}$
converges absolutely and so it converges.\end{exa} \begin{exa}
Determine whether the following  two infinite series converge: (I)
$\displaystyle{\sum_{n=2}^\infty {(-1)^n\sin(3n)\over n^2}}$, \quad
(II) $\displaystyle{\sum_{n=1}^\infty {(-1)^nn\over
n^2+2}}$.\end{exa}
\begin{solu} We have $$ \Big|(-1)^n\dfrac{\sin 3n}{n^2}\Big| \leq
\dfrac{1}{n^2},
$$so (I) converges absolutely. As for number (II), $f(x) = \dfrac{x}{x^2+2}$ is
decreasing for $x>\sqrt{2}$ (take the first derivative) and $\dfrac{n}{n^2+2} \rightarrow
0$, so it converges by Leibniz's Test.
\end{solu}

\chapter{Real Functions of One Real Variable}
\section{Limits of Functions}
\begin{prop}[Cauchy-Heine, Sinistral
Limit]\label{prop:sinistral-limit}
Let $f:\loro{a}{b}\rightarrow \BBR$ and let $x_0\in\loro{a}{b}$. The
following are equivalent.
\begin{enumerate}
\item $\forall \varepsilon > 0$, $\exists \delta >0$ such that
$$ x_0-\delta <x<x_0 \implies \absval{f(x)-L}<\varepsilon .$$
\item For each sequence $\seq{x_n}{n=1}{+\infty}$ of points in the
interval $\loro{a}{b}$ with $x_n<x_0$, $x_n\rightarrow x_0 \implies
f(x_n)\rightarrow L$.
\end{enumerate}
If either condition is fulfilled we say that {\em $f$ has a
sinistral limit $f(x_0-)$ as $x$ increases towards $x_0$} and we
write
$$f(x_0-)=\lim _{x\rightarrow x_0-}f(x) =\lim _{x\nearrow x_0}f(x).   $$
\end{prop}
\begin{pf}
\begin{enumerate}
\item[1$\implies$2]  Suppose that $\forall \varepsilon > 0$, $\exists \delta >0$ such that
$$ x_0-\delta <x<x_0 \implies \absval{f(x)-L}<\varepsilon .$$ Let
$x_n<x_0$, $x_n\rightarrow x_0$. Then $$\absval{x_n-x_0}<\delta
\implies x_0-\delta <x_n<x_0+\delta $$ for sufficiently large $n$.
But we are assuming that $x_n<x_0$, so in fact we have $x_0-\delta
<x_n<x_0$. By our assumption then $\absval{f(x_n)-L}<\varepsilon$,
and so 1$\implies$2.
\item[2$\implies$1] Suppose that for each sequence $\seq{x_n}{n=1}{+\infty}$ of points in the
interval $\loro{a}{b}$ with $x_n<x_0$, $x_n\rightarrow x_0 \implies
f(x_n)\rightarrow L$. If it were not true that $f(x)\rightarrow L$
as $x\nearrow x_0$, then there exists some  $\varepsilon_0>0$
such that for all $\delta >0$ we can find $x$ with
$$x_0-\delta <x<x_0 \quad \mathrm{and}\quad \absval{f(x)-L}\geq
\varepsilon_0.$$In particular, for each strictly positive integer
$n$ we can find $x_n$ satisfying
$$x_0-\dfrac{1}{n} <x_n<x_0 \quad \mathrm{and}\quad \absval{f(x_n)-L}\geq
\varepsilon_0,$$a contradiction to the fact that $f(x_n)\rightarrow
L$.
\end{enumerate}
\end{pf}
In an analogous manner, we have the following.
\begin{prop}[Cauchy-Heine, Dextral Limit] \label{prop:dextral-limit}
Let $f:\loro{a}{b}\rightarrow \BBR$ and let $x_0\in\loro{a}{b}$. The
following are equivalent.
\begin{enumerate}
\item For each sequence $\seq{x_n}{n=1}{+\infty}$ of points in the
interval $\loro{a}{b}$ with  $x_n>x_0$, $x_n\rightarrow x_0 \implies
f(x_n)\rightarrow L$.
\item $\forall \varepsilon > 0$, $\exists \delta >0$ such that
$$ x_0 <x<x_0+\delta \implies \absval{f(x)-L}<\varepsilon .$$
\end{enumerate}
If either condition is fulfilled we say that {\em $f$ has a dextral
limit $f(x_0+)$ as $x$ decreases towards $x_0$} and we write
$$f(x_0+)=\lim _{x\rightarrow x_0+}f(x) =\lim _{x\searrow x_0}f(x).   $$

\end{prop}
Upon combining Propositions \ref{prop:sinistral-limit} and
\ref{prop:dextral-limit} we obtain the following.
\begin{prop}[Cauchy-Heine]\label{prop:limit}
Let $f:\loro{a}{b}\rightarrow \BBR$ and let $x_0\in\loro{a}{b}$. The
following are equivalent.
\begin{enumerate}
\item $f(x_0-)=f(x_0+)$
\item For each sequence $\seq{x_n}{n=1}{+\infty}$ of points in the
interval $\loro{a}{b}$ different from $x_0$, $x_n\rightarrow x_0
\implies f(x_n)\rightarrow L$.
\item $\forall \varepsilon > 0$, $\exists \delta >0$ such that
$$ 0 <\absval{x-x_0}<\delta \implies \absval{f(x)-L}<\varepsilon .$$
\end{enumerate}

If either condition is fulfilled we say that {\em $f$ has a
(two-sided) limit $L$ as $x$ tends towards $x_0$} and we write
$$L=\lim _{x\rightarrow x_0}f(x).   $$

\end{prop}
We now prove analogues of the theorems that we proved for limits of
sequences.
\begin{thm}[Uniqueness of Limits]
Let $X\subseteqq \BBR$, $a\in\BBR$, and $f:X\rightarrow \BBR$. If
$\lim _{x\rightarrow a}f(x)=L$ and $\lim _{x\rightarrow a}f(x)=L'$
then $L=L'$.
\end{thm}
\begin{pf}If $L\neq L'$ then take $2\varepsilon =\absval{L-L'}$ in
the definition of limit. There  is $\delta
>0$ such that
$$0<\absval{x-a}<\delta \implies \absval{f(x)-L}<\dfrac{\absval{L-L'}}{2}, \quad
\absval{f(x)-L'}<\dfrac{\absval{L-L'}}{2}.
$$By the Triangle Inequality
$$\absval{L-L'}\leq \absval{L-f(x)}+\absval{f(x)-L'}< \dfrac{\absval{L-L'}}{2}+\dfrac{\absval{L-L'}}{2} = \absval{L-L'},$$
but $\absval{L-L'}<\absval{L-L'}$ is a contradiction.
\end{pf}
\begin{thm}[Local Boundedness]\label{thm:local-boundedness} Let $X\subseteqq \BBR$, $a\in\BBR$, and $f:X\rightarrow \BBR$. If  $\lim _{x\rightarrow
a}f(x)=L$ exists and is finite, then $f$ is bounded in a
neighbourhood of $a$.
\end{thm}
\begin{pf}
Take $\varepsilon = 1$ in the definition of limit. Then there is a
$\delta >0$ such that $$0<\absval{x-a}<\delta \implies
\absval{f(x)-L}<1 \implies \absval{f(x)}<1+\absval{L},$$and so $f$
is bounded on this neighbourhood.
\end{pf}
\begin{thm}[Order Properties of Limits]\label{thm:order-prop-func-limits} Let $X\subseteqq \BBR$, $a\in\BBR$, and $f:X\rightarrow \BBR$. Let  $\lim _{x\rightarrow
a}f(x)=L$ exist and be finite. Then
\begin{enumerate}
\item If $s<L$ then there exists a neighbourhood  $\N{a}$ of $a$ contained in
$X$ such that $\forall x\in\N{a}$, $s<f(x)$.
\item If $L<t$ then there exists a neighbourhood  $\N{a}$ of $a$ contained in
$X$ such that $\forall x\in\N{a}$, $f(x)<t$.
\item If $s<L<t$ then there exists a neighbourhood  $\N{a}$ of $a$ contained in
$X$ such that $\forall x\in\N{a}$, $s<f(x)<t$.
\item If there exists a neighbourhood $\N{a}\subseteqq X$ such that
$\forall x\in\N{a}$, $s\leq f(x)$, then $s\leq L$.
\item If there exists a neighbourhood $\N{a}\subseteqq X$ such that
$\forall x\in\N{a}$, $ f(x)\leq t$, then $ L\leq t$.
\item If there exists a neighbourhood $\N{a}\subseteqq X$ such that
$\forall x\in\N{a}$, $s\leq f(x)\leq t$, then $s\leq L\leq t$.
\end{enumerate}

\end{thm}
\begin{pf}
We have

\begin{enumerate}
\item Take $\varepsilon = L-s>0$ in the definition of limit. There is $\delta >0$ such that
$$ 0<\absval{x-a}<\delta \implies \absval{f(x)-L}<L-s\implies s-L+L<f(x)<2L-s \implies s<f(x),   $$
as claimed.
\item Take $\varepsilon = t-L>0$ in the definition of limit. There is $\delta >0$ such that
$$ 0<\absval{x-a}<\delta \implies \absval{f(x)-L}<t-L\implies L-t+L<f(x)<t-L+L \implies f(x)<t,   $$
as claimed.
\item This follows by (1) and (2).
\item If on the said neighbourhood $\N{a}$ we had, on the contrary, $L<s$ then (2) asserts that there is a
neighbourhood  $\mathscr{N}'_{a}\subseteqq \N{a}$ such that
$f(x)<s$, a contradiction to the assumption that $\forall
x\in\N{a}$, $s\leq f(x)$.
\item If on the said neighbourhood $\N{a}$ we had, on the contrary, $L>t$ then (1) asserts that there is a
neighbourhood  $\mathscr{N}'_{a}\subseteqq \N{a}$ such that
$f(x)>t$, a contradiction to the assumption that $\forall
x\in\N{a}$, $f(x)\leq t$.
\item This follows by (4) and (5).
\end{enumerate}
\end{pf}
Analogous to the Sandwich Theorem for sequences we have
\begin{thm}[Sandwich Theorem] Assume that $a, b, c$ are functions
defined on a neighbourhood $\N{x_0}$ of a point $x_0$ except
possibly at $x_0$ itself. Assume moreover that in $\N{x_0}$ they
satisfy the inequalities $a(x)\leq b(x)\leq c(x)$. Then
$$\lim _{x\rightarrow x_0}a(x) = L = \lim _{x\rightarrow x_0}c(x)\implies \lim _{x\rightarrow x_0}b(x) = L.  $$
\end{thm}
\begin{pf}
For all $\varepsilon >0$ there is $\delta >0$ such that
$$0<\absval{x-x_0}<\delta \implies \absval{a(x)-L}<\varepsilon \quad \mathrm{and}\quad \absval{c(x)-L}<\varepsilon
\implies L-\varepsilon <a(x) <L+\varepsilon \quad \mathrm{and}\quad
L-\varepsilon <c(x) <L+\varepsilon .
$$ If we now consider $x\in \N{x_0}\cap
\left\{x:0<\absval{x-x_0}<\delta \right\}$ then
$$  L-\varepsilon <a(x) \leq b(x)\leq c(x) <L+\varepsilon \implies L-\varepsilon <b(x)<L+\varepsilon \implies \absval{b(x)-L}<\varepsilon,$$
whence $\lim _{x\rightarrow x_0}b(x) = L$.
\end{pf}
\begin{thm}\label{thm:algebra-of-fun-limits} Let $X\subseteqq \BBR$, $a\in\BBR$, and $f, g:X\rightarrow \BBR$. Let $(L,L', \lambda)\in\BBR^3$. Then

\begin{enumerate}
\item $\lim _{x\rightarrow
a}f(x)=L \implies \lim _{x\rightarrow a}\absval{f(x)}=\absval{L}$.
\item $\lim _{x\rightarrow
a}f(x)=0 \iff \lim _{x\rightarrow a}\absval{f(x)}=0$.
\item $\lim _{x\rightarrow
a}f(x)=L, \lim _{x\rightarrow a}g(x)=L'  \implies \lim
_{x\rightarrow a}(f(x)+\lambda g(x))=L+\lambda L'$.
\item $\lim _{x\rightarrow
a}f(x)=L, \lim _{x\rightarrow a}g(x)=L'  \implies \lim
_{x\rightarrow a}(f(x)g(x))=LL'$.
\item If $\lim _{x\rightarrow
a}f(x)=0$ and if $g$ is bounded on a neighbourhood $\N{a}$ of $a$,
then  $ \lim _{x\rightarrow a}f(x)g(x)=0$.
\item $\lim _{x\rightarrow
a}f(x)=L, \lim _{x\rightarrow a}g(x)=L'\neq 0  \implies \lim
_{x\rightarrow a}\left(\dfrac{f(x)}{g(x)}\right)=\dfrac{L}{L'}$.
\end{enumerate}

\end{thm}
\begin{pf}

\begin{enumerate}
\item This follows from the inequality $\absval{\absval{f(x)}-\absval{L}}\leq
\absval{f(x)-L}$.
\item This follows from the inequalities $-\absval{f(x)}\leq f(x)\leq \absval{f(x)}$
and $\min (-f(x), f(x))\leq \absval{f(x)}\leq \max (-f(x), f(x))$
and the Sandwich Theorem.
\item For all $\varepsilon >0$ there are $\delta _1>0$ and $\delta
_2>0$ such that $$ 0<\absval{x-a}<\delta _1\implies
\absval{f(x)-L}<\varepsilon, \quad \mathrm{and} \quad
0<\absval{x-a}<\delta _2\implies \absval{g(x)-L'}<\varepsilon .
$$Take $\delta = \min (\delta _1, \delta _2)$. Then
$$ 0<\absval{x-a}<\delta \implies
\absval{f(x)+\lambda g(x) - (L+\lambda L')}\leq \absval{f(x)- L}+
\absval{\lambda}\absval{g(x) - L'} <(1+\absval{\lambda})\varepsilon
.
$$Since the dextral side can be made arbitrarily small, the
assertion follows.
\item For all $\varepsilon >0$ there are $\delta _1>0$ and $\delta
_2>0$ such that $$ 0<\absval{x-a}<\delta _1\implies
\absval{f(x)-L}<\varepsilon, \quad \mathrm{and} \quad
0<\absval{x-a}<\delta _2\implies \absval{g(x)-L'}<\varepsilon .
$$Also, by Theorem \ref{thm:local-boundedness}, $g$ is locally bounded and
so there exists $B>0$, and $\delta _3>0$ such that
$$0<\absval{x-a}<\delta _3 \implies \absval{g(x)}<B.$$
Take $\delta = \min (\delta _1, \delta _2, \delta _3)$. Then
$$\absval{f(x)g(x)-LL'} = \absval{(f(x)-L)g(x) + L(g(x)-L')} \leq
\absval{f(x)-L}\absval{g(x)}+\absval{L}\absval{g(x)-L'}<(B+\absval{L})\varepsilon
.
$$As the dextral side can be made arbitrarily small, the result
follows.

\item For all $\varepsilon >0$ there are $\delta _1>0$, $B>0$, and $\delta
_2>0$ such that $$ 0<\absval{x-a}<\delta _1\implies
\absval{f(x)}<\varepsilon, \quad \mathrm{and} \quad
0<\absval{x-a}<\delta _2\implies \absval{g(x)}<B .
$$
Take $\delta = \min (\delta _1, \delta _2)$. Then
$$\absval{f(x)g(x)} \leq  \absval{B}\absval{f(x)} <B\varepsilon
.
$$As the dextral side can be made arbitrarily small, the result
follows.


\item First  $\absval{g(x)}\rightarrow
\absval{L'}$ as $x\rightarrow a$ by part (1). Hence, for
$\varepsilon = \absval{\dfrac{L'}{2}}>0$ there is a sufficiently
small  $\delta'>0$ such that
$$ \absval{\absval{g(x)}-\absval{L'}} < \dfrac{\absval{L'}}{2} \implies \absval{L'}- \dfrac{\absval{L'}}{2}<\absval{g(x)}<\absval{L'}+ \dfrac{\absval{L'}}{2}
\implies  \dfrac{\absval{L'}}{2}<\absval{g(x)}<
\dfrac{3\absval{L'}}{2},$$that is, $\absval{g(x)}$ is bounded away
from $0$ for $x$ sufficiently close to $a$. Now, for all $\varepsilon
>0$ there are $\delta _1>0$ and   $\delta _2>0$ such that $$
0<\absval{x-a}<\delta _1\implies \absval{f(x)-L}<\varepsilon, \quad
\mathrm{and} \quad 0<\absval{x-a}<\delta _2\implies
\absval{g(x)-L'}<\varepsilon .
$$For $\delta = \min (\delta _1, \delta _2, \delta' )$,
$$0 <\absval{x-a}<\delta \implies L-\varepsilon<f(x)<L+\varepsilon, \quad \dfrac{\absval{L'}}{2}<\absval{g(x)}<
\dfrac{3\absval{L'}}{2}, \quad \mathrm{and} \quad
L'-\varepsilon<g(x)<L'+\varepsilon. $$Hence
$$ \absval{\dfrac{f(x)}{g(x)}-\dfrac{L}{L'}} = \absval{\dfrac{L'f(x)-Lg(x)}{g(x)L'}} =
\absval{\dfrac{L'(f(x)-L)-L(g(x)-L')}{g(x)L'}}\leq
\dfrac{\absval{L'}\absval{f(x)-L}+\absval{L}\absval{g(x)-L'}}{\absval{g(x)}\absval{L'}}<\dfrac{2(\absval{L'}+\absval{L})\varepsilon}{\absval{L'}\absval{L'}},
$$which gives the result.

\end{enumerate}
\end{pf}

In the manner of proof of Proposition \ref{prop:sinistral-limit}, we
may prove the following two propositions.
\begin{prop}[Cauchy-Heine, Limit at
$+\infty$]\label{prop:limit+infty} Let
$f:\loro{a}{+\infty}\rightarrow \BBR$. The following are equivalent.
\begin{enumerate}
\item For each sequence $\seq{x_n}{n=1}{+\infty}$ of points in the
interval $\loro{a}{+\infty}$, $$x_n\rightarrow +\infty \implies
f(x_n)\rightarrow L.$$
\item $\forall \varepsilon
 > 0$, $\exists M$, $M>\max (0,a)$, such that
$$ x\geq M \implies \absval{f(x)-L}<\varepsilon .$$
\end{enumerate}
If either condition is fulfilled we say that {\em $f$ has a  limit
$L$ as $x$ tends towards $+\infty$} and we write
$$L=\lim _{x\rightarrow +\infty}f(x).  $$

\end{prop}
\begin{prop}[Cauchy-Heine, Limit at
$-\infty$]\label{prop:limit-infty} Let
$f:\loro{-\infty}{a}\rightarrow \BBR$. The following are equivalent.
\begin{enumerate}
\item For each sequence $\seq{x_n}{n=1}{+\infty}$ of points in the
interval $\loro{-\infty}{a}$, $$x_n\rightarrow -\infty \implies
f(x_n)\rightarrow L.$$
\item $\forall \varepsilon > 0$, $\exists M$, $M<\min (0,a)$, such that
$$ x\leq M \implies \absval{f(x)-L}<\varepsilon .$$
\end{enumerate}
If either condition is fulfilled we say that {\em $f$ has a  limit
$L$ as $x$ tends towards $-\infty$} and we write
$$L=\lim _{x\rightarrow -\infty}f(x).  $$
\end{prop}
\begin{df}
We write $\lim _{x\rightarrow a+} f(x)=+\infty$ or $\lim _{x\searrow
a} f(x)=+\infty$ if $\forall M>0$, $\exists \delta > 0$ such that
$$x\in \loro{a}{a+\delta}\implies f(x)>M.  $$Similarly,
we write $\lim _{x\rightarrow a-} f(x)=+\infty$ or $\lim _{x\nearrow
a} f(x)=+\infty$  if $\forall M>0$, $\exists \delta
> 0$ such that $$x\in \loro{a-\delta}{a}\implies f(x)>M.
$$Finally, we write $\lim _{x\rightarrow a}
f(x)=+\infty$ if $\forall M>0$, $\exists \delta
> 0$ such that $$x\in \loro{a-\delta}{a+\delta}\implies f(x)>M.
$$
\end{df}
\begin{df}
We write $\lim _{x\rightarrow a+} f(x)=-\infty$ or $\lim _{x\searrow
a} f(x)=-\infty$ if $\forall M<0$, $\exists \delta > 0$ such that
$$x\in \loro{a}{a+\delta}\implies f(x)<M.  $$Similarly,
we write $\lim _{x\rightarrow a-} f(x)=-\infty$ or $\lim _{x\nearrow
a} f(x)=-\infty$  if $\forall M<0$, $\exists \delta
> 0$ such that $$x\in \loro{a-\delta}{a}\implies f(x)<M.
$$Finally, we write $\lim _{x\rightarrow a}
f(x)=-\infty$ if $\forall M<0$, $\exists \delta
> 0$ such that $$x\in \loro{a-\delta}{a+\delta}\implies f(x)<M.
$$
\end{df}

\begin{thm}\label{thm:limit-composition}
Let $X, Y$ be subsets of $\BBR$,  $a\in X$ and $b\in Y$,
$f:X\rightarrow \BBR$, $g:Y\rightarrow \BBR$ such that
$f(X)\subseteqq Y$, and let $L\in \BBR$. Then
$$ \lim _{x\rightarrow a} f(x)=b \quad  \mathrm{and} \quad  \lim _{x\rightarrow b} g(x)=L \quad \implies
\quad  \lim _{x\rightarrow a} (g\circ f)(x)=L.  $$
\end{thm}
\begin{pf}

\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Prove that $\lim _{x\rightarrow 0}\sin \dfrac{1}{x}$ does not exist.
\begin{answer}
Put $a_n = \dfrac{1}{\left(2n-\dfrac{1}{2}\right)\pi}$,  $b_n =
\dfrac{1}{\left(2n+\dfrac{1}{2}\right)\pi}$ for integer $n\geq 1$.
Then $a_n\rightarrow 0$ and $b_n\rightarrow 0$, but $\sin
\dfrac{1}{a_n}\rightarrow -1$ and $\sin \dfrac{1}{b_n}\rightarrow
+1$, so the limit does not exist in view of Proposition
\ref{prop:limit}.
\end{answer}
\end{pro}
\begin{pro}
Let $m,n$ be strictly positive integers. Prove that $\lim
_{x\rightarrow 1} \dfrac{x^n-1}{x^m-1} = \dfrac{n}{m}$.
\end{pro}

\begin{pro}Let $X\subseteqq \BBR$, $a\in\BBR$, and $f,g:X\rightarrow
\BBR$. If $f(x)\rightarrow +\infty$ and there exists a neighbourhood
$\N{a}\subseteqq X$ of $a$ where $f(x)\leq g(x)$, prove that
$g(x)\rightarrow +\infty$.

\end{pro}
\begin{pro} Let $X\subseteqq \BBR$, $a\in\BBR$, and $f, g:X\rightarrow \BBR$.
Suppose that $\lim _{x\rightarrow a} f(x)= +\infty$. Demonstrate
that

\begin{enumerate}
\item If  $\lim _{x\rightarrow a} g(x)= +\infty$, then $\lim _{x\rightarrow a} (f(x)+g(x))=
+\infty$.
\item If  $\lim _{x\rightarrow a} g(x)= L\in \BBR$, then $\lim _{x\rightarrow a} (f(x)+g(x))=
+\infty$.
\item If  $\lim _{x\rightarrow a} g(x)= +\infty$, then $\lim _{x\rightarrow a} (f(x)g(x))=
+\infty$.
\item If  $\lim _{x\rightarrow a} g(x)= L>0$, then $\lim _{x\rightarrow a} (f(x)g(x))=
+\infty$.
\end{enumerate}
\end{pro}

\begin{pro}[Cauchy Criterion for Functional Limits] Let $X\subseteqq
\BBR$, $a\in X$, and $f:X\rightarrow \BBR$. Prove that $f$ has a
finite limit at $a$  if and only if for all
$\varepsilon >0$ there is a $\delta > 0$ such that
$0<\absval{x'-a}<\delta$ and $0<\absval{x''-a}<\delta$ imply
$\absval{f(x')-f(x'')}<\varepsilon$.
\end{pro}
\end{multicols}


\section{Continuity}
\begin{df}
A function $f:\loro{a}{b}\rightarrow \BBR$ is said to be {\em
continuous at the point  $x_0\in\loro{a}{b}$}, if we can exchange
limiting operations, as in $$\lim _{x\rightarrow x_0}f(x) =
f\left(\lim _{x\rightarrow x_0}x\right) \qquad (=f(x_0)).
$$In other words, a function is continuous at the point $x_0$ if $$
\forall \varepsilon >0, \exists \delta >0, \quad \mathrm{such\ that}
\quad \absval{x-x_0}<\delta \implies
\absval{f(x)-f(x_0)}<\varepsilon.
$$
\end{df}
\begin{df}
A function $f:\lcrc{a}{b}\rightarrow \BBR$ is said to be {\em right
continuous at $a$}, if
$$f(a) =f(a+).$$It is   said to be {\em left
continuous at $b$}, if
$$f(b) =f(b-).$$
\end{df}
In view of the above definitions and Proposition \ref{prop:limit},
we have the following
\begin{thm} The following are equivalent.
\begin{enumerate}
\item The function $f:\loro{a}{b}\rightarrow \BBR$ is  continuous at the
point $x_0\in\loro{a}{b}$.
\item $f(x_0-)=f(x_0)=f(x_0+)$.
\item If $\seq{x_n}{n=1}{+\infty}$, and for all $n$, $x_n\in
\loro{a}{b}$, then $x_n\rightarrow x_0 \implies f(x_n)\rightarrow
f(x_0)$.
\end{enumerate}
\end{thm}

\begin{exa}
What are the points of discontinuity of the function
$$\fun{f}{x}{\left\{\begin{array}{ll} \dfrac{1}{p+q} & \mathrm{if}\ x\in\BBQ\cap \lcro{0}{+\infty}, x=\dfrac{p}{q}, \ \mathrm{in\ lowest\ terms}\\
0 & \mathrm{if}\ x\in
\lcro{0}{+\infty}\setminus \BBQ\\
\end{array}\right.}{\lcro{0}{+\infty}}{\BBR}?$$
\end{exa}
\begin{solu}
Let $a\in\BBQ\cap\lcro{0}{+\infty}$. Since $\lcro{0}{+\infty}\setminus \BBQ$ is dense in
$\lcro{0}{+\infty}$, there exists a sequence
$\seq{a_n}{n=1}{+\infty}$ of points in $\lcro{0}{+\infty}\setminus
\BBQ$ such that $a_n\rightarrow a$ as $\ngroes$. Observe that
$f(a_n)=0$ but $f(a)\neq 0$. Hence $a_n\rightarrow a$ does not imply
$f(a_n)\rightarrow f(a)$ and $f$ is not continuous at $a$. On the
other hand, let $b\in \lcro{0}{+\infty}\setminus \BBQ$. Then
$f(b)=0$. Let $\seq{b_n}{n=1}{+\infty}$ be a sequence in
$\lcro{0}{+\infty}\cap \BBQ$ converging to $b$, $b_n =
\dfrac{p_n}{q_n}$ in lowest terms. By Dirichlet's Approximation
Theorem we must have $p_n\rightarrow +\infty$ and $q_n\rightarrow
+\infty$. Hence $\dfrac{1}{p_n+q_n}\rightarrow 0$. So $f$ is
continuous at $b$. In conclusion, $f$ is continuous at every
irrational in $\lcro{0}{+\infty}$ and discontinuous at every
rational in $\lcro{0}{+\infty}$.
\end{solu}

\begin{prop}[Oscillation of a function at a point] Let $f$ be
bounded. The function $\omega: \dom{f} \to [0;+\infty[$, called the
{\em oscillation of $f$ at $x$} and given by
$$\omega (f, x) = \lim _{\delta \to 0+} \sup \{\absval{f(a)-f(b)}:
\absval{a-x}< \delta, \absval{b-x}< \delta\}
$$is
well-defined. Moreover, $f$ is continuous at $x$ if and only if
$\omega (f, x)=0$. \label{prop:oscillation-at-a-point}\end{prop}
\begin{pf}
Observe that in fact
$$\omega (f, x) = \lim _{\delta \to 0+} \sup \{\absval{f(a)-f(b)}:
\absval{a-x}< \delta, \absval{b-x}< \delta\} = \inf _{\delta > 0}
\sup \{\absval{f(a)-f(b)}: \absval{a-x}< \delta, \absval{b-x}<
\delta\} \leq \sup _{(a,b)\in \dom{f}^2}\absval{f(a)-f(b)} \leq 2\sup _{t\in \dom{f}}\absval{f(t)} < +\infty.
$$This says that $\omega (f, x)$ is well-defined.

\bigskip

\end{pf}



\begin{df}We say that a function $f$ is continuous on the closed
interval $\lcrc{a}{b}$ if it is continuous everywhere on
$\loro{a}{b}$, continuous on the right at $a$ and continuous on the
left at $b$. If $X\subseteqq \BBR$, then $f:X\rightarrow \BBR$ is
said to be {\em continuous on $X$} (or {\em continuous}) if it is
continuous at every element of $X$.
\end{df}
\begin{thm} \label{thm:continuous-iff-open-sets}Let $X\subseteqq \BBR$. A function $f:X\rightarrow \BBR$ is continuous if and only if
the inverse image of an open set is open in $X$.
\end{thm}
\begin{pf}
\begin{enumerate}
\item[$\implies$]
Let $A \subseteqq \BBR$ be an open set. We must shew that
$f^{-1}(A)$ is open in $X$. Let $a\in f^{-1}(A)$. Since $f(a)\in A$
and $A$ is open in $\BBR$, there exists an $r>0$ such that
$\loro{f(a)-r}{f(a)+r}\subseteqq A$. Since $f$ is continuous at $a$,
there exists a $\delta >0$ such that $$\begin{array}{lll}
\absval{x-a}<\delta \implies   \absval{f(x)-f(a)}<r,  & \mathrm{that
\ is}, &  x\in\loro{a-\delta}{a+\delta} \implies f(x)\in
\loro{f(a)-r}{f(a)+r}, \\ & \mathrm{that \ is}, &
x\in\loro{a-\delta}{a+\delta} \implies x\in
f^{-1}\left(\loro{f(a)-r}{f(a)+r}\right),  \\ & \mathrm{that \ is},
& \loro{a-\delta}{a+\delta} \subseteqq
f^{-1}\left(\loro{f(a)-r}{f(a)+r}\right) \\
\end{array}
$$Since $f^{-1}\left(\loro{f(a)-r}{f(a)+r}\right) \subseteqq
f^{-1}(A)$, we have shewn that $ \loro{a-\delta}{a+\delta}
\subseteqq f^{-1}(A)$, which means that for any $a$, a neighbourhood
of $a$ lies entirely in $f^{-1}(A)$, that is, $f^{-1}(A)$ is open.
\item[$\Leftarrow$] Given $a\in X$ and $\varepsilon > 0$, we must find a $\delta >
0$ such that $$\absval{x-a}<\delta \implies
\absval{f(x)-f(a)}< \varepsilon.
$$Now
$$ \absval{f(x)-f(a)}< \varepsilon \implies  f(x)\in \loro{f(a)-\varepsilon}{f(a)+\varepsilon}
\implies x\in
f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right).$$Now,
$\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\subseteqq \BBR$ is open
in $\BBR$, and so, by assumption, so is
$f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right)$. This
means that if $t\in
f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right)$ then
there is a $r >0$ such that
$$\loro{t-r}{t+r}\subseteqq f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right).$$
But clearly $a\in
f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right)$, and
hence there is a $\delta > 0$ such that
$$\loro{a-\delta}{a+\delta}\subseteqq f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right).$$
Thus
$$x\in\loro{a-\delta}{a+\delta}\implies x\in f^{-1}\left(\loro{f(a)-\varepsilon}{f(a)+\varepsilon}\right),$$
or equivalently,
$$ \absval{x-a}<\delta \implies f(x)\in  \loro{f(a)-\varepsilon}{f(a)+\varepsilon}, $$
that is,
$$ \absval{x-a}<\delta \implies \absval{f(x)-f(a)}< \varepsilon, $$
as we needed to shew.
\end{enumerate}\end{pf}

\begin{thm}Let $X\subseteqq \BBR$. A function $f:X\rightarrow \BBR$  is continuous if and only if
the inverse image of a closed set is closed in $X$.
\label{thm:continuous-iff-closed-sets}
\end{thm}
\begin{pf}
Let $F\subseteqq \BBR$ be a closed set. Then $\BBR \setminus F$ is
open. By Theorem  \ref{thm:continuous-iff-open-sets} $f^{-1}(\BBR
\setminus F)$ is open in $X$, and so $X\setminus f^{-1}(\BBR
\setminus F)$ is closed in $X$. But $X\setminus f^{-1}(\BBR
\setminus F) = f^{-1}(F)$, proving the theorem.
\end{pf}

\begin{thm}If two continuous functions agree on a dense set of the reals, then they are identical. That is, if
$X\subseteqq \BBR$ is dense in $\BBR$ and if $f:\BBR\rightarrow
\BBR$ and $g:\BBR\rightarrow \BBR$ satisfy $f(x)=g(x)$ for all $x\in
X$, then $f(x)=g(x)$ for all
$x\in\BBR$.\label{thm:fun-agree-in-dense-set}
\end{thm}
\begin{pf}
Let $a\in\BBR\setminus X$. Since $X$ is dense in $\BBR$, there is a
sequence $\seq{x_n}{n=1}{+\infty}\subseteqq X$ such that
$x_n\rightarrow a$ as $\ngroes$. Notice that since $x_n\in X$, we
have $f(x_n)=g(x_n)$. By continuity
$$f(a) = f\left(\lim _{\ngroes}x_n\right) = \lim _{\ngroes} f(x_n) = \lim _{\ngroes} g(x_n) =
g\left(\lim _{\ngroes}x_n\right) =g(a), $$proving the theorem.
\end{pf}


\begin{thm}[Cauchy's Functional
Equation]\label{thm:cauchy-functional-equation} Let $f$ be a
continuous function defined over the real numbers that satisfies the
{\em Cauchy functional equation}: $$\forall (x,y)\in \BBR^2, \quad
f(x+y)=f(x)+f(y).
$$Then $f$ is linear, that is, there is a constant $c$ such that $f(x)=cx$.
\end{thm}
\begin{pf}
Our method of proof is as follows. We first prove the assertion for
positive integers $n$ using induction. We then extend our result to
negative integers. Thence we extend the result to reciprocals of
integers and after that to rational numbers. Finally we extend the
result to all real numbers by means of Theorem
\ref{thm:fun-agree-in-dense-set}.

\bigskip

We prove by induction that for integer $n \geq 0$, $f(nx)=nf(x)$.
Using the functional equation, $$f(0\cdot x) =f(0\cdot x+ 0\cdot x)
= f(0\cdot x)+f(0\cdot x)\implies f(0\cdot x)=0f(x),$$ and the
assertion follows for $n=0$. Assume $n\geq 1$ is an integer and that
$f((n-1)x) = (n-1)f(x)$. Then
$$f(nx) = f((n-1)x+x) = f((n-1)x)+f(x) =  (n-1)f(x)+ f(x) = nf(x), $$
proving the assertion for all strictly positive integers.

\bigskip

Let $m<0$ be an integer. Then $-m>0$ is a strictly positive integer,
for which the result proved in the above paragraph holds, and thus
$f(-mx) = -mf(x)$. Now,
$$0=f(0) \implies 0=f(mx+(-mx)) = f(mx)+f(-mx)\implies f(mx)=-f(-mx)=-(-mf(x))=mf(x),
$$and the assertion follows for negative integers. We have thus
proved the theorem for all integers.

\bigskip

Assume now that $x=\dfrac{a}{b}$, with $a\in \BBZ$ and $b\in
\BBZ\setminus \{0\}$. Then $f(a) =f(a\cdot 1)= af(1)$ and $f(a) =
f\left(b\dfrac{a}{b}\right) = bf\left(\dfrac{a}{b}\right)$ by the
result we proved for integers and hence
$$af(1)= bf\left(\dfrac{a}{b}\right) \implies  f\left(\dfrac{a}{b}\right) =f(1)\left(\dfrac{a}{b}\right).$$
We have established that for all rational numbers $x\in \BBQ$, $f(x)
= xf(1)$.


\bigskip

We have not used the fact that the function is continuous so far.
Since the rationals are dense in the reals the extension of the
result now follows from Theorem
\ref{thm:fun-agree-in-dense-set}.\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Find all functions $f:\BBR\rightarrow \BBR$, continuous at $x=0$
such that $\forall x\in \BBR$, $f(x)=f(3x)$.
\end{pro}
\begin{pro}
Find all functions $f:\BBR\rightarrow \BBR$, continuous at $x=0$
such that $\forall x\in \BBR$,
$f(x)=f\left(\dfrac{x}{1+x^2}\right)$.
\end{pro}
\begin{pro}
Determine the set of points of discontinuity of the function
$f:\BBR\rightarrow \BBR$, $f:x\mapsto \floor{x}+
\sqrt{x-\floor{x}}$.
\end{pro}
\begin{pro}
What are the points of discontinuity of the function
$\fun{f}{x}{\left\{\begin{array}{ll}  x & \mathrm{if}\ x\in \BBQ\\
0 & \mathrm{if}\ x\in
\BBR\setminus \BBQ\\
\end{array}\right.}{\BBR}{\BBR}$?
\end{pro}
\begin{pro}
What are the points of discontinuity of the function
$\fun{f}{x}{\left\{\begin{array}{ll} 0 & \mathrm{if}\ x\in \BBQ\\
 x & \mathrm{if}\ x\in
\BBR\setminus \BBQ\\
\end{array}\right.}{\BBR}{\BBR}$?
\end{pro}
\begin{pro}
What are the points of discontinuity of the function
$\fun{f}{x}{\left\{\begin{array}{ll} 0 & \mathrm{if}\ x\in \BBQ\\
1 & \mathrm{if}\ x\in
\BBR\setminus \BBQ\\
\end{array}\right.}{\BBR}{\BBR}$?
\end{pro}
\begin{pro}
What are the points of discontinuity of the function
$\fun{f}{x}{\left\{\begin{array}{ll} \cos x & \mathrm{if}\ x\in
\BBQ\\ \sin x & \mathrm{if}\ x\in
\BBR\setminus \BBQ\\
\end{array}\right.}{\BBR}{\BBR}$?
\end{pro}

\begin{pro}
Find all functions $f:\BBR\rightarrow \BBR$, continuous at $x=1$
such that $\forall x\in \BBR$, $f(x)=-f(x^2)$.
\end{pro}
\begin{pro}
Let $a\in \BBR$ be fixed. Find all functions $f:\BBR\rightarrow
\BBR$, continuous everywhere such that $\forall (x,y)\in \BBR^2$,
$f(x-y)=f(x)-f(y)+axy$.
\end{pro}
\begin{pro}
Let $f:\lcro{0}{+\infty}\rightarrow \lcro{0}{+\infty}$, $x\mapsto
\sqrt{x+\sqrt{x+\sqrt{x+\cdots}}}$. Is $f$ right-continuous at $0$?
\begin{answer}
$f(0)=0$, but for $x>0$, $f(x)=\dfrac{1+\sqrt{1+4x}}{2}$, so $f$ is
not right-continuous at $x=0$.
\end{answer}

\end{pro}


\end{multicols}

\section{Algebraic Operations with Continuous Functions}

\begin{thm}[Algebra of Continuous Functions]\label{thm:algebra-of-cont-fun} Let $f, g:\loro{a}{b}\rightarrow
\BBR$ be continuous at the point $x_0\in \loro{a}{b}$. Then
\begin{enumerate}
\item $f+g$ is continuous at $x_0$.
\item $fg$ is continuous at $x_0$.
\item if $g(x_0)\neq 0$, $\dfrac{f}{g}$ is continuous at $x_0$.
\end{enumerate}
\end{thm}
\begin{pf}
This follows directly from Theorem \ref{thm:algebra-of-fun-limits}.
\end{pf}
\begin{thm}\label{thm:continuity-composition}
Let $X, Y$ be subsets of $\BBR$,  $a\in X$ and $b\in Y$,
$f:X\rightarrow \BBR$, $g:Y\rightarrow \BBR$ such that
$f(X)\subseteqq Y$. If $f$ is continuous at $a$ and $g$ is
continuous at $f(a)$, then $g\circ f$ is continuous at $a$.
\end{thm}
\begin{pf}
This follows at once from Theorem \ref{thm:limit-composition}.
\end{pf}

\begin{thm}
Let $f:I\rightarrow \BBR$ be a monotone function, where $I
\subseteqq \BBR$ is a non-empty interval. Then the set of points of
discontinuity of $f$ is either finite or countable.
\end{thm}
With Theorems \ref{thm:algebra-of-cont-fun} and
\ref{thm:continuity-composition} we can now demonstrate the
continuity of many standard functions.
\section{Monotonicity and Inverse Image}
\begin{df}Let $X$ and $Y$ be subsets of $\BBR$.
Let $f:X\rightarrow Y$, and assume that $X$ has at least two
elements. Then $f$ is said to be
\begin{itemize}
\item {\em increasing} if $\forall (a,b)\in X^2$, $a<b \implies f(a)\leq
f(b)$. Equivalently, if the ratio $\dfrac{f(b)-f(a)}{b-a} \geq 0$.
\item {\em strictly increasing} if $\forall (a,b)\in X^2$, $a<b \implies
f(a)< f(b)$. Equivalently, if the ratio $\dfrac{f(b)-f(a)}{b-a} >0$.
\item {\em decreasing} if $\forall (a,b)\in X^2$, $a<b \implies f(a)\geq
f(b)$. Equivalently, if the ratio $\dfrac{f(b)-f(a)}{b-a} \leq 0$.
\item {\em strictly decreasing} if $\forall (a,b)\in X^2$, $a<b \implies
f(a)> f(b)$. Equivalently, if the ratio $\dfrac{f(b)-f(a)}{b-a} <0$.
\end{itemize}
$f$ is said to be {\em monotonic} if it is either increasing or
decreasing, and {\em strictly monotonic} if it is either strictly
increasing or strictly decreasing.
\end{df}
\begin{rem}
Observe that if $f$ is increasing, then $-f$ is decreasing, and
conversely. Similarly for strictly monotonic functions.
\end{rem}

\begin{thm}\label{thm:monotone->injective}
Let $X\subseteqq \BBR$ and let $f:X\rightarrow \BBR$ be strictly
monotone. Then $f$ is injective.
\end{thm}
\begin{pf}
Recall that $f$ is injective if $x\neq y\implies f(x)\neq f(y)$. If
$f$ is strictly increasing then $x<y \implies f(x)<f(y)$ and if $f$
is strictly decreasing then $x<y \implies f(x)>f(y)$. In either
case, the condition for injectivity is fulfilled.
\end{pf}
\begin{thm}Let $I\subseteqq \BBR$ be an interval and let $f:I\rightarrow
f(I)$ be strictly monotone. Then $f^{-1}$ is strictly monotone in
the same sense as $f$.
\end{thm}
\begin{pf}Assume first that $f$ is strictly increasing, put $x=f^{-1}(a)$, $y=f^{-1}(b)$, and assume that $a<b$. If $x\geq y$, then, since
$f$ is  strictly increasing,  $f(x)\geq f(y)$. But then,
$f(f^{-1}(a)) \geq f(f^{-1}(b)) \implies a\geq b$, a contradiction.

\bigskip A similar argument finishes the theorem for $f$ strictly decreasing.

\end{pf}
The following theorem is remarkable, since it does not allude to any
possible continuity of the function in question.
\begin{thm}\label{thm:monotone-from-interval-has-cont-inverse}Let $I\subseteqq \BBR$ be an interval and let $f:I\rightarrow
f(I)$ be strictly monotone. Then $f^{-1}$ is continuous.
\end{thm}
\begin{pf}
 Let $b\in f(I)$, $b=f(a)$, and $\varepsilon
>0$. We must shew that there is $\delta >0$ such that
$$ \absval{y-b}<\delta \implies \absval{f^{-1}(y)-a}<\varepsilon . $$
If $a$ is not an endpoint of $I$, there is an $\alpha >0$ such that
$\loro{a-\alpha}{a+\alpha}\subseteqq I$. Put $\varepsilon ' = \min
(\varepsilon , \alpha)$. Since both $f$ and $f^{-1}$ are
strictly monotone
$$ \absval{f^{-1}(y)-a}<\varepsilon ' \implies a-\varepsilon ' < f^{-1}(y)<
a+\varepsilon ' \implies f(a-\varepsilon ') < f(f^{-1}(y))<
f(a+\varepsilon ') \implies f(a-\varepsilon ') < y< f(a+\varepsilon
').
$$Since $f$ is strictly increasing and $a-\varepsilon '<a$, $f(a-\varepsilon
')<f(a)=b$. Thus there must be an $\eta >0$ such that
$f(a-\varepsilon ') = b-\eta <b$. Similarly, there is an $\eta '$
such that $b< b+\eta '=f(a+\varepsilon ')$. Putting $\eta '' = \min
(\eta , \eta ')$, we have that for all $y\in f(I)$,
$$ \begin{array}{lll} \absval{y-b}<\eta '' &  \implies &  b-\eta '' < y < b+\eta '' \\
& \implies &  b-\eta < y < b + \eta '\\
&  \implies & a-\varepsilon ' < f^{-1}(y)<a+\varepsilon ' \\
& \implies & \absval{f^{-1}(y)-f^{-1}(b)}<\varepsilon ',
\end{array}$$finishing the proof for when $a$ is not an endpoint. If
$a$ were an endpoint, the above proof carries by suppressing one of
$\eta$ or $\eta '$.
\end{pf}
\begin{thm}
A continuous function $f:\lcrc{a}{b}\rightarrow
f\left(\lcrc{a}{b}\right)$ is invertible if and only if it is
strictly monotone.
\end{thm}
\begin{pf}

\begin{enumerate}
\item[$\implies$] Assume $f$ is continuous and invertible.
Since $f$ is injective, $f(a)\neq f(b)$. Assume that $f(a)<f(b)$, if
$f(a)>f(b)$ the argument is similar. We would like to shew that
$a'<b' \implies f(a')<f(b')$. Consider the continuous function
$g:\lcrc{0}{1}\rightarrow \BBR$,
$$g(t)= f((1 - t)a + ta') - f((1 - t)b + tb').$$ We have  $$g(0) =
f(a)-f(b) < 0 \qquad \mathrm{and} \qquad g(1) = f(a')-f(b').$$If
$g(1) = 0$, then we must have $a'=b'$, contradicting $a'<b'$. If $
g(1)
> 0$, then by the Intermediate Value Theorem there must be an $s\in\loro{0}{1}$ such that $g(s) = 0$. This entails
$$(1 - s)a + sa' = (1-s)b +sb' \implies  0 > (1-s)(a -b) =
s(b' -a') > 0,$$absurd. This entails that $g(1) < 0 \implies
f(a')<f(b')$, as wanted.




\item[$\Leftarrow$] Trivially, $f$ is surjective. If $f$ is strictly monotone, then
$f$ is injective by Theorem \ref{thm:monotone->injective}, and thus
$f$ is invertible, by Theorem \ref{thm:invertible<->bijective}.

\end{enumerate}


\end{pf}


\section{Convex Functions}
\begin{df}Let $A\times B\subseteqq \BBR^2$.
A function $f: A \rightarrow B$ is {\em convex} in $A$ if $\forall
(a, b, \lambda)\in A^2 \times [0; 1]$, $$ f(\lambda a + (1 -
\lambda)b) \leq  f(a)\lambda + (1 - \lambda)f(b).$$ It is {\em
strictly convex} if the inequality above is strict. Similarly, a
function $g: A \rightarrow B$ is {\em concave} in $A$ if $\forall
(a, b, \lambda)\in A^2 \times [0; 1]$,
$$ g(\lambda a + (1 - \lambda)b) \geq  g(a)\lambda + (1 -
\lambda)g(b).$$It is {\em strictly concave} if the inequality above
is strict.
\end{df}
\subsection{Graphs of Functions}
\begin{df}
Given a function $f$, its {\em graph} is the set on the plane
$$\Gamma _f = \{(x,y)\in\BBR^2:  y = f(x)\}.$$
\end{df}

\begin{exa}
Figures \ref{fig:x} through \ref{fig:floorx} shew the graphs of a
few standard functions, with which we presume the reader to be
familiar.
\end{exa}

\vspace{1cm}


\section{Classical Functions}

\subsection{Affine Functions}
\begin{df}
An {\em affine function} is one with assignment rule of the form
$x\mapsto ax+b$, where $a, b$ are real constants.
\end{df}

\begin{thm}
The graph of an affine function is a line on the plane. Conversely,
any non-vertical straight line on the plane is the graph of an
affine function.
\end{thm}


\subsection{Quadratic Functions}

\subsection{Polynomial Functions}

\subsection{Exponential Functions}
\begin{prop}\label{prop:e^x}
Let $x\in \BBR$ be  fixed. The sequence
$\seq{\left(1+\dfrac{x}{n}\right)^n}{n>-x}{+\infty}$ is bounded and
strictly increasing. Thus it converges and we define {\em the
natural exponential function} by
$$ \exp  : \BBR\rightarrow \BBR, \qquad \exp(x)  := \lim _{\ngroes} \left(1+\dfrac{x}{n}\right)^n.
$$
\end{prop}
\begin{pf}
Observe that $1+\dfrac{x}{n}>0$ for $n>-x$. Using the AM-GM
Inequality with $x_1 =1,  x_2 = \cdots =x_{n+1}=1+\dfrac{x}{n}$
$$ \left(1+\dfrac{x}{n}\right)^{n/(n+1)} < \dfrac{1 + n\left(1+\dfrac{x}{n}\right)}{n+1} = 1+\dfrac{x}{n+1} \implies \left(1+\dfrac{x}{n}\right)^{n} <
\left(1+\dfrac{x}{n+1}\right)^{n+1},$$whence the sequence is
increasing.
\bigskip

If $0 < x \leq 1$ then $ \left(1+\dfrac{x}{n}\right)^{n}\leq
\left(1+\dfrac{1}{n}\right)^{n}<e$, by Theorem \ref{thm:e}.

\bigskip

If $x>1$ then by the already proved monotonicity,
 $$ \left(1+\dfrac{x}{n}\right)^{n} \leq
\left(1+\dfrac{\floor{x}+1}{n}\right)^{n}
<\left(1+\dfrac{\floor{x}+1}{n(\floor{x}+1)}\right)^{n(\floor{x}+1)}
< e^{\floor{x}+1}.
$$

\bigskip

If $x\leq 0$ then $1+\dfrac{x}{n} \leq 1$ and so
$\left(1+\dfrac{x}{n}\right)^{n} \leq 1$.
\end{pf}
\begin{rem}
By Theorem \ref{thm:e}, $\exp (1) = e$. We will later prove, in
????, that for all $x\in \BBR$, $\exp (x) = e^x$.
\end{rem}
\subsection{Logarithmic Functions}
\subsection{Trigonometric Functions}

\begin{thm}
Let $x\in \loro{0}{\frac{\pi}{2}}$. Then $\sin x < x < \tan x$.
\end{thm}
\begin{pf}

\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
How many solutions does the equation $$ \sin x = \dfrac{x}{100}
$$ have?
\end{pro}
\begin{pro}
Prove that
$$ \frac{2}{\pi}x\leq\sin(x)\leq x, \forall\;x\in \lcrc{0}{\frac{\pi}{2}}. $$
\begin{answer}
Consider a unit circle and take any point $P$ on the circumference
of the circle.

Drop the perpendicular from $P$ to the horizontal line, $M$ being
the foot of the perpendicular and $Q$ the reflection of $P$ at $M$.
(refer to figure)

Let $x = \angle POM.$

For $x$ to be in $[0,\frac{\pi}{2}]$, the point $P$ lies in the
first quadrant, as shown.


The length of line segment $PM$ is $\sin(x)$. Construct a circle of
radius $MP$, with $M$ as the center.


Length of line segment $PQ$ is $2\sin(x)$.

Length of arc $PAQ$ is $2x$.

Length of arc $PBQ$ is $\pi\sin(x)$.

Since $PQ \leq$ length of arc $PAQ$ (equality holds when $x = 0$) we
have $2\sin(x) \leq 2x$. This implies $$\sin(x) \leq x$$

Since length of arc $PAQ$ is $\leq $ length of arc $PBQ$ (equality
holds true when $x = 0$ or $x = \frac{\pi}{2}$), we have $2x \leq
\pi\sin(x)$. This implies $$\frac{2}{\pi}x \leq \sin(x)$$

Thus we have
$$ \frac{2}{\pi}x\leq\sin(x)\leq x, \forall\;x\in [0,\frac{\pi}{2}] $$
\end{answer}

\end{pro}
\begin{pro}
How many solutions does the equation $$ \sin x = \log x
$$ have?
\end{pro}

\begin{pro}
How many solutions does the equation $$ \sin (\sin (\sin (\sin (\sin
(x))))) = \dfrac{x}{3}
$$ have?
\end{pro}
\begin{pro}[Chebyshev Polynomials]

\end{pro}
\begin{pro}[Cardano's Formula]

\end{pro}
\end{multicols}
\subsection{Inverse Trigonometric Functions}




\section{Continuity of Some Standard Functions.}
\subsection{Continuity of Polynomial Functions}
\begin{lem}\label{lem:constant-fun-continuous-ist}
Let $K\in \BBR$ be a constant. The constant function
$f:\BBR\rightarrow \BBR$, $f(x)=K$ is everywhere continuous.
\end{lem}
\begin{pf}
Given $a\in \BBR$ and $\varepsilon >0$, take $\delta = \varepsilon$.
Then clearly
$$ \absval{x-a}<\delta \implies \absval{f(x)-f(a)}<\varepsilon
,$$since $f(x)=f(a)=K$ and the quantity after the implication is
$0<\varepsilon$ and we obtain a tautology.
\end{pf}
\begin{lem}\label{lem:ide-fun-continuous-ist}
The identity function $f:\BBR\rightarrow \BBR$, $f(x)=x$ is
everywhere continuous.
\end{lem}
\begin{pf}
Given $a\in \BBR$ and $\varepsilon >0$, take $\delta = \varepsilon$.
Then clearly
$$ \absval{x-a}<\delta \implies \absval{f(x)-f(a)}<\varepsilon ,$$since
the quantity after the implication is $\absval{x-a}<\delta$ and we
obtain a tautology.
\end{pf}
\begin{lem}
\label{lem:power-fun-continuous-ist} Given a strictly positive
integer $n$, the power function $f:\BBR\rightarrow \BBR$, $f(x)=x^n$
is everywhere continuous.
\end{lem}
\begin{pf}
By Lemma \ref{lem:ide-fun-continuous-ist}, the function $x\mapsto x$
is continuous. Applying this Lemma and the product rule from Theorem
\ref{thm:algebra-of-cont-fun} $n$ times, we obtain the result.
\end{pf}
\begin{thm}[Continuity of Polynomial Functions]Let $n$ be a fixed  positive integer. Let $a_k\in \BBR$, $0 \leq k \leq
n$ be constants. Then the polynomial function $f:\BBR \rightarrow
\BBR$, $f(x)=a_0 + a_1x + a_2x^2 + \cdots + a_nx^n$ is everywhere
continuous.
\end{thm}
\begin{pf}
This follows from Lemma \ref{lem:power-fun-continuous-ist} and the
sum rule from Theorem \ref{thm:algebra-of-cont-fun} applied  $n+1$
times.
\end{pf}

\subsection{Continuity of the Exponential and Logarithmic Functions}
\begin{lem}\label{lem:exp-func-cont-at-0}
Let $a>1$. The exponential function $\BBR\rightarrow \BBR$,
$x\mapsto a^x$ is continuous at $x=0$.
\end{lem}
\begin{pf}
For integral $n>0$ we know that $\lim _{\ngroes }a^{1/n}=1$ by
virtue of Theorem \ref{thm:root-n-of-a-to-1-goes}. We wish to shew
that $a^x \rightarrow 1$ as $x\rightarrow 0$. Observe first that
$\lim _{\ngroes }a^{-1/n}= \lim _{\ngroes }\dfrac{1}{a^{1/n}}=1$
also. Thus given $\varepsilon >0$, and since $a>1$, there is $N>0$
such that $$1-\varepsilon < a^{-1/N}< a^{1/N}<1+ \varepsilon.
$$If $x\in \loro{-\dfrac{1}{N}}{\dfrac{1}{N}}$ then, $$ a^{-1/N}<a^x<a^{1/N}.
$$By the above, this implies that $$1-\varepsilon < a^x<1+ \varepsilon \implies \absval{a^x-1}<\varepsilon \implies  \absval{a^x-a^0}<\varepsilon,  $$
finishing the proof.
\end{pf}
\begin{thm}[Continuity of the Exponential Function]
Let $a>0$, $a\neq 1$. The exponential function $f:\BBR\rightarrow
\loro{0}{+\infty}$, $x\mapsto a^x$ is everywhere continuous.
\end{thm}
\begin{pf}
Assume first that $a>1$. Let us shew that it is continuous at an
arbitrary $u\in \BBR$. If $x\rightarrow u$ then $x-u \rightarrow 0$.
Thus
$$\lim _{x\rightarrow u} a^x =a^u\lim _{x\rightarrow u} a^{x-u} =
a^u\lim _{x-u\rightarrow 0} a^{x-u}=a^u\lim _{t\rightarrow 0} a^{t}
= a^u\cdot 1 = a^u,
$$by Lemma  \ref{lem:exp-func-cont-at-0}, and so the continuity is
established for $a>1$.

\bigskip

If $0<a<1$ then $\dfrac{1}{a}>1$ and by what we have proved,
$x\mapsto \dfrac{1}{a^x}$ is continuous. Then
$$\lim _{x\rightarrow u} a^x  = \lim _{x\rightarrow u} \dfrac{1}{\dfrac{1}{a^x}} = \dfrac{1}{\dfrac{1}{a^u}} = a^u,   $$
proving continuity in the case $0<a<1$.\end{pf}
\begin{lem}\label{lem:logs-are-conts}Let $a>0$, $a\neq 1$. Then $\loro{0}{+\infty}\rightarrow
\BBR$, $x\mapsto \log _ax$ is everywhere continuous.
\end{lem}
\begin{pf}
Its inverse function $\BBR\rightarrow \loro{0}{+\infty}$, $x\mapsto
a^x$, is everywhere continuous and strictly monotone. The result
then follows from Theorem
\ref{thm:monotone-from-interval-has-cont-inverse}.
\end{pf}

\subsection{Continuity of the Power Functions}
\begin{thm}\label{thm:cont-of-power-fun}
Let $p\in \BBR$. Then $\loro{0}{+\infty}\rightarrow
\loro{0}{+\infty}$, $x\mapsto x^p$ is everywhere continuous.
\end{thm}
\begin{pf}
This follows by the continuity of compositions: $x^p = e^{p\log x}$.
\end{pf}



\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Prove the continuity of the function $\BBR \rightarrow
\lcrc{-1}{1}$, $x\mapsto \sin x$.
\end{pro}
\begin{pro}
Prove the continuity of the function $\lcrc{-1}{1}\rightarrow
\lcrc{-\frac{\pi}{2}}{\frac{\pi}{2}}$, $x\mapsto \arcsin x$.
\end{pro}
\begin{pro}
Prove the continuity of the function $\BBR \rightarrow
\lcrc{-1}{1}$, $x\mapsto \cos x$.
\end{pro}
\begin{pro}
Prove the continuity of the function $\lcrc{-1}{1}\rightarrow
\lcrc{0}{\pi}$, $x\mapsto \arccos x$.
\end{pro}
\begin{pro}
Prove the continuity of the function $\BBR \setminus
(2\BBZ+1)\dfrac{\pi}{2} \rightarrow \BBR $, $x\mapsto \tan x$.
\end{pro}
\begin{pro}
Prove the continuity of the function $\BBR\rightarrow
\loro{-\frac{\pi}{2}}{\frac{\pi}{2}}$, $x\mapsto \arctan x$.
\end{pro}
\end{multicols}



\section{Inequalities Obtained by Continuity Arguments}

The technique used in Theorem \ref{thm:cauchy-functional-equation}, of
proving results in a dense set of the real numbers and extending the
result by continuity can be exploited in a variety of situations. We
now use it to give a generalisation of Bernoulli's Inequality.

\begin{thm}[Generalisation of Bernoulli's
Inequality]\label{thm:generalisation-bernoulli-ineq}Let $(\alpha, x)
\in \BBR^2$ with $x\geq -1$. If $0 < \alpha < 1$ then
$$ (1+x)^\alpha \leq 1 + \alpha x. $$
If $\alpha \in \loro{-\infty}{0}\cup \loro{1}{+\infty}$ then
$$ (1+x)^\alpha \geq 1 + \alpha x. $$Equality holds in either case
if and only if $x=0$.
\end{thm}
\begin{pf}
Let $\alpha \in \BBQ$, $0<\alpha <1$. Then $\alpha = \dfrac{m}{n}$
for integers $m, n$ with $1 \leq m < n$. Since $x+1\geq 0$, we may
use the AM-GM Inequality to obtain
$$\begin{array}{lll} (1+x)^{\alpha} & = & (1+x)^{m/n}\\
& = & \left((1+x)^m\cdot 1^{n-m}\right)^{1/n}\\
& \leq & \dfrac{m(1+x)+(n-m)\cdot 1}{n}\\
& = & \dfrac{n+mx}{n}\\
& = & 1+\dfrac{m}{n}x\\
& = & 1 + \alpha x. \end{array}$$

Equality holds when all the factors are the same, that is, when
$1+x=1$, i.e., when $x=0$.

\bigskip Assume now that $\alpha \in \BBR\setminus \BBQ$ with $0 <\alpha <1$. We
can find a sequence of rational numbers
$\seq{a_n}{n=1}{+\infty}\subseteqq \BBQ$ such that $a_n\rightarrow
\alpha$ as $\ngroes$. Then
$$ (1+x)^{a_n}\leq 1 + a_nx, $$whence by the continuity of the power
functions (Theorem \ref{thm:cont-of-power-fun}),
$$ (1+x)^\alpha = \lim _{\ngroes}(1+x)^{a_n}\leq  \lim _{\ngroes} (1 + a_nx) = 1 + \alpha
x,$$giving the result for all real numbers $\alpha$ with $0 < \alpha
< 1$, except that we need to prove that equality holds only for
$x=0$. Take a rational number $r$ with $0 < \alpha < r < 1$, and
recall that we are assuming that $\alpha$ is irrational. Then
$$ (1+x)^{\alpha} = \left((1+x)^{\alpha /r}\right)^r \leq \left(1 + \dfrac{\alpha}{r}x\right)^r .
$$Since the exponent on the right is rational, by what we have
proved above $\left(1 + \dfrac{\alpha}{r}x\right)^r \leq 1 + \alpha x$ with
equality if and only if $x=0$. Hence the full result has been proved
for the case  $\alpha\in\BBR$ with $0 < \alpha < 1$.
\bigskip

Let $\alpha > 1$. If $1+\alpha x < 0$, then obviously $(1+x)^\alpha
>0
> 1+ \alpha x$, and there is nothing to prove. Hence we will assume
that $\alpha x \geq -1$. By the first part of the theorem, since
$0<\dfrac{1}{\alpha}<1$,
$$ (1+\alpha x)^{1/\alpha} \leq 1 + \dfrac{1}{\alpha}\cdot \alpha x =
1+x \implies 1+\alpha x\leq (1+x)^\alpha,
$$with equality only if $x=0$. The theorem has been proved for $\alpha > 1$.


\bigskip

Finally, let $\alpha < 0$. Again, if $1+\alpha x < 0$, then
obviously $(1+x)^\alpha
>0
> 1+ \alpha x$, and there is nothing to prove. Assume thus $\alpha x \geq
-1$. Choose a strictly positive integer $n$ satisfying $0<-\alpha <
n$. Now, $$1 \geq 1-\dfrac{\alpha ^2}{n^2}x^2 = \left(1 -
\dfrac{\alpha}{n}x\right) \left(1 + \dfrac{\alpha}{n}x\right)
\implies  \dfrac{1}{1 - \dfrac{\alpha}{n}x} \geq 1 +
\dfrac{\alpha}{n}x,$$and so by the first part of the theorem
$$\begin{array}{lll} (1+x)^{-\alpha /n} \leq 1 - \dfrac{\alpha}{n}x & \implies & (1+x)^{\alpha /n} \geq \dfrac{1}{1 - \dfrac{\alpha}{n}x}  \\
& \implies & (1+x)^{\alpha /n} \geq 1 + \dfrac{\alpha}{n}x
\\
& \implies & (1+x)^{\alpha } \geq \left(1 +
\dfrac{\alpha}{n}x\right)^n, \\
\end{array}$$and since $n$ is a positive integer, $ \left(1 +
\dfrac{\alpha}{n}x\right)^n \geq 1 + n\cdot \dfrac{\alpha}{n}x = 1+
\alpha x$ and so $(1+x)^\alpha \geq 1+\alpha x$ also when $\alpha <
0$. This finishes the proof of the theorem.
\end{pf}


\begin{thm}[Monotonicity of Power
Means]\label{thm:monotonicity-power-means} Let $a_1, a_2, \ldots ,
a_n$ be strictly positive real numbers and let $(\alpha, \beta
)\in\BBR^2$ be such that $\alpha \cdot \beta \neq 0 $ and $\alpha <
\beta$. Then
$$ \left(\dfrac{a_1 ^\alpha + a_2 ^\alpha + \cdots + a_n ^\alpha}{n}\right)^{1/\alpha}
\leq \left(\dfrac{a_1 ^\beta + a_2 ^\beta + \cdots + a_n
^\beta}{n}\right)^{1/\beta}, $$ with equality if and only if
$a_1=a_2=\cdots = a_n$.
\end{thm}
\begin{pf}Assume first that $0 < \alpha < \beta$. Put $c_\alpha = \left(\dfrac{a_1 ^\alpha + a_2 ^\alpha + \cdots + a_n
^\alpha}{n}\right)^{1/\alpha}$ and $d_k =
\left(\dfrac{a_k}{c_\alpha}\right)^\alpha$. Observe that $$
\dfrac{c_\beta}{c_\alpha} =
\left(\dfrac{\left(\dfrac{a_1}{c_\alpha}\right)^\beta+\left(\dfrac{a_2}{c_\alpha}\right)^\beta+\cdots
+ \left(\dfrac{a_n}{c_\alpha}\right)^\beta}{n}\right)^{1/\beta}=
\left(\dfrac{d_1 ^{\beta/\alpha}+d_2 ^{\beta/\alpha}+\cdots + d_n
^{\beta/\alpha}}{n}\right)^{1/\beta},
$$and that
$$\left(\dfrac{d_1 +d_2 +\cdots +
d_n }{n}\right)^{1/\alpha}  = \dfrac{1}{c_\alpha}\left(\dfrac{a_1
^\alpha + a_2 ^\alpha + \cdots + a_n ^\alpha}{n}\right)^{1/\alpha}=1
\implies d_1 +d_2 +\cdots + d_n=n.
$$Put $d_k = 1+x_k$. Then $x_1+x_2+\cdots + x_n = 0$. By Theorem
\ref{thm:generalisation-bernoulli-ineq},
\begin{equation}\label{eq:d's-in-mono-means} d_k ^{\beta/\alpha} =
(1+x_k)^{\beta/\alpha} \geq 1 + \dfrac{\beta}{\alpha}x_k.
\end{equation} Letting $k$ run from $1$ through $n$ and adding,
$$ d_1 ^{\beta/\alpha}+d_2 ^{\beta/\alpha}+\cdots + d_n
^{\beta/\alpha} \geq n + \dfrac{\beta}{\alpha}(x_1+x_2+\cdots + x_n)
= n.
$$
Hence
$$ \dfrac{d_1 ^{\beta/\alpha}+d_2 ^{\beta/\alpha}+\cdots + d_n
^{\beta/\alpha}}{n}\geq 1 \implies \dfrac{c_\beta}{c_\alpha}\geq 1,
$$proving the theorem when $0<\alpha < \beta$.

\bigskip

If $\alpha < \beta < 0$, then  $0 < \dfrac{\beta}{\alpha} < 1$. The
inequality in (\ref{eq:d's-in-mono-means}) is reversed, giving
$\dfrac{d_1 ^{\beta/\alpha}+d_2 ^{\beta/\alpha}+\cdots + d_n
^{\beta/\alpha}}{n}\leq 1$, and since $\beta < 0$,
$$\dfrac{c_\beta}{c_\alpha} = \left( \dfrac{d_1 ^{\beta/\alpha}+d_2 ^{\beta/\alpha}+\cdots + d_n
^{\beta/\alpha}}{n}\right)^{1/\beta}\geq 1^{1/\beta} = 1,
$$proving the theorem when $\alpha < \beta <0$.

\bigskip

Finally, we tackle the case $\alpha<0 < \beta$. By the AM-GM
Inequality, putting $G = (a_1a_2\cdots a_n)^{1/n}$
$$G^\alpha  = (a_1 ^\alpha a_2 ^\alpha\cdots a_n ^\alpha)^{1/n} \leq \dfrac{a_1 ^\alpha + a_2 ^\alpha + \cdots + a_n ^\alpha}{n}.$$
Raising the quantities at the extreme of the inequalities to the
power $-1/\alpha$ and remembering that $-1/\alpha > 0$, we gather
that
$$  \left(\dfrac{a_1 ^\alpha + a_2 ^\alpha + \cdots + a_n ^\alpha}{n}\right)^{1/\alpha} \leq G.  $$
In a similar manner,
$$G^\beta  = (a_1 ^\beta a_2 ^\beta\cdots a_n ^\beta)^{1/n} \leq \dfrac{a_1 ^\beta + a_2 ^\beta + \cdots + a_n ^\beta}{n},$$
and $$ G\leq  \left( \dfrac{a_1 ^\beta + a_2 ^\beta + \cdots + a_n
^\beta}{n}\right)^{1/\beta},$$since $\beta >0$. This finishes the
proof.
\end{pf}
\begin{lem}\label{lem:homogenised-youngs}
Let $\alpha, a, x$ be real numbers with $\alpha > 1$, $a>0$, and
$x\geq 0$. Then $$ x^\alpha -ax \geq (1-\alpha)
\left(\dfrac{a}{\alpha}\right)^{\alpha/(\alpha -1)}.
$$
\end{lem}
\begin{pf}
By Theorem \ref{thm:generalisation-bernoulli-ineq}, since $\alpha
>1$, $$ (1+z)^\alpha \geq 1 + \alpha z, \qquad z \geq -1,
$$with equality only if $z=0$. Putting $y = 1 + z$,
$$ y^\alpha \geq 1 + \alpha (y-1) \implies y^\alpha - \alpha y \geq 1-\alpha, \qquad y \geq 0,
$$with equality only if $y=1$. Let $c>0$ be a constant. Multiplying
the above inequality by $c^\alpha$ we obtain
$$(cy)^\alpha - \alpha c^{\alpha -1} (cy) \geq (1-\alpha)c^{\alpha}, \qquad \mathrm{for} \qquad y\geq 0. $$
Putting $x=cy$ and $a=\alpha c^{\alpha -1}$, we get
$$ x^\alpha - ax \geq (1-\alpha)\left(\dfrac{a}{\alpha}\right)^{\alpha /(\alpha -1)},
$$with equality if and only if $x=c=\left(\dfrac{a}{\alpha}\right)^{\alpha /(\alpha
-1)}$.

\end{pf}
\begin{thm}[Young's Inequality]\label{thm:youngs-ineq} Let
 $p > 1$ and put $\dfrac{1}{p} + \dfrac{1}{q} = 1$. Then for $(x, y)\in ([0;+\infty[)^2$ we have
$$ xy \leq \frac{x^p}{p} + \frac{y^q}{q}. $$
\end{thm}
\begin{pf}
Put $\alpha = p$, $a = py$ in Lemma  \ref{lem:homogenised-youngs},
obtaining
$$ x^p - (py)x \geq (1-p)\left(\dfrac{py}{p}\right)^{p/(p-1)} = (1-p)y^{p/(p-1)}. $$
Now, $\dfrac{1}{q} = \dfrac{p-1}{p} \implies q = \dfrac{p}{p-1}$ and
$p-1 = \dfrac{p}{q}$. Hence
$$  x^p - (py)x \geq  (1-p)y^{p/(p-1)} \implies (1-p)y^{p/(p-1)} \geq
-\dfrac{p}{q}y^q,$$and rearranging gives the result sought.
\end{pf}
We now derive a generalisation of the Cauchy-Bunyakovsky-Schwarz
Inequality.
\begin{thm}[H\"{o}lder Inequality] \label{thm:holders-ineq} Let $x_j ,y_k$, $1 \leq j, k \leq n$, be real numbers. Let
 $p > 1$ and put $\dfrac{1}{p} + \dfrac{1}{q} = 1$. Then
 $$ \absval{\sum _{k=1} ^n x_ky_k}\leq \left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p}\left(\sum _{k=1} ^n \absval{y_k} ^q\right) ^{1/q}. $$

\end{thm}
\begin{pf}
If either $\sum _{k=1} ^n \absval{x_k} ^p = 0$ or $\sum _{k=1} ^n
\absval{y_k} ^q = 0$ there is nothing to prove, so assume otherwise.
From  Young's Inequality we have
$$ \frac{|x_k|}{ \left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p}}\frac{|y_k|}{{ \left(\sum _{k=1} ^n \absval{y_k} ^q\right) ^{1/q}}}
\leq   \dfrac{|x_k| ^p}{ \left(\sum _{k=1} ^n \absval{x_k} ^p\right)
p} + \dfrac{|y_k| ^q}{ \left(\sum _{k=1} ^n \absval{y_k}
^q\right)q}.
$$Adding, we deduce

$$ \begin{array}{lll}\sum _{k = 1} ^n  \dfrac{|x_k|}{ \left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p}}\dfrac{|y_k|}{{ \left(\sum _{k=1} ^n \absval{y_k} ^q\right) ^{1/q}}} &
\leq&  \dfrac{1}{ \left(\sum _{k=1} ^n \absval{x_k} ^p\right)p}\sum
_{k = 1} ^n |x_k| ^p
+ \dfrac{1}{ \left(\sum _{k=1} ^n \absval{y_k} ^q\right)q}\sum _{k = 1} ^n |y_k| ^q \\
& =  & \dfrac{\sum _{k=1} ^n \absval{x_k} ^p}{ \left(\sum _{k=1} ^n
\absval{x_k} ^p\right)p} + \dfrac{\sum _{k=1} ^n \absval{y_k}
^q}{ \left(\sum _{k=1} ^n \absval{y_k} ^q\right)q}\\
&  = & \dfrac{1}{p} + \dfrac{1}{q}\\  & =  & 1.
\end{array}
$$
This gives $$ \sum _{k = 1} ^n |x_ky_k| \leq
 \left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p} \left(\sum _{k=1} ^n \absval{y_k} ^q\right) ^{1/q}.
$$The result follows by observing that
$$ \left| \sum _{k = 1} ^n x_ky_k \right|\leq\sum _{k = 1} ^n
|x_ky_k| \leq  \left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p}
\left(\sum _{k=1} ^n \absval{y_k} ^q\right) ^{1/q}.
$$

\end{pf}
Finally, we derive a generalisation of Minkowski's Inequality.
\begin{thm}[Generalised Minkowski Inequality] Let $p \in ]1; +\infty[$. Let $x_j ,y_k$, $1 \leq j, k \leq n$, be real numbers. Then  the following inequality holds
$$  \left(\sum _{k=1} ^n \absval{x_k + y_k} ^p\right) ^{1/p}\leq  \left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p} +
 \left(\sum _{k=1} ^n \absval{y_k} ^p\right) ^{1/p}. $$
\label{thm:minkowski_inequality-general}\end{thm}
\begin{pf}From the triangle inequality for real numbers  $$|x_k + y_k|^p
= |x_k + y_k||x_k + y_k|^{p - 1} \leq \left(|x_k| + |y_k|\right)|x_k
+ y_k|^{p - 1}.$$ Adding \begin{equation}  \sum _{k = 1} ^n |x_k +
y_k|^p \leq \sum _{k = 1} ^n |x_k||x_k + y_k|^{p - 1} + \sum _{k =
1} ^n |y_k||x_k + y_k|^{p - 1}.\label{eq:minkowski_1}\end{equation}
By the H\"{o}lder Inequality \begin{equation}\begin{array}{lll} \sum
_{k = 1} ^n |x_k||x_k + y_k|^{p - 1}  & \leq & \left(\sum _{k = 1}
^n |x_k|^p\right)^{1/p}\left(\sum _{k = 1} ^n|x_k + y_k|^{(p -
1)q}\right)^{1/q} \\ & = &  \left(\sum _{k = 1} ^n
|x_k|^p\right)^{1/p}\left(\sum _{k = 1} ^n|x_k +
y_k|^{p}\right)^{1/q}\\
\end{array}\label{eq:minkowski_2}\end{equation}
In the same manner we deduce \begin{equation}\sum _{k = 1} ^n
|y_k||x_k + y_k|^{p - 1} \leq \left(\sum _{k=1} ^n \absval{y_k}
^p\right) ^{1/p} \left(\sum _{k=1} ^n \absval{x_k + y_k} ^p\right)
^{1/q} \label{eq:minkowski_3}.\end{equation} Hence
(\ref{eq:minkowski_1}) gives
$$\begin{array}{lll}  \sum _{k = 1} ^n |x_k + y_k|^p &  \leq &  \left(\sum _{k=1} ^n
\absval{x_k} ^p\right) ^{1/p}\left(\sum _{k=1} ^n \absval{x_k + y_k}
^p\right) ^{1/q}  +
 \left(\sum _{k=1} ^n \absval{y_k} ^p\right) ^{1/p}\left(\sum _{k=1} ^n \absval{x_k + y_k} ^p\right) ^{1/q}\\
& = &   \left(\left(\sum _{k=1} ^n \absval{x_k} ^p\right) ^{1/p} +
\left(\sum _{k=1} ^n
\absval{y_k} ^p\right) ^{1/p}\right)\left(\sum _{k=1} ^n \absval{x_k + y_k} ^p\right) ^{1/q},\\
 \end{array}$$from where we deduce
the result.
\end{pf}



\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}
Prove that if $\alpha >0$ and $n>0$ an integer then
$$  \dfrac{n^{1+\alpha}-(n-1)^{1+\alpha}}{1+\alpha} <n^\alpha < \dfrac{(n+1)^{1+\alpha}-n^{1+\alpha}}{1+\alpha}.$$
Deduce that $$ \lim _{\ngroes} \dfrac{1^\alpha+2^\alpha+\cdots +
n^\alpha }{n^{1+\alpha}} = \dfrac{1}{1+\alpha}.
$$
\end{pro}
\end{multicols}

\section{Intermediate Value Property}
\begin{thm}[Intermediate Value
Theorem]\label{thm:intermediate-value} Let $I\subseteqq \BBR$ and
let $(a, b)\in I^2$. Let $f:I\rightarrow \BBR$ be a continuous
function such that $f(a)\leq f(b)$. Then $f$ attains every
intermediate value between $f(a)$ and $f(b)$, that is,
$$\forall t \in \lcrc{f(a)}{f(b)}, \exists c\in I, \qquad \mathrm{such \ that}\qquad f(c)=t.   $$
\end{thm}
\begin{pf}
Suppose on the contrary that there is a $t\in \lcrc{f(a)}{f(b)}$
such that for all $c\in I$, $f(c)\neq t$. Hence $f(a)<t<f(b)$.
Assume, without loss of generality, that $a<b$. Consider the sets
$$U = \loro{-\infty}{a}\cup \left\{x\in \lcrc{a}{b}: f(x)<t\right\}= \loro{-\infty}{a}\cup \left(f^{-1}\left(\loro{-\infty}{t}\right) \cap \lcrc{a}{b}\right),  $$
and
$$V = \loro{b}{+\infty}\cup \left\{x\in \lcrc{a}{b}: f(x)>t\right\}= \loro{b}{+\infty}\cup \left(f^{-1}\left(\loro{t}{+\infty}\right) \cap \lcrc{a}{b}\right).  $$
Then $U, V$ are open sets of $\BBR$ by virtue of Theorem
\ref{thm:continuous-iff-open-sets}. But then $\BBR = U \cup V$ and
$U\cap V = \varnothing$, $U\neq \varnothing$, $V\neq \varnothing$,
contradicting the fact that $\BBR$ is connected. Thus there must
exist a $c$ such that $f(c)=t$.
\end{pf}
\begin{cor} A continuous function defined on an
interval maps that interval into an interval.
\end{cor}
\begin{pf}
This follows at once from the Intermediate Value Theorem and the
definition of an interval.
\end{pf}
\begin{thm}[Bolzano's Theorem]If $f:\lcrc{u}{v}\rightarrow \BBR$ is
continuous and $f(u)f(v)<0$, then there is a $w\in \loro{u}{v}$ such
that $f(w)=0$.
\end{thm}
\begin{pf}
This follows at once from the Intermediate Value Theorem by putting
$a=\min (f(u), f(v))<0$ and $b=\max (f(u), f(v))>0$ .
\end{pf}
\begin{cor}
Every polynomial $p(x)\in \BBR[x]$   with real coefficients and odd
degree has at least one real root.
\end{cor}
\begin{pf}Let $p(x) = a_0  + a_1x+ a_2x^2+\cdots + a_nx^n$, with $a_n\neq
0$ and $n$ odd. Since $p$ has odd degree, $\lim _{x\rightarrow
-\infty}p(x) = (-\infty)\signum{a_n}$ and $\lim _{x\rightarrow
+\infty}p(x) = (+\infty)\signum{a_n}$, which are of opposite sign.
The polynomial must then attain positive and negative values and
between values of opposite sign, it will have a real root.
\end{pf}
\begin{cor}\label{cor:retention-of-sign-cont}
If $f$ is continuous at the point $a$ and $f(a)\neq 0$, then there
is a neighbourhood of $a$ where $f(x)$ has the same sign as $f(a)$.
\end{cor}
\begin{pf}
Take $\varepsilon = \dfrac{\absval{f(a)}}{2}>0 $ in the definition
of continuity. There is a $\delta > 0$ such that
$$ \absval{x-a}<\delta \implies \absval{f(x)-f(a)}< \dfrac{\absval{f(a)}}{2} \implies f(a)- \dfrac{\absval{f(a)}}{2} <f(x)< f(a)+ \dfrac{\absval{f(a)}}{2},
$$from where the result follows.
\end{pf}
\begin{thm} A continuous function defined on a compact set maps that compact set  into a compact set.
\label{thm:compact-to-compact}
\end{thm}
\begin{pf}
Let $f:X\rightarrow \BBR$ be continuous and $X\subseteqq \BBR$
compact. Let $\seq{y_n}{n=1}{+\infty}\subseteqq f(X)$ be an infinite
sequence of $f(X)$. There are $x_n\in X$ such that $y_n=f(x_n)$.
Since $\seq{x_n}{n=1}{+\infty}\subseteqq X$ is an infinite sequence
of $X$ and $X$ is compact, it has a convergent subsequence in $X$,
say, $\seq{x_{n_k}}{k=1}{+\infty}$  with $x_{n_k}\rightarrow x\in
X$, by virtue of Theorem
\ref{thm:equivalent-statements-for-compactness}. Since $f$ is
continuous
$$x_{n_k}\rightarrow x \implies f(x_{n_k})\rightarrow f(x).
$$Clearly $f(x)\in f(X)$. Thus the arbitrary sequence $\seq{y_n}{n=1}{+\infty}\subseteqq
f(X)$ has the convergent subsequence $\seq{y_{n_k}}{k=1}{+\infty}$
in $f(X)$, and one more appeal to Theorem
\ref{thm:equivalent-statements-for-compactness} proves compactness.
\end{pf}


\begin{thm}[Weierstrass Theorem]A continuous function $f:\lcrc{a}{b}\rightarrow
\BBR$ attains a maximum and a minimum on
$\lcrc{a}{b}$.\label{thm:weierstrass-max-min}

\end{thm}
\begin{pf}
By Theorem \ref{thm:compact-to-compact}, $f(\lcrc{a}{b})$ is
compact, and so, by the Heine-Borel Theorem, it is closed and
bounded. Thus there exists $(m, M)\in\BBR^2$ such that $m = \inf
_{x\in \lcrc{a}{b}}f(x)$ and $M=\sup _{x\in \lcrc{a}{b}}f(x)$. We
must prove that these are attained in $ \lcrc{a}{b}$, i.e., that
there exist $\mu \in \lcrc{a}{b}$ and $\mu ' \in \lcrc{a}{b}$ such
that $f(\mu)=m$ and $f(\mu ')=M$. By the Approximation Property of
the Infimum and the Supremum, we may find sequences $\seq{m
_n}{n=1}{+\infty}\subseteqq f(\lcrc{a}{b})$, and $\seq{M
_n}{n=1}{+\infty}\subseteqq f(\lcrc{a}{b})$ such that $m \leq m_n$ and
$ m_n \rightarrow m$,  and also, $M_n\leq M$, and $M _n \rightarrow
M$ as $\ngroes$. By the Intermediate Value Theorem, there exist $\mu
_n \in \lcrc{a}{b}$ and $\mu _n' \in \lcrc{a}{b}$ such that $m_n =
f(\mu _n)$ and $M_n = f(\mu _n')$. By the compactness of
$\lcrc{a}{b}$ the sequences $\seq{\mu _n}{n=1}{+\infty}\subseteqq
\lcrc{a}{b}$ and $\seq{\mu _n '}{n=1}{+\infty}\subseteqq
\lcrc{a}{b}$ have convergent subsequences $\seq{\mu
_{n_k}}{k=1}{+\infty}\subseteqq \lcrc{a}{b}$ and $\seq{\mu _{n_k}
'}{k=1}{+\infty}\subseteqq \lcrc{a}{b}$ such that $\mu
_{n_k}\rightarrow \mu\in \lcrc{a}{b}$ and $\mu _{n_k}' \rightarrow
\mu'\in \lcrc{a}{b}$. By continuity and the uniqueness of limits,

$$\mu _{n_k}
\rightarrow \mu \implies m_{n_k}=f(\mu _{n_k}) \rightarrow m =f(
\mu), \qquad \mathrm{and}\qquad \mu _{n_k} ' \rightarrow \mu '
\implies M_{n_k} =f(\mu _{n_k} ') \rightarrow M=f( \mu '),
$$and so $f$ attains both extrema in $\lcrc{a}{b}$.\end{pf}








\begin{thm}[Fixed Point Theorem]Let $f:\lcrc{a}{b}\rightarrow
\lcrc{a}{b}$ be continuous. Then $f$ has a fixed point, that is,
there is $c\in \lcrc{a}{b}$ such that $f(c)=c$.
\end{thm}
\begin{pf}
If either $f(a)=a$ or $f(b)=b$ we are done. Assume then that
$f(a)>a$ and $f(b)<b$. Put $g(x)=f(x)-x$. Then $g$ is continuous,
$g(a)>0$ and $g(b)<0$. By Bolzano's Theorem, there must be a $c\in
\loro{a}{b}$ such that $g(c)=0$, that is, $f(c)-c=0$, finishing the
proof.
\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Let $p(x), q(x)$ be polynomials with real coefficients such that $$
p(x^2+x+1) = p(x)q(x).$$Prove that $p$ must have even degree.
\begin{answer}
If $p$ had odd degree, then, by the Intermediate Value Theorem it
would have a real root. Let $\alpha$ be its largest real root. Then
$$
0=p(\alpha )q(\alpha )=p(\alpha ^2+\alpha+1)$$meaning that $\alpha
^2+\alpha+1>\alpha$ is a real root larger than the supposedly
largest real root $\alpha$, a contradiction.
\end{answer}
\end{pro}
\begin{pro}
A function $f$ defined over all real numbers is continuous and for
all real $x$ satisfies $$\left(f(x)\right) \cdot \left((f\circ
f)(x)\right) =1. $$Given that $f(1000)=999$, find $f(500)$.
\begin{answer}
Observe that $f(1000)f(f(1000)) = 1 \implies f(999) =
\dfrac{1}{999}$. So the range of $f$ includes all numbers from
$\dfrac{1}{999}$ to $999$. By the intermediate value theorem, there
is a real number $a$ such that $f(a) = 500$. Thus
$$f(a)f(f(a)) = 1 \implies f(500) = \dfrac{1}{500}.  $$
\end{answer}
\end{pro}
\begin{pro}
Let $f:\BBR\rightarrow \BBR$ be a continuous function such that
$\lim _{x\rightarrow -\infty} f(x)=0=\lim _{x\rightarrow +\infty}
f(x)$. If $f$ is strictly negative somewhere on $\BBR$ then $f$
attains a finite absolute minimum on $\BBR$. If $f$ is strictly
positive somewhere on $\BBR$ then $f$ attains a finite absolute
maximum on $\BBR$.
\end{pro}
\begin{pro}
Let $f:\lcrc{0}{1}\rightarrow \lcrc{0}{1}$ be continuous. Prove that
there is no $c\in \lcrc{0}{1}$ such that $f^{-1}(\{c\})$ has exactly
two elements.
\end{pro}
\begin{pro}
Let $f, g$ be continuous functions from $\lcrc{0}{1}$ to
$\lcrc{0}{1}$ such that $$\forall x\in \lcrc{0}{1} \qquad f(g(x)) =
g(f(x)).$$Prove that $f$ and $g$ have a common fixed point in
$\lcrc{0}{1}$. \begin{answer}

\end{answer}
\end{pro}


\begin{pro}
A continuous function $f:\BBR \rightarrow \BBR$ satisfies
$$\forall x\in\BBR \qquad f(x+f(x))=f(x).  $$Prove that $f$ is
constant.
\end{pro}

\begin{pro}
Let $I$ be a closed and bounded interval on the line and let $f$ be
continuous on $I$. Suppose that for each $x \in I$, there exists a
$y \in I$ such that $$ |f(y)| \leq \frac{1}{2}|f(x)|.$$ Prove the
existence of a $t \in I$ such that $f(t ) = 0.$ \end{pro}
\begin{pro} Find all continuous functions that satisfy the functional
equation
$$ f(x) + f(y) = f\left(\frac{x + y}{1 - xy}\right),$$ for all $-1 < x, y < 1.$
\end{pro}
\begin{pro}[Putnam 1947] A real valued continuous function satisfies for all
real $x, y$ the functional equation $$ f(\sqrt{x^2 + y^2}) =
f(x)f(y) .$$ Prove that $f(x) = (f(x))^{x^2}$. \end{pro}
\begin{pro}Suppose that $f:\lcrc{0}{1} \rightarrow \lcrc{0}{1}$ is continuous.
Prove that there is a number $c$ in $\lcrc{0}{1}$ such that $f(c) =
1 - c$.
\begin{answer}
If either $f(0)=1$ or $f(1)=0$, we are done. So assume that $0\leq
f(0)<1$ and $0<f(1)\leq 1$. Put $g(x)=f(x)+x-1$. Then $g(0) =
f(0)-1<0$ and $g(1)=f(1)>0$. By Bolzano's Theorem there is a $c\in
\loro{0}{1}$ such that $g(c)=0$, that is, $f(c)+c-1=0$, as required.
\end{answer}

 \end{pro}
\begin{pro}[Universal Chord Theorem]\label{pro:universal-chord-thm} Suppose that $f$ is a continuous
function of $\lcrc{0}{1}$ and that $f(0) = f(1)$. Let
$n$ be a strictly positive integer. Prove that there is some number $x \in \lcrc{0}{1}$ such that $f(x) = f(x + 1/n).$ \\

\begin{answer}
Consider $g(x) = f(x) - f(x + 1/n)$, which is clearly continuous. If
$g$ is never $0$ in $\lcrc{0}{1}$ then by Corollary
\ref{cor:retention-of-sign-cont} $g$ must be either strictly
positive or strictly negative. But then
$$  0=f(0)-f(1)= \left(f(0)-f\left(\dfrac{1}{n}\right)\right) + \left(f\left(\dfrac{1}{n}\right)-f\left(\dfrac{2}{n}\right)\right)
+
\left(f\left(\dfrac{2}{n}\right)-f\left(\dfrac{3}{n}\right)\right)+
\cdots +
\left(f\left(\dfrac{n-1}{n}\right)-f\left(\dfrac{n}{n}\right)\right).
$$Each of the parenthesized terms on the right is strictly positive
or strictly negative, and they all have the same sign, so their sum is never $0$, a contradiction.
\end{answer}


\end{pro} \begin{pro}  Under the same conditions of
problems \ref{pro:universal-chord-thm}  prove that there are no
universal chords of length $a, 0 < a < 1, a \neq 1/n.$

\begin{answer}
Consider the function $f:\lcrc{0}{1}\rightarrow \lcrc{0}{1}$, $x
\mapsto \dfrac{\sin \frac{2\pi x}{a}}{\sin \frac{2\pi}{a}}-x$.
\end{answer}
\end{pro}
\end{multicols}

\section{Variation of a Function and Uniform Continuity}
\begin{df}
A {\em partition} $\curlyP$ of the interval   $\lcrc{a}{b}$ is any
finite set of points $x_0, x_1, \ldots , x_n$ such that
$$ a=x_0<x_1<\cdots < x_n=b. $$ A partition $\curlyP '$ of
$\lcrc{a}{b}$ is said to be {\em finer} than the partition $\curlyP$
if $\curlyP \subseteqq \curlyP '$.

\end{df}
\begin{df}
The {\em mesh} or {\em norm} of $\curlyP$ is $$ \norm{\curlyP} =
\max _{1 \leq k \leq n} \absval{x_k-x_{k-1}}.$$
\end{df}
\begin{rem}
If $\curlyP  \subseteqq \curlyP '$ then clearly $\norm{\curlyP
'}\leq \norm{\curlyP }$, since the finer partition has probably more
points which will make the corresponding subintervals smaller.
\end{rem}

\begin{df}
Let $f$ be a bounded function on an interval $\lcrc{a}{b}$ and let
$I\subseteqq \lcrc{a}{b}$ be a subinterval. The {\em oscillation of
$f$ on $I$} is defined and denoted by
$$ \omega (f, I) = \sup _{x\in I}f(x) - \inf _{x\in I}f(x). $$
\end{df}

\begin{thm}\label{thm:oscillation-1}
Let $f:\lcrc{a}{b} \rightarrow \BBR$ be a continuous function. Given
$\varepsilon  > 0$ there exists a partition of $\lcrc{a}{b}$ into a
finite number of subintervals of equal length such that the
oscillation of $f$ on each of these subintervals is at most
$\varepsilon$.
\end{thm}
\begin{pf}Let  $P_\varepsilon$ mean the following: there is an $\varepsilon
>0$ such that for all partitions
of $\lcrc{a}{b}$ into a finite number of intervals of equal length,
the oscillation of $f$ is $\geq \varepsilon$. By bisecting
$\lcrc{a}{b}$, at least one of the halves must have property
$P_\varepsilon$, say $\lcrc{a_1}{b_1}$. If $\lcrc{a}{b}$ were to have
property $P_\varepsilon$, then  by bisecting $\lcrc{a_1}{b_1}$, at
least one of the halves must have property $P_\varepsilon$, say
$\lcrc{a_2}{b_2}$. Continuing in this way we have constructed a
sequence of imbricated intervals
$$\lcrc{a}{b} \supseteqq  \lcrc{a_1}{b_1}  \supseteqq  \lcrc{a_2}{b_2} \supseteqq  \cdots \supseteqq  \lcrc{a_n}{b_n}\supseteqq \cdots    $$
where the length of  $ \lcrc{a_n}{b_n}$ is $b_n-a_n=\dfrac{b-a}{2^n}
\rightarrow 0$ as $\ngroes$. By the Cantor Intersection Theorem,
there is a point $c\in \bigcap _{n=1} ^\infty \lcrc{a_n}{b_n}$.
Moreover, we have $\omega (f, \lcrc{a_n}{b_n}) \geq \varepsilon$.
Since $c\in \lcrc{a}{b}$, $f$ is continuous at $c$. Hence there is a
$\delta > 0$ such that
$$x\in \loro{c-\delta}{c+\delta}\implies \absval{f(x)-f(c)}<\dfrac{\varepsilon}{2}.$$
Taking $(x',x'')\in  \loro{c-\delta}{c+\delta}^2$ we have
$$ \absval{f(x')-f(x'')} \leq \absval{f(x')-f(c)} + \absval{f(c)-f(x'')} < \varepsilon,  $$
whence $$\omega (f, \lcrc{a}{b}\cap  \loro{c-\delta}{c+\delta}) <
\varepsilon.
$$
Now, if there was an $\varepsilon > 0$  such that for all partitions
of $\lcrc{a}{b}$ into a finite number of intervals of equal length,
the oscillation of $f$ is $\geq \varepsilon$, then by taking $n$
large enough above we could find one of the $\lcrc{a_n}{b_n}$
completely inside one of the subintervals of the partition. By the
above, the oscillation there would be $<\varepsilon$, a
contradiction.
\end{pf}
\begin{thm}\label{thm:oscillation-2}
Let $f:\lcrc{a}{b} \rightarrow \BBR$ be a continuous function. Given
$\varepsilon > 0$ there exists a $\delta >0$ such that on any
subinterval  $I\subseteqq \lcrc{a}{b}$ having length $<\delta$ the
oscillation of $f$ on $I$ is $<\varepsilon$.
\end{thm}
\begin{pf}Let $\delta = \dfrac{b-a}{n}$.
By Theorem \ref{thm:oscillation-1} we may choose $n$ so large that
the oscillation of $f$ on each of
\begin{equation}\lcrc{a}{a+\delta}, \quad
\lcrc{a+\delta}{a+2\delta}, \quad  \ldots \quad  , \quad
\lcrc{a+(n-1)\delta}{b},  \label{eq:subintervals-oscillation}
\end{equation} is $<\dfrac{\varepsilon}{2}$. Let $I\subseteqq
\lcrc{a}{b}$ be any subinterval of length $<\delta$ and let $x'\in
I$ be the point where $f$ achieves its largest value and $x''\in I$
be the point where $f$ achieves its smallest value. Then $x'$ and
$x''$ either belong to the same interval in
\ref{eq:subintervals-oscillation}---in which case $\absval{
f(x')-f(x'')}<\dfrac{\varepsilon}{2}$---or since $I$ has length
smaller than $\delta$, to two consecutive subintervals
$$\lcrc{a+(j-1)\delta}{a+j\delta}, \lcrc{a+j\delta}{a+(j+1)\delta}.  $$
In this case
$$ f(x')-f(x'') = (f(x')-f(a+j\delta)) + (f(a+j\delta)-f(x''))< \dfrac{\varepsilon}{2} + \dfrac{\varepsilon}{2} = \varepsilon.$$
The theorem now follows.\end{pf}

\begin{df}
A function $f$ is said to be {\em uniformly continuous} on
$\lcrc{a}{b}$ if $\forall \varepsilon > 0$ there exists $\delta > 0$
depending only on $\varepsilon$ such that for any $(u, v)\in
\lcrc{a}{b}^2 $,
$$ \absval{u-v}<\delta \implies \absval{f(u)-f(v)}<\varepsilon . $$
\end{df}

\begin{thm}\label{thm:semi-heine}
If $f:\lcrc{a}{b}\rightarrow \BBR$ is continuous, then $f$ is
uniformly continuous.
\end{thm}
\begin{pf}
This follows from Theorem \ref{thm:oscillation-2}.
\end{pf}
\begin{thm}[Heine's Theorem] \label{thm:heine}
If $f:X\rightarrow \BBR$ is continuous and $X$ is compact, then $f$
is uniformly continuous.
\end{thm}
\begin{pf}
This follows from Theorem \ref{thm:semi-heine}.
\end{pf}
\begin{thm}\label{thm:-x+y-mono}
Let $f$ be an increasing function on an open interval $\loro{a}{b}$.
Then, for any $x$ satisfying $a<x<b$, $$ \sup _{t\in \loro{a}{x}}
f(t) = f(x-) \leq f(x) \leq  \inf _{t\in \loro{x}{b}} f(t) = f(x+).
$$Moreover, if $a<x<y<b$,  then $f(x+)\leq f(y-)$.
\end{thm}
\begin{pf}
The set $\{f(t):a<t<x\}$ is bounded above by $f(x)$ and hence it has
a supremum $\sup _{t\in \loro{a}{x}} f(t)=A$ and clearly $A\leq
f(x)$ as $f$ is increasing. Let us shew that $A= f(x-)$. Let
$\varepsilon > 0$. By the Approximation Property of the Supremum,
there is $\delta > 0$ such that $a<x-\delta < x$ and
$A-\varepsilon <f(x-\delta)\leq A$. But as $f$ is increasing,
$$x-\delta <t < x \implies A-\varepsilon < f(x-\delta) \leq f(t)\leq A \implies
\absval{f(t)-A}<\varepsilon,$$whence $f(x-)=A$.

\bigskip

A similar reasoning gives $ \inf _{t\in \loro{x}{b}} f(t) = f(x+)$.

\bigskip

Now, if $a<x<y<b$, then by what has already been proved we obtain
$$f(x+) = \inf _{x<t<b}f(t) =  \inf _{x<t<y}f(t),  $$
again, remembering that $f$ is increasing. Similarly,
$$f(y-) = \sup _{a<t<y}f(t) =  \sup _{x<t<y}f(t),  $$from where
$f(x+)\leq f(y-)$.
\end{pf}


\begin{thm}\label{thm:bounds-per-jump-monotone}
Let $f$ be an increasing function defined on the interval
$\lcrc{a}{b}$ and let
$$ a=x_0<x_1<x_2<\cdots < x_n=b $$be $n+1$ points partitioning the
interval. Then
$$\sum _{k=1} ^{n-1} \left(f(x_k +)-f(x_k-)\right)\leq f(b)-f(a).  $$
\end{thm}
\begin{pf}
Let $y_k\in \loro{x_k}{x_{k+1}}$. For $1 \leq k \leq n-1$, by
Theorem \ref{thm:-x+y-mono},
$$f(x_k +)\leq f(y_k)\qquad \mathrm{and} \quad f(y_{k-1}) \leq f(x_k-) \implies f(x_k+)-f(x_k-) \leq f(y_k)-f(y_{k-1}).
$$Adding,
$$\sum _{k=1} ^{n-1}  \left(f(x_k+)-f(x_k-)\right) \leq \sum _{k=1} ^{n-1} \left(f(y_k)-f(y_{k-1})\right) = f(y_{n-1})-f(y_0).  $$
The proof is completed upon noticing that $ f(y_{n-1})-f(y_0) \leq
f(b)-f(a)$.
\end{pf}

\begin{thm}\label{thm:discontinuities-of-monotone-functions}
Let $f:\lcrc{a}{b}\rightarrow \BBR$ be a monotone function. Then the
set of points of discontinuity of $f$ is either finite or countable.
\end{thm}
\begin{pf}
Assume $f$ is increasing, for if $f$ were decreasing, we may apply
the same argument to $-f$. Let $m>0$ be an integer, and let
$$ \mathscr{S}_m = \left\{x\in \loro{a}{b}: f(x+)-f(x-)\geq \dfrac{1}{m}\right\}.
$$If $x_1<x_2<\cdots < x_{n}$ are in $\mathscr{S}_m$ then by Theorem
\ref{thm:bounds-per-jump-monotone}, $$ \dfrac{n}{m}\leq f(b)-f(a),
$$which implies that $\mathscr{S}_m $ is a finite set. The set of
discontinuities of $f$ in $\lcrc{a}{b}$ is $\bigcup _{m=1} ^\infty
\mathscr{S}_m$, the countable union of finite sets, and hence it is
countable.
\end{pf}

\begin{df}Let $f$ be a function defined on the interval
$\lcrc{a}{b}$ and let
$$ a=x_0<x_1<x_2<\cdots < x_n=b $$be $n+1$ points partitioning the
interval. If there exists $V>0$ such that
$$\sum _{k=1} ^n \absval{f(x_k)-f(x_{k-1})} \leq V  $$for all
partitions of $\lcrc{a}{b}$, then we say that {\em $f$ is of bounded
variation on $\lcrc{a}{b}$.}
\end{df}
\begin{thm}
If $f$ is monotonic on $\lcrc{a}{b}$, then $f$ is of bounded variation
on $\lcrc{a}{b}$.
\end{thm}
\begin{pf}
Let
$$ a=x_0<x_1<x_2<\cdots < x_n=b $$be any partition of $\lcrc{a}{b}$.
Then
$$\sum _{k=1} ^{n} \absval{f(x_k)-f(x_{k-1})} = \max (f(b)-f(a), f(a)-f(b)),
$$the first choice occurring when $f$ is increasing and the second
when $f$ is decreasing. Then $V = \absval{f(b)-f(a)}$ satisfies the
definition of bounded variation for an arbitrary partition.
\end{pf}

\begin{thm}
If $f$ is of bounded variation on $\lcrc{a}{b}$ then $f$ is bounded
on $\lcrc{a}{b}$.
\end{thm}
\begin{pf}
Let $x\in\loro{a}{b}$ and consider the partition $a<x<b$ of
$\lcrc{a}{b}$. Since $f$ is of bounded variation there is a $V>0$
such that $$\absval{f(a)-f(x)}+\absval{f(x)-f(b)}\leq V.  $$ But
then $$\absval{f(x)} \leq \absval{f(x)-f(a)} + \absval{f(a)} \leq V
+ \absval{f(a)},
$$and so $f$ is bounded by the constant quantity $V +
\absval{f(a)}$.
\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Shew that $\loro{0}{+\infty} \rightarrow \loro{0}{+\infty}$, $x
\mapsto x^2$, is not uniformly continuous.
\end{pro}
\end{multicols}

\section{Classical Limits}
\begin{lem}\label{lem:ineqs-for-exp}
If $0 < x \leq 1$ then $$1 \leq  \dfrac{\exp (x)-1}{x} \leq 1 +
x(e-2).$$ If $-\dfrac{1}{2}\leq x < 0$ then
$$ 1 + x\leq \dfrac{\exp (x)-1}{x} \leq 1 + \dfrac{x}{4}.   $$
\end{lem}
\begin{pf}

Since $\left(1+\dfrac{x}{n}\right)^n\leq \exp (x)$ for $n>-x$ by
Proposition \ref{prop:e^x}, we have $1+x\leq  \exp (x)$ for all
$x>-1$. Now, for $n \geq 2$ and $0<x\leq 1$,
$$\begin{array}{lll} \left(1+\dfrac{x}{n}\right)^n & = & 1 + \binom{n}{1}\dfrac{x}{n}+\binom{n}{2}\dfrac{x^2}{n^2}+\binom{n}{3}\dfrac{x^3}{n^3} + \cdots + \binom{n}{n}\dfrac{x^n}{n^n}\\
& = & 1 +
x+x^2\left(\dfrac{1}{2!}\left(1-\dfrac{1}{n}\right)
+\dfrac{1}{3!}\left(1-\dfrac{1}{n}\right)
\left(1-\dfrac{2}{n}\right)x +\cdots
+\dfrac{1}{n!}\left(1-\dfrac{1}{n}\right)
\left(1-\dfrac{2}{n}\right)\cdots \left(1-\dfrac{n-1}{n}\right) x^{n-2}\right)\\
& \leq  & 1 +
x+x^2\left(\dfrac{1}{2!}+ \dfrac{1}{3!}+ \cdots + \dfrac{1}{n!}\right)\\
& < & 1+ x + x^2(e-2),
\end{array}
$$upon using Theorem \ref{thm:e-as-a-sum}. This proves the first set
of inequalities.

\bigskip
For $x>-2$, $1+x+\dfrac{x^2}{4}=\left(1+\dfrac{x}{2}\right)^2\leq
\exp (x)$ by Proposition \ref{prop:e^x}. Now we assume that
$-\dfrac{1}{2}\leq x \leq 0$. As before,

$$\begin{array}{lll} \left(1 + \dfrac{x}{n}\right)^n & = & 1 + x
+x^2\left(\dfrac{1}{2!}\left(1-\dfrac{1}{n}\right)
+\dfrac{1}{3!}\left(1-\dfrac{1}{n}\right)
\left(1-\dfrac{2}{n}\right)x +\cdots
+\dfrac{1}{n!}\left(1-\dfrac{1}{n}\right)
\left(1-\dfrac{2}{n}\right)\cdots \left(1-\dfrac{n-1}{n}\right) x^{n-2}\right).\\
\end{array}$$
Since $x^k \leq 0$ for odd $k$ and $x^k \leq \dfrac{1}{2^k}$ for
even $k$ we may delete the odd terms from the dextral side and so
$$\begin{array}{lll} \left(1 + \dfrac{x}{n}\right)^n & \leq & 1 + x
+x^2\left(\dfrac{1}{2!}\left(1-\dfrac{1}{n}\right)
+0 +\cdots
+\dfrac{1}{n!}\left(1-\dfrac{1}{n}\right)
\left(1-\dfrac{2}{n}\right)\cdots \left(1-\dfrac{2k-1}{n}\right) x^{2k}+ \cdots\right)\\
& \leq & 1 + x + x^2 \left(\dfrac{1}{2}+\dfrac{1}{2^2} + \cdots
\right)\\
& \leq & 1 + x + x^2.
\end{array}$$On taking limits $\exp (x)\leq 1 + x + x^2$ for $-\dfrac{1}{2}\leq x\leq 0$.
Thus we have
$$-\dfrac{1}{2}\leq x< 0\implies 1+x+\dfrac{x^2}{4}\leq \exp (x) \leq 1 + x + x^2 \implies 1+x\leq \dfrac{\exp (x)-1}{x}\leq 1 + \dfrac{x}{4},
$$since division by negative $x$ reverses the sense of the
inequalities.
\end{pf}

\begin{thm}$\lim _{x\rightarrow 0} \dfrac{\exp (x)-1}{x}=1$.
\label{thm:limit(expx-1)/x}\end{thm}
\begin{pf}
We prove that $\lim _{x\rightarrow 0+} \dfrac{\exp (x)-1}{x}=1$ and
that $\lim _{x\rightarrow 0-} \dfrac{\exp (x)-1}{x}=1$. Let us start
with the first assertion. For $0<x\leq 1$ we have, by the Sandwich
Theorem, and Lemma \ref{lem:ineqs-for-exp},
$$1 \leq  \dfrac{\exp (x)-1}{x} \leq 1 + x(e-2) \implies  \lim _{x\rightarrow 0+} \dfrac{\exp (x)-1}{x}=1,$$
proving the first assertion.

\bigskip

For $-\dfrac{1}{2}\leq x\leq 0$ we have, by the Sandwich Theorem and
Lemma \ref{lem:ineqs-for-exp},
$$ 1 + x + \dfrac{x^2}{4} \leq \exp (x) \leq 1 + x + x^2 \implies 1 + x\leq  \dfrac{\exp (x)-1}{x} \leq 1 + \dfrac{x}{4} \implies  \lim _{x\rightarrow 0-} \dfrac{\exp (x)-1}{x}=1,$$
proving the second assertion.
\end{pf}
\begin{lem}\label{lem:ineqs-for-logs}
For $0 < x \leq 1$, $$  1 - \dfrac{x(e-2)}{1+x}\leq \dfrac{\log
(1+x)}{x}\leq  1$$ and for $-\dfrac{1}{2}\leq x \leq 0$,
$$1  \leq \dfrac{\log (1+x)}{x}\leq 1- \dfrac{x}{1+x}.  $$
\end{lem}
\begin{pf}
Since  $x\mapsto \log (1+x)$ is strictly increasing, we have by
Lemma \ref{lem:ineqs-for-exp} for $0<x\leq 1$,
$$1 + x \leq \exp (x)\leq 1 + x + x^2(e-2) \implies \log (1+x) \leq x \leq \log (1+x+x^2(e-2)).  $$
Notice that we have established that $\log (1+x)\leq x$ for $0<x\leq
1$.   Now
$$ \log (1+x+x^2(e-2)) = \log\left((1+x)\left(1+ \dfrac{x^2(e-2)}{1+x}\right)\right) = \log (1+x) + \log\left(1+ \dfrac{x^2(e-2)}{1+x}\right).$$
Since for $x>0$, $x\mapsto \dfrac{x^2}{1+x}$ is strictly increasing,
$\dfrac{x^2(e-2)}{1+x} <\dfrac{e-2}{2}<1$ for $0<x<1$. Thus we may
use $\log (1+y)\leq y$, \quad $0\leq y \leq 1 $ with $y=
\dfrac{x^2(e-2)}{1+x} $ obtaining
$$\log \left(1+ \dfrac{x^2(e-2)}{1+x}\right) \leq \dfrac{x^2(e-2)}{1+x}. $$
Hence $$x\leq \log (1+x+x^2(e-2)) \leq \log (1+x) +
\dfrac{x^2(e-2)}{1+x}.
$$
In conclusion,
$$0<x\leq 1 \implies \log (1+x)\leq x \leq \log (1+x) + \dfrac{x^2(e-2)}{1+x} \implies 1 - \dfrac{x(e-2)}{1+x} \leq \dfrac{\log (1+x)}{x} \leq 1.$$
Similarly, for $-\dfrac{1}{2}\leq x < 0$, by Lemma
\ref{lem:ineqs-for-exp},
$$1+x+\dfrac{x^2}{4} \leq \exp (x) \leq 1+x+x^2 \implies \log\left(1+x+\dfrac{x^2}{4}\right) \leq x\leq \log(1+x+x^2).  $$
Since $x\mapsto \log (1+x)$ is increasing, plainly
$$\log (1+x)\leq \log \left(1+x+\dfrac{x^2}{4}\right) \leq x.  $$

\bigskip

Now observe that  $-\dfrac{1}{2}\leq x < 0\implies 0<
\dfrac{x^2}{1+x}\leq \dfrac{1}{2}<1 $ and so
$$ \log (1+x+x^2) = \log (1+x)+\log \left(1+\dfrac{x^2}{1+x}\right)\leq  \log (1+x)+\dfrac{x^2}{1+x} \implies x\leq \log (1+x)+
\dfrac{x^2}{1+x}.
$$ In conclusion,
$$-\dfrac{1}{2}\leq x < 0
\implies \log (1+x)\leq x \leq \log (1+x)+  \dfrac{x^2}{1+x}
\implies 1  \leq \dfrac{\log (1+x)}{x}\leq 1- \dfrac{x}{1+x},
$$since division by negative $x$ reverses the sense of the
inequalities.
\end{pf}

\begin{thm}$\lim _{x\rightarrow 0} \dfrac{\log (1+x)-x}{x}=0$.
\label{thm:limit(log(1+x)-x/x}
\end{thm}
\begin{pf}
By Lemma \ref{lem:ineqs-for-logs}, for $0<x\leq 1$,
 $$  1 - \dfrac{x(e-2)}{1+x}\leq \dfrac{\log
(1+x)}{x}\leq  1 \implies \lim _{x\to 0+} \dfrac{\log (1+x)}{x}=1,$$
by the Sandwich Theorem. Again, by Lemma \ref{lem:ineqs-for-logs}
and the Sandwich Theorem,  $$-\dfrac{1}{2}\leq x \leq 0 \implies 1
\leq \dfrac{\log (1+x)}{x}\leq 1- \dfrac{x}{1+x} \implies \lim
_{x\to 0-} \dfrac{\log (1+x)}{x}=1.  $$ Combining both results, the
theorem follows.\end{pf} \vspace{2cm}
\begin{figure}[h]
$$\psset{unit=4pc}
\pscircle(0,0){1}\psline(0,0)(1,1)(1,0)(0,0)\uput[r](.2,.1){\theta}
\uput[dl](0,0){O}\uput[ur](1,0){A}
\uput[u](1,1){B}\psdots[dotstyle=*,dotscale=1](0.70710678118654752440084436210485,
0.70710678118654752440084436210485)(1,0)(1,1)(0,0)
\uput[u](0.70710678118654752440084436210485,
0.70710678118654752440084436210485){C}
 \psline(0.70710678118654752440084436210485,
0.70710678118654752440084436210485)(1,0)
\psaxes[labels=none,ticks=none]{->}(0,0)(-1.5,-1.5)(1.5,1.5)
$$\vspace{2cm}\hangcaption{Theorem \ref{thm:limit-sinx/x}.}\label{fig:limit-sinx/x}
\end{figure}


\begin{thm}If $a\in \BBR$, then $\lim _{x\rightarrow 0} \dfrac{(1+x)^a-1}{x}=a$.
\label{thm:limit((1+x)a-1)/x}
\end{thm}
\begin{pf}This is evident for $a=0$. Assume now $a\neq 0$.
Since $x\mapsto \exp (x)$ is continuous and since $a\log (1+x)\to 0$
as $x\to 0$, by Theorems \ref{thm:limit(expx-1)/x} and
\ref{thm:limit(log(1+x)-x/x},
$$ \lim _{x\rightarrow 0} \dfrac{(1+x)^a-1}{x} = a\lim _{x\to 0} \dfrac{\exp (a\log (1+x))-1}{a\log (1+x)}\cdot \lim _{x\to 0} \dfrac{\log (1+x)}{x} = a\cdot 1 \cdot 1 =a. $$
\end{pf}


\begin{thm}$\lim _{\theta\rightarrow 0} \dfrac{\sin
\theta}{\theta}=1$.\label{thm:limit-sinx/x}
\end{thm}
\begin{pf}
We first prove that $\lim _{\theta\to 0+}\dfrac{\sin
\theta}{\theta}=1$. Since $\theta\mapsto \dfrac{\sin
\theta}{\theta}$ is an even function it will also  follow that $\lim
_{\theta\to 0-}\dfrac{\sin \theta}{\theta}=1$.

\bigskip
Assume $0<\theta <\dfrac{\pi}{2}$ and consider $\triangle OAB$
right-angled at $A$, with $OA =1$ and $\angle BOA =\theta$. $C$ is
the point where line $OB$ meets the unit circle with centre at $O$
and $D$ is its perpendicular projection. The area of $\triangle OAC$
is smaller than the area of the circular sector $OAC$, which is
smaller than the area of $\triangle OAB$. Hence
$$\dfrac{1}{2}\sin \theta < \dfrac{\theta }{2}< \dfrac{1}{2}\tan \theta
\implies \cos \theta <\dfrac{\sin \theta}{\theta} <  1
\implies \lim _{\theta \to 0+}\dfrac{\sin\theta}{\theta}=1$$by the
Sandwich Theorem, proving the theorem.\end{pf}
\chapter{Differentiable Functions}
\section{Derivative at a Point}
\begin{df}
Let $I$ be an interval,  $a\in \interiorone{I}$, and $f:I\rightarrow
\BBR$. We say that $f$ is differentiable at $a$ if the limit
$$\lim _{x\rightarrow a} \dfrac{f(x)-f(a)}{x-a}=\lim _{h\rightarrow 0} \dfrac{f(a+h)-f(a)}{h}  $$exists and is
finite. In such a case we denote this limit by $f'(a)$, $Df(a)$, or
$\dfrac{\d{f}}{\d{x}}(a)$ and we call this quantity  {\em the
derivative of $f$ at $a$.}
\end{df}
\begin{df}
Let $I$ be an interval,  $a\in \interiorone{I}$, and $f:I\rightarrow
\BBR$. If
$$\lim _{x\rightarrow a+} \dfrac{f(x)-f(a)}{x-a}=\lim _{h\rightarrow 0+} \dfrac{f(a+h)-f(a)}{h}  $$exists and is
finite we say that {\em $f$ is differentiable at $a$ on the right}
and write $f_+ '(a)$ for this limit. If
$$\lim _{x\rightarrow a-} \dfrac{f(x)-f(a)}{x-a}=\lim _{h\rightarrow 0-} \dfrac{f(a+h)-f(a)}{h}  $$exists and is
finite we say that {\em $f$ is differentiable at $a$ on the left}
and write $f_- '(a)$ for this limit.
\end{df}
\begin{thm}
Let $I$ be an interval,  $a\in \interiorone{I}$, and $f:I\rightarrow
\BBR$. Then $f$ is differentiable at $a$ if and only if both
$f_+ '(a)$ and $f_- '(a)$ exist and are equal. In this case $f_+ '(a) =
f'(a)=f_- '(a)$.
\end{thm}
\begin{pf}
Obvious.
\end{pf}
\begin{thm}
Let $I$ be an interval,  $a\in \interiorone{I}$, and $f:I\rightarrow
\BBR$. If $f$ is differentiable at $a$ then it is continuous at $a$.
\end{thm}
\begin{pf}
We have
$$ \lim _{h\rightarrow 0} f(a+h)-f(a) = \lim _{h\rightarrow 0} \left(\dfrac{f(a+h)-f(a)}{h}\right)h =
\left(\lim _{h\rightarrow 0} \dfrac{f(a+h)-f(a)}{h}\right)\left(\lim
_{h\rightarrow 0} h\right) = f'(a)\cdot 0 = 0.
$$
Thus $\lim _{h\rightarrow 0} f(a+h)-f(a) = 0 \implies \lim
_{h\rightarrow 0} f(a+h)=f(a)$ and so $f$ is continuous.
\end{pf}

\begin{thm}
Let $I\subseteqq \BBR$ be an interval. If $f:I\rightarrow \BBR$ is
identically constant, then $f'(I) = 0$.
\label{thm:derivative-of-constant}\end{thm}
\begin{pf}Assume that $f(I)=K$, a constant.
Let $c\in\interiorone{I}$. Then $f'(c) = \lim _{x\rightarrow c}
\dfrac{f(x)-f(c)}{x-c} =\lim _{x\rightarrow c} \dfrac{K-K}{x-c}=0.$
If $c$ is an endpoint of $I$, then the argument is modified to be
either the left or right derivative.
\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Let $f:\BBR\rightarrow \BBR$,
$$f(x) = \left\{ \begin{array}{ll}  x+1 & \mathrm{if}\ x\in \BBQ \\
2-x & \mathrm{if}\ x\in \BBR\setminus \BBQ\end{array}\right.  $$
Prove that $f$ is nowhere differentiable. \end{pro}
\begin{pro}
Let $f:\BBR \rightarrow \BBR$, $x\mapsto \absval{x}$. Prove that $f$
is not differentiable at $x=0$ and that for $x\neq 0$, $f'(x) =
\signum{x}$.
\end{pro}
\begin{pro}
Let $f:\BBR \rightarrow \BBR$, $x\mapsto x\absval{x}$. Determine
whether $f'(0)$ exists.
\end{pro}
\end{multicols}

\section{Differentiation Rules}
\begin{thm}\label{thm:differentiation-rules}
Let $I$ be an interval,  $a\in \interiorone{I}$, $\lambda \in \BBR$
a constant, and $f, g:I\rightarrow \BBR$. If $f$ and $g$ are
differentiable at $a$ then \begin{enumerate}
\item {\bf (Linearity Rule)} $f+\lambda g$ is differentiable at $a$ and $(f+\lambda g)'(a)=f'(a) + \lambda g'(a)$
\item {\bf (Product Rule)} $fg$ is differentiable at $a$ and $(fg)'(a)=f'(a)g(a) + f(a)g'(a)$
\item if $g(a)\neq 0$, $\dfrac{1}{g}$ is differentiable at $a$ and $\left(\dfrac{1}{g}\right)'(a)=-\dfrac{g'(a)}{(g(a))^2}$
\item {\bf (Quotient  Rule)} if $g(a)\neq 0$, $\dfrac{f}{g}$ is differentiable at $a$ and $\left(\dfrac{f}{g}\right)'(a)=\dfrac{f'(a)g(a)-f(a)g'(a)}{(g(a))^2}$
\end{enumerate}
\end{thm}

\begin{pf}
\begin{enumerate}
\item This follows by the linearity of limits.
\item We have $$\begin{array}{lll}(fg)'(a) & = & \lim _{h\rightarrow 0}
\dfrac{(fg)(a+h)-(fg)(a)}{h}\\
 & = & \lim _{h\rightarrow 0}
\dfrac{g(a+h)(f(a+h)-f(a))+f(a)(g(a+h)-g(a))}{h}\\
& = &  \lim _{h\rightarrow 0}g(a+h)\lim _{h\rightarrow 0}
\dfrac{(f(a+h)-f(a))}{h}+ \lim _{h\rightarrow 0}f(a)\lim _{h\rightarrow 0}\dfrac{(g(a+h)-g(a))}{h}\\
& = & g(a)f'(a)+f(a)g'(a),
 \end{array}$$as desired.
\item We have  $$\begin{array}{lll}\left(\dfrac{1}{g}\right)'(a) & = & \lim _{h\rightarrow 0}
\dfrac{\dfrac{1}{g(a+h)}-\dfrac{1}{g(a)}}{h}\\
 & = & \lim _{h\rightarrow 0}
\dfrac{\dfrac{g(a)-g(a+h)}{g(a+h)g(a)}}{h}\\
& = & \lim _{h\rightarrow 0}
\dfrac{g(a)-g(a+h)}{h}\lim _{h\rightarrow 0} \dfrac{1}{g(a+h)g(a)}\\
& = & \left(-g'(a)\right)  \left(\dfrac{1}{g(a)g(a)}\right)\\
& = &  -\dfrac{g'(a)}{g(a)^2},\\
\end{array}$$as desired.
\item Using (2) and (3),
$$\begin{array}{lll}\left(\dfrac{f}{g}\right)'(a) & = & f'(a)\left(\dfrac{1}{g}\right)(a) + f(a)\left(\dfrac{1}{g}\right)'(a)\\
& = & \dfrac{f'(a)}{g(a)} -\dfrac{f(a)g'(a)}{g(a)^2}\\
& = & \dfrac{f'(a)g(a)-f(a)g'(a)}{(g(a))^2},
\end{array} $$as desired.


\end{enumerate}
\end{pf}
\begin{thm}[Chain Rule] Let $I, J$ be intervals of $\BBR$, with $a\in
I$. Let $f:I\rightarrow \BBR$ and $g:J\rightarrow \BBR$ be such that
$f(I)\subseteqq J$. If $f$ is differentiable at $a$ and $g$ is
differentiable at $f(a)$, then $g\circ f$ is differentiable at $a$
and $(g\circ f)'(a) = g'(f(a))f'(a)$.\label{thm:chain-rule}
\end{thm}
\begin{pf}Put $b = f(a)$, and
$$\varphi(y) = \left \{ \begin{array}{ll}
\frac{g(y) - g(b)}{y-b} & \textrm{if $y \neq b$} \\
g'(b) & \textrm{if $y = b$}
\end{array} \right.
$$

Since $g$ is differentiable at $b$, $\varphi$ is continuous at
$y=b$. Now, for $x \neq a$,
$$\dfrac{g(f(x))-g(f(a))}{x-a} = \varphi(f(x)) \dfrac{f(x) - f(a)}{x-a}. $$
(If  $f(x) \neq f(a)$ this  follows directly from the definition of
$\varphi$. If $f(x) = f(a)$, both sides of the equality are $0$.)


By the continuity of  $f$ at $a$ and of $\varphi$ at $b$,
$$\lim_{x \to a} \varphi(f(x)) = \varphi(f(a)) = g'(f(a)), $$
whence
\begin{eqnarray*}
(g \circ f)'(a) &=& \lim_{x\to a} \frac{g(f(x))-g(f(a))}{x-a} \\
&=& \lim_{x\to a} \varphi(f(x)) \frac{f(x) - f(a)}{x-a} \\
&=& g'(f(a))f'(a),
\end{eqnarray*}as desired.


\end{pf}
\begin{thm}[Inverse Function Rule] Let $I$ be an interval of $\BBR$, with $a\in
I$. Let $f:I\rightarrow \BBR$ be strictly monotonic and continuous
over $I$. If $f$ is differentiable at $a$ and $f'(a) \neq 0$, then
the inverse $f^{-1}: f(I)\rightarrow \BBR$ is differentiable at
$f(a)$ and $$ (f^{-1})'(f(a))=\dfrac{1}{f'(a)}. $$
\end{thm}
\begin{pf} Put  $b= f(a)$.
Observe that $ \lim _{ y\rightarrow b} f^{-1}(y) = a$, and by the
composition rule for limits,
$$ \lim _{ y\rightarrow b} \dfrac{f^{-1}(y)-f^{-1}(b)}{y-b} =
\lim _{ y\rightarrow b} \dfrac{f^{-1}(y)-a}{f(f^{-1}(y))-f(a)} =
\dfrac{1}{f'(a)},$$proving the theorem.
\end{pf}
\begin{rem}
Once it is known that $ (f^{-1})'$ exists, we may proceed as
follows. Since $f^{-1}(f(x))=x$, differentiating on both sides,
using the Chain Rule on the sinistral side,
$$ (f^{-1})'(f(x))f'(x) =1, $$from where the result follows.
\end{rem}

\begin{df} Let $I$ be an interval of $\BBR$. Let $f:I\rightarrow \BBR$ be
differentiable at every point of $I$. The function $f':I\rightarrow
\BBR$, $x\mapsto f'(x)$ is called the {\em derivative function} or
{\em derivative} of the function $f$.
\end{df}

\begin{thm} Let $n\geq 0$ be an integer. Let $f:\BBR\rightarrow
\BBR$, $x\mapsto x^n$. Then $f$ is everywhere differentiable and
$f':\BBR\rightarrow \BBR$ is given by $x\mapsto nx^{n-1}$.
\label{thm:power-rule-1}
\end{thm}
\begin{pf}
Assume first $n$ is strictly positive. By Theorem
\ref{thm:diffbinom},
$$\begin{array}{lll}\lim _{x\rightarrow a} \dfrac{x^n-a^n}{x-a} & = &  \lim _{x\rightarrow a}
\dfrac{(x-a)(x^{n-1}+ax^{n-2}+a^2x^{n-3}+\cdots + a^{n-2}x +
a^{n-1})}{x-a} \\ & = &    \lim _{x\rightarrow a}
(x^{n-1}+ax^{n-2}+a^2x^{n-3}+\cdots + a^{n-2}x + a^{n-1}) \\ &  = &
na^{n-1}.\end{array}
$$Observe that this is true for all $a\in \BBR$.

\bigskip

If $n=0$ then $f$ is constant, say $f(x)=K$ for all $x$ and so
$$ \lim _{x\rightarrow a} \dfrac{f(x)-f(a)}{x-a} =  \lim _{x\rightarrow a} \dfrac{K-K}{x-a} = 0. $$

\end{pf}

\begin{thm}\label{thm:power-rule-2}
Let  $n>0$ be an integer and $f:\loro{0}{+\infty} \rightarrow
\loro{0}{+\infty}$, $x\mapsto \dfrac{1}{x^n}$. Then $f'$ exists
everywhere in $\loro{0}{+\infty}$ and $f':\loro{0}{+\infty}
\rightarrow \loro{-\infty}{0}$ is given by $f'(x) =
-\dfrac{n}{x^{n+1}}$.
\end{thm}
\begin{pf}
We use the result above,  part (3) of Theorem
\ref{thm:differentiation-rules}, and the Chain Rule, to get
$$\dfrac{\d{}}{\d{x}}\dfrac{1}{x^{n}} = -\dfrac{nx^{n-1}}{(x^n)^2} = -\dfrac{n}{x^{n+1}},   $$
and the theorem follows.\end{pf}
\begin{lem}\label{lem:power-rule}
Let  $q\in\BBZ$, $q>0$ be an integer, and $f:\loro{0}{+\infty}
\rightarrow \loro{0}{+\infty}$, $x\mapsto x^{1/q}$. Then $f'$ exists
everywhere in $\loro{0}{+\infty}$ and $f':\loro{0}{+\infty}
\rightarrow \loro{0}{+\infty}$ is given by $f'(x) =
\dfrac{x^{1/q-1}}{q}$.
\end{lem}
\begin{pf}
We have $(f(x))^q=x$. Using the Chain Rule $qf'(x)(f(x))^{q-1}=1$.
Since $f(x)\neq 0$,
$$ f'(x) = \dfrac{1}{q(f(x))^{q-1}} = \dfrac{1}{q(x^{1/q})^{q-1}} = \dfrac{1}{q}x^{1/q-1}. $$

\end{pf}
\begin{thm}\label{thm:power-rule-3}
Let  $r\in \BBQ$ and let $f:\loro{0}{+\infty} \rightarrow
\loro{0}{+\infty}$, $x\mapsto x^r$. Then $f'$ exists everywhere in
$\loro{0}{+\infty}$ and $f':\loro{0}{+\infty} \rightarrow
\loro{0}{+\infty}$ is given by $f'(x) = rx^{r-1}$.
\end{thm}
\begin{pf}
Let $r=\dfrac{a}{b}$, where $a, b$ are integers, with $b>0$. We use
the Chain Rule, Lemma \ref{lem:power-rule}, and Theorem
\ref{thm:power-rule-2}. Then
$$\dfrac{\d{}}{\d{x}}x^{a/b} =  \dfrac{\d{}}{\d{x}}(x^{1/b})^a = a(x^{1/b})^{a-1}\cdot \dfrac{1}{b}x^{1/b-1} = \dfrac{a}{b}x^{a/b-1} = rx^{r-1}, $$
proving the theorem.

\end{pf}


\begin{thm}[Derivative of the Exponential Function]
Let $\exp :\BBR\rightarrow \BBR$, $x\mapsto e^x$. Then $\exp$ is
everywhere differentiable and $\exp ':\BBR\rightarrow \BBR$ is given
by $x\mapsto e^x$. \label{thm:derivative-of-exp}
\end{thm}
\begin{pf}
Using Theorem \ref{thm:limit(expx-1)/x}, we have, with $h=x-a$,
$$\begin{array}{lll} \lim _{x\rightarrow a}\dfrac{e^x-e^a}{x-a} & = & e^a\lim _{x\rightarrow a}\dfrac{e^{x-a}-1}{x-a}\\
&= & e^a \lim _{h\rightarrow 0} \dfrac{e^h-1}{h}\\
& = & e^a\cdot 1\\
& =& e^a. \end{array}$$
\end{pf}
\begin{thm}[Derivative of the Logarithmic Function]\label{thm:derivative-of-log}
Let  $f:\loro{0}{+\infty} \rightarrow \loro{-\infty}{+\infty}$,
$x\mapsto \log x$. Then $f'$ exists everywhere in
$\loro{0}{+\infty}$ and $f':\loro{0}{+\infty} \rightarrow
\BBR\setminus \{0\}$ is given by $f'(x) = \dfrac{1}{x}$.
\end{thm}
\begin{pf}
Let $a>0$. Then, with $h=\dfrac{x}{a}-1$, and using Theorem
\ref{thm:limit(log(1+x)-x/x},
$$\begin{array}{lll} \lim _{x\rightarrow a}\dfrac{\log x-\log a}{x-a} & = & \lim _{x\rightarrow a}\dfrac{\log \dfrac{x}{a}}{x-a}\\
& = & \dfrac{1}{a}\cdot \lim _{x\rightarrow a}\dfrac{\log \left(1+\dfrac{x}{a}-1\right)}{\dfrac{x}{a}-1}\\
&= &  \dfrac{1}{a}\cdot  \lim _{h\rightarrow 0} \dfrac{\log (1+h)}{h}\\
& = &  \dfrac{1}{a}\cdot 1\\
& =&  \dfrac{1}{a}. \end{array}$$
\end{pf}
\begin{thm}[Power Rule]\label{thm:power-rule-4}
Let  $t\in \BBR$ and let $f:\loro{0}{+\infty} \rightarrow
\loro{0}{+\infty}$, $x\mapsto x^t$. Then $f'$ exists everywhere in
$\loro{0}{+\infty}$ and $f':\loro{0}{+\infty} \rightarrow
\loro{0}{+\infty}$ is given by $f'(x) = tx^{t-1}$.
\end{thm}
\begin{pf}
Using the Chain Rule,
$$ \dfrac{\d{}}{\d{x}}x^t = \dfrac{\d{}}{\d{x}}\left(\exp (t\log x)\right) = \dfrac{t}{x}\cdot \left(\exp (t\log x)\right) = \dfrac{t}{x}\cdot x^t = tx^{t-1}.  $$

\end{pf}
\begin{thm}[Derivative of $\sin$]
Let $\sin :\BBR\rightarrow \BBR$, $x\mapsto \sin x$. Then $\sin$ is
everywhere differentiable and $\sin ':\BBR\rightarrow \BBR$ is given
by $x\mapsto \cos x$. \label{thm:derivative-of-sin}
\end{thm}
\begin{pf}
We make a change of variables, and use Theorem
\ref{thm:limit-sinx/x},
$$\begin{array}{lll}  \lim _{x\rightarrow a}\dfrac{\sin x-\sin a}{x-a} & = & \lim _{x\rightarrow a}\dfrac{\sin (x-a+a)-\sin a}{x-a}\\
 & = & \lim _{x\rightarrow a}\dfrac{\sin (x-a)\cos a+\cos(x-a)\sin a-\sin a}{x-a}\\
 & = & (\cos a)\lim _{x\rightarrow a}\dfrac{\sin (x-a)}{x-a} + (\sin a)\lim _{x\rightarrow a}\dfrac{\cos(x-a)-1}{x-a}\\
 & = & (\cos a)\lim _{h\rightarrow 0}\dfrac{\sin h}{h} + (\sin a)\lim _{h\rightarrow 0}\dfrac{\cos h-1}{h}\\
 & = & (\cos a)\cdot 1 + (\sin a)\lim _{h\rightarrow 0}\dfrac{\cos^2 h-1}{h(\cos h +1)}\\
 & = & (\cos a)\cdot 1 + (\sin a)\lim _{h\rightarrow 0}\dfrac{-\sin^2h}{h(\cos h +1)}\\
& = & (\cos a) + (\sin a)\lim _{h\rightarrow 0}\dfrac{\sin h}{h}\cdot \lim _{h\rightarrow 0}\dfrac{-\sin h}{\cos h +1}\\
& = & \cos a,
\end{array}  $$and the theorem follows.
\end{pf}


\begin{thm}[Derivatives of the Goniometric
Functions]\label{thm:derivatives-goniometric}
$$ \begin{array}{lllll}1. & \dfrac{\d{}}{\d{x}} \sin x & = & \cos x & x\in \BBR \\
2. & \dfrac{\d{}}{\d{x}} \cos x & = & -\sin x & x\in \BBR \\
3. &  \dfrac{\d{}}{\d{x}} \tan x & = & \sec^2 x & x\in \BBR \setminus (2\BBZ+1)\dfrac{\pi}{2}\\
4. &  \dfrac{\d{}}{\d{x}} \sec x & = & \sec x\tan x & x\in \BBR \setminus (2\BBZ+1)\dfrac{\pi}{2}\\
5. &  \dfrac{\d{}}{\d{x}} \csc x & = & -\csc x\cot x & x\in \BBR \setminus \BBZ\pi\\
6. & \dfrac{\d{}}{\d{x}} \cot x & = & -\csc^2x & x\in \BBR \setminus \BBZ\pi\\
 \end{array}$$

\end{thm}
\begin{pf}
(1) is Theorem \ref{thm:derivative-of-sin}. To prove (2), observe
that
$$\dfrac{\d{}}{\d{x}} \cos x  = \dfrac{\d{}}{\d{x}} \sin \left(\dfrac{\pi}{2}-x\right) = -\cos \left(\dfrac{\pi}{2}-x\right)  = -\sin x. $$
To prove (3), we use the Quotient Rule,
$$\dfrac{\d{}}{\d{x}} \tan x = \dfrac{\d{}}{\d{x}} \dfrac{\sin x}{\cos x} = \dfrac{(\cos x)(\cos x)-(-\sin x)(\sin x)}{\cos^2x} =\dfrac{1}{\cos^2x}=\sec^2x. $$
To prove (4), we use once again the Quotient Rule,
$$\dfrac{\d{}}{\d{x}} \sec x = \dfrac{\d{}}{\d{x}} \dfrac{1}{\cos x} = \dfrac{(0)(\cos x)-(-\sin x)(1)}{\cos^2x} =\dfrac{\sin x}{\cos^2x}=\sec x\tan x. $$
To prove (5), observe that
$$\dfrac{\d{}}{\d{x}} \csc x  = \dfrac{\d{}}{\d{x}} \sec \left(\dfrac{\pi}{2}-x\right) = -\sec \left(\dfrac{\pi}{2}-x\right)\tan \left(\dfrac{\pi}{2}-x\right)
= -\csc x\cot x. $$ To prove (6), observe that
$$\dfrac{\d{}}{\d{x}} \cot x  = \dfrac{\d{}}{\d{x}} \tan \left(\dfrac{\pi}{2}-x\right) = -\sec ^2 \left(\dfrac{\pi}{2}-x\right)
= -\csc^2 x. $$
\end{pf}

\begin{df}[Higher Order Derivatives]
Let $I$ be an interval of $\BBR$ and let $f:I\rightarrow \BBR$. For
$a\in I$ we define the successive derivatives of $f$ at $a$,
inductively. Put $f^{(0)}(a) = f(a)$. If $n \geq 1$, $$ f^{(n)}(a) =
(f^{(n-1)})'(a),
$$provided $f^{(n-1)}$ is differentiable at $a$.
\end{df}
\begin{rem}
We usually write $f''$ instead of $f^{(2)}$.
\end{rem}
\begin{thm}[Leibniz's Rule]Let $n$ be a positive integer.
$$ (fg)^{(n)} = \sum _{k=0} ^n \binom{n}{k}f^{(k)}g^{(n-k)} $$
\end{thm}
\begin{pf}
This is a generalisation of the Product Rule. The proof is by
induction on $n$. For $n=0$ and $n=1$ the assertion is obvious.
Assume that $ (fg)^{(n)} = \sum _{k=0} ^n
\binom{n}{k}f^{(k)}g^{(n-k)} $. Observe that
$$\begin{array}{lll}(fg)^{(n+1)} & = & ((fg)^{(n)})'\\
& = & \left(\sum _{k=0} ^n \binom{n}{k}f^{(k)}g^{(n-k)}\right)'\\
& = & \sum _{k=0} ^n \binom{n}{k}(f^{(k+1)}g^{(n-k)}+f^{(k)}g^{(n-k+1)})\\
& = & \sum _{k=0} ^n \binom{n}{k}f^{(k+1)}g^{(n-k)}+\sum _{k=0} ^n \binom{n}{k}f^{(k)}g^{(n-k+1)}\\
& = & f^{(0)}g^{(n+1)}+ \sum _{k=1} ^n \left(\binom{n}{k}
+\binom{n}{k-1} \right)f^{(k)}g^{(n+1-k)} + f^{(n+1)}g^{(0)}\\
& = & \sum _{k=0} ^{n+1} \binom{n+1}{k}f^{(k)}g^{(n+1-k)},
\end{array} $$proving the statement.


\end{pf}


\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small



\begin{pro}
Prove that $$ \dfrac{2}{x^2 -1} =  \dfrac{1}{x-1} -  \dfrac{1}{x+1}
$$and use this result to find the $100$th derivative
of $f(x) = \dfrac{2}{x^2 -1}$. \begin{answer} Observe that $$
\dfrac{1}{x-1} - \dfrac{1}{x+1}  = \dfrac{(x+1) -
(x-1)}{(x-1)(x+1)}= \dfrac{2}{x^2 -1}. $$ If $f(x) = (x-1)^{-1}$
then $$f'(x) = -1(x-1)^{-2}; f''(x) = (-1)(-2)(x-1)^{-3}; f'''(x) =
(-1)(-2)(-3)(x-1)^{-4}; \ldots ; f^{(100)}(x) =
100!(x-1)^{-101}.$$Similarly, if $g(x) = (x+1)^{-1}$ then $$g'(x) =
-1(x+1)^{-2}; g''(x) = (-1)(-2)(x+1)^{-3}; g'''(x) = (-1)(-2)(-3)(x+1)^{-4};
\ldots ; g^{(100)}(x) = 100!(x+1)^{-101}.$$Hence
$$ \dfrac{\mathrm{d}^{100}}{\d{x}^{100}} \ \dfrac{2}{x^2 -1} =
f^{(100)}(x) - g^{(100)}(x) = 100!(x-1)^{-101} - 100!(x+1)^{-101}.
$$
\end{answer}
\end{pro}
\begin{pro}
Find the $100$-th derivative of $x\mapsto x^{2}\sin x$.
\begin{answer}
We use Leibniz's Rule and the observation that the third derivative
of $x\mapsto x^{2}$ is $0$. Also $(\sin x)^{(4n)} = \sin x$,
 $(\sin x)^{(4n+2)} = -\sin x$,  $(\sin x)^{(4n+1)} = \cos x$, and  $(\sin x)^{(4n+3)} = -\cos x$.
Then
$$ \dfrac{\d{}^{100}}{\d{x^{100}}}x^{2}\sin x = \binom{100}{0}x^2(\sin x)^{(100)} + \binom{100}{1}(x^2)'(\sin x)^{(99)}+
\binom{100}{2}(x^2)''(\sin x)^{(98)} = x^2\sin x-200x\cos x-9900\sin
x.
$$
\end{answer}
\end{pro}
\begin{pro}
Demonstrate that the polynomial $p(x)\in\BBR [x]$ has a zero at
$x=a$ of multiplicity $k$ if and only if
$$ p(a) = p'(a) =\cdots = p^{(k-1)}(a) = 0, \qquad p^{(k)}(a) \neq 0. $$
\end{pro}
\begin{pro}
Demonstrate that if for all $x\in \BBR$ there holds the identity
$$ \sum _{k=0} ^n a_k(x-a)^k=\sum _{k=0} ^n b_k(x-b)^k, $$then
$a_k = \sum _{j=k} ^n \binom{j}{k}b_j(a-b)^{j-k}.$
\end{pro}
\begin{pro}
Let $p$ be a polynomial of degree $r$ and consider the polynomial
$F$ with
$$F(x) = p(x)+p'(x)+p''(x) + \cdots + p^{(r)}(x).  $$Prove that
$$ \dfrac{\d{\left(F(x)\exp (-x)\right)}}{\d{x}} = -\exp (-x)p(x). $$
\end{pro}
\end{multicols}
\section{Rolle's Theorem and the Mean Value Theorem}
\begin{thm}[Rolle's Theorem]
Let $(a, b)\in \BBR^2$ such that $a<b$, $f:\lcrc{a}{b}\rightarrow
\BBR$ be such that $f$ is continuous on $\lcrc{a}{b}$ and
differentiable in $\loro{a}{b}$, and $f(a)=f(b)$. Then there exists
$c\in \loro{a}{b}$ such that $f'(c)=0.$
\end{thm}\begin{pf} Since $f$ is continuous on $\lcrc{a}{b}$,  by
Weierstrass' Theorem \ref{thm:weierstrass-max-min},
$$m= \inf  _{x\in \lcrc{a}{b}}f(x), \qquad M= \sup  _{x\in \lcrc{a}{b}}f(x),$$
exist. If $m=M$, then $f$ is constant and so by Theorem
\ref{thm:derivative-of-constant}, $f'$ is identically $0$ and there
is nothing to prove. Assume that $m<M$. Since $f(a)=f(b)$, one may
not simultaneously have $M=f(a)$ and $m=f(a)$. Assume thus without
loss of generality that $M\neq f(a)$. Then there exists $c\in
\loro{a}{b}$ such that $f(c)=M$. Now
$$ \lim _{x\rightarrow c-}\dfrac{f(x)-f(c)}{x-c} \geq 0, \qquad  \lim _{x\rightarrow c+}\dfrac{f(x)-f(c)}{x-c} \leq 0 ,$$
whence it follows that $f'(c) =0$, proving the theorem. \end{pf}
\begin{thm}[Mean Value Theorem]
Let $(a, b)\in \BBR^2$ such that $a<b$, $f:\lcrc{a}{b}\rightarrow
\BBR$ be such that $f$ is continuous on $\lcrc{a}{b}$ and
differentiable on $\loro{a}{b}$. Then there exists $c\in
\loro{a}{b}$ such that $f'(c)=\dfrac{f(b)-f(a)}{b-a}.$
\end{thm}\begin{pf}
Put $$g:\lcrc{a}{b}\rightarrow \BBR, \quad g(x) = f(x) -
\dfrac{f(b)-f(a)}{b-a}x.
$$Then $g$ is continuous on $\lcrc{a}{b}$ and
differentiable on $\loro{a}{b}$, and $g(a) = g(b)$. Since $g$
satisfies the hypotheses of Rolle's Theorem, there is $c\in
\loro{a}{b}$ such that
$$g'(c) = 0 \implies f'(c) -\dfrac{f(b)-f(a)}{b-a}=0 \implies  f'(c) =\dfrac{f(b)-f(a)}{b-a},
$$proving the theorem.\end{pf}

\begin{thm}\label{thm:derivative-0-gives-const-fun}
If $f:I\rightarrow \BBR$ is continuous on the interval $I$,
differentiable on $\interiorone{I}$, and if $\forall x\in
\interiorone{I}$, $f'(x) = 0$ then $f$ is constant on $I$.
\end{thm}
\begin{pf}
Let $(a, b)\in I^2$, $a<b$. By the Mean Value Theorem, there is
$c\in \loro{a}{b}$ such that
$$f(b)-f(a) =f'(c)(b-a)=0\cdot (b-a) \implies f(b)=f(a),  $$thus any
two outputs have exactly the same value and $f$ is constant.
\end{pf}
\begin{thm}\label{thm:increasing-iff-firstderiv-pos}
If $f:I\rightarrow \BBR$ is continuous on the interval $I$, and
differentiable on $\interiorone{I}$. Then $f$ is increasing on $I$
if and only if $\forall x\in  \interiorone{I}$, $f'(x) \geq 0$ and
$f$ is decreasing on $I$ if and only if $\forall x\in
\interiorone{I}$, $f'(x) \leq 0$.
\end{thm}
\begin{pf}
\begin{enumerate}
\item[$\implies$] Suppose $f$ is increasing. Let
$x_0\in\interiorone{I}$. If $h\neq 0$ is so small that
$x_0+h\in\interiorone{I}$, then $$\dfrac{f(x_0+h)-f(x_0)}{h} \geq 0
\implies \lim _{h\rightarrow 0} \dfrac{f(x_0+h)-f(x_0)}{h} \geq 0
\implies f'(x_0)\geq 0.
$$
If $f$ is decreasing we apply what has just been proved to $-f$.
\item[$\Leftarrow$] Suppose that for all $x\in \interiorone{I}$, $f'(x)\geq
0$. Let $(a, b)\in I^2$, $a<b$. By the Mean Value Theorem, there is
$c\in \loro{a}{b}$ such that
$$ f(b)-f(a) = (b-a)f'(c) \geq 0, $$and so $f$ is increasing. If  for all $x\in \interiorone{I}$, $f'(x)\leq
0$ we apply what we just proved to $-f$.
\end{enumerate}
\end{pf}

\begin{thm}\label{thm:strict-increasing-iff-firstderiv-pos}
If $f:I\rightarrow \BBR$ is continuous on the interval $I$, and
differentiable on $\interiorone{I}$. Then $f$ is  strictly
increasing on $I$ if and only if $\forall x\in  I$, $f'(x) \geq 0$
and the set $\interior{\{x\in I^\circ : f'(x) =0\}} = \varnothing$.
Also, $f$ is strictly decreasing on $I$ if and only if $\forall x\in
I$, $f'(x) \leq 0$ and $\interior{\{x\in I^\circ : f'(x) =0\}} =
\varnothing$.
\end{thm}
\begin{pf}
\begin{enumerate}
\item[$\implies$] Suppose $f$ is strictly increasing. From Theorem
\ref{thm:increasing-iff-firstderiv-pos} we know that $\forall x\in
\interiorone{I}$, $f'(x)\geq 0$. Assume that $\interior{\{x\in
I^\circ : f'(x) =0\}} \neq \varnothing$. Then there is $c\in
\interior{\{x\in I^\circ : f'(x) =0\}}$ and $\varepsilon >0$ such
that $\loro{c-\varepsilon}{c+\varepsilon}\subseteqq I$ and $\forall
x\in \loro{c-\varepsilon}{c+\varepsilon}, f'(x) =0 $. By Theorem
\ref{thm:derivative-0-gives-const-fun}, $f$ must be constant on
$\loro{c-\varepsilon}{c+\varepsilon}$ and so it is not strictly
increasing, a contradiction. If $f$ is strictly decreasing, we apply
what has been proved to $-f$.
\item[$\Leftarrow$] Conversely, suppose that  $\forall x\in  I$, $f'(x) \geq
0$.
and the set $\interior{\{x\in I^\circ : f'(x) =0\}} = \varnothing$.
From Theorem \ref{thm:increasing-iff-firstderiv-pos}, $f$ is
increasing on $I$. Suppose that there exist $(a, b)\in I^2$, $a<b$
such that $f(a)=f(b)$. Since $f$ is increasing, we have $\forall
x\in\lcrc{a}{b}$, $f(x)=f(a)$. But then $\loro{a}{b}\subseteqq
\{x\in I^\circ : f'(x) =0\} $, a contradiction, since this last set
was assumed empty. If $f'(x)\leq 0$ we apply what has been proved to
$-f$.
\end{enumerate}
\end{pf}



\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Shew, by means of Rolle's Theorem, that $5x^4 - 4x + 1 = 0$ has a
solution in $[0;1]$.  \begin{answer}   Put $f(x) = x^5 -2x^2 + x$.
Then $f(0) = f(1) = 0$ and by Rolle's Theorem there is  $c\in ]0;1[$
such that $f'(c) = 5c^4-4c + 1 = 0$.
\end{answer}
\end{pro}
\begin{pro}
Let $a_0 , a_1 \ldots , a_n$ be real numbers satisfying
$$ {a_0} + \frac{a_1}{2} + \frac{a_2}{3} + \cdots + \frac{a_n}{n + 1} = 0.$$
Shew that the polynomial $$ a_0 + a_1 x + \cdots + a_n x^n $$ has a
root in $\loro{0}{1}$.
\begin{answer}
Set $$f(x) =  {a_0}x + \frac{a_1x^2}{2} + \frac{a_2x^3}{3} + \cdots
+ \frac{a_nx^{n+1}}{n + 1}, $$and use Rolle's Theorem.
\end{answer}
\end{pro}
\begin{pro}
Let $a, b, c$ be three functions such that $a'=b$, $b'=c$, and
$c'=a$. Prove that the function $a^3+b^3+c^3-3abc$ is constant.
\end{pro}
\begin{pro}
Suppose that $f: \lcrc{0}{1} \rightarrow \BBR $ is differentiable,
$f(0) = 0$ and $f(x) > 0$ for $x \in \loro{0}{1}.$ Is there a number
$c \in \loro{0}{1}$ such that $$ \frac{2f'(c)}{f(c)} = \frac{f'(1 -
c)}{f(1 - c)} ?$$
\begin{answer}
Set $g(x) = f(x)^2f(1-x)$. Since $g(0) = g(1)=0$, $g$ satisfies the
hypotheses of Rolle's Theorem. There is a $c\in\loro{0}{1}$ such
that $$ g'(c)= 0 \implies 2f'(c)f(c)f(1-c) - f(c)^2f'(1-c)=0.
$$Since by assumption $f(c)f(1-c) \neq 0$ we must have, upon dividing
every term by $f(c)^2f(1-c)$, the assertion.
\end{answer}
\end{pro}
\begin{pro}
Let $n\geq 1$ be an integer and let $f:[0;1]\rightarrow \BBR$ be
differentiable and such that $f(0)=0$ and $f(1)=1$. Prove that there
exist distinct points $0 < a_0< a_1< \cdots < a_{n-1} < 1$ such that
$$ \sum _{k=0} ^{n-1} f'(a_k)=n. $$
\begin{answer}
For $0\leq k \leq n-1$, consider the interval
$\lcrc{\dfrac{k}{n}}{\dfrac{k+1}{n}}$. By the Mean Value Theorem, there
are $a_k\in\loro{\dfrac{k}{n}}{\dfrac{k+1}{n}}$ such that
$$ f'(a_k)  = \dfrac{f\left(\dfrac{k+1}{n}\right)-f\left(\dfrac{k}{n}\right)}{\dfrac{1}{n}} =
n\left(f\left(\dfrac{k+1}{n}\right)-f\left(\dfrac{k}{n}\right)\right).
$$Summing from $k=0$ to $k=n-1$ and noting that the dextral side
telescopes,
$$ \sum _{k=0} ^{n-1}f'(a_k) = n\sum _{k=0} ^{n-1}
\left(f\left(\dfrac{k+1}{n}\right)-f\left(\dfrac{k}{n}\right)\right)
=n(f(1)-f(0)) =n. $$
\end{answer}
\end{pro}
\begin{pro}
Let $n\geq 1$ be an integer and let $f:[0;1]\rightarrow \BBR$ be
differentiable and such that $f(0)=0$ and $f(1)=1$. Prove that there
exist distinct points $0 < a_0< a_1< \cdots < a_{n-1} <1$ such that
$$ \sum _{k=0} ^{n-1 } \dfrac{1}{f'(a_k)}=n. $$
\begin{answer}
Let $k_i\in \lcrc{0}{1}$ be the smallest number such that $f(k_i) =
\dfrac{i}{n}$, $1\leq i \leq n-1$. Put $k_0=0, k_n=1$. The existence
of the $k_i$ is guaranteed by the Intermediate Value Theorem.
Moreover, since the $k_i$ are chosen to be the first time $f$ is
$\dfrac{i}{n}$, once again, by the Intermediate Value Theorem we
must have
$$0<k_1<k_2<\cdots < k_{n-1}<1.  $$Hence, by the Mean Value Theorem,
there exists $a_i\in \loro{k_i}{k_{i+1}}$, $0 \leq i \leq n-1$, such
that
$$ f'(a_i) =\dfrac{f(k_{i+1})-f(k_{i})}{k_{i+1}-k_i} = \dfrac{1}{n(k_{i+1}-k_i)} \implies \dfrac{1}{f'(a_i)} = n(k_{i+1}-k_i).  $$
Summing,
 $$ \sum _{i=0} ^{n-1} \dfrac{1}{f'(a_i)}= n\sum _{i=0} ^{n-1}(k_{i+1}-k_i) = n(k_n-k_0)=n. $$
\end{answer}
\end{pro}


\begin{pro}[Putnam 1946] Let $p(x)$ be a quadratic
polynomial with real coefficients satisfying $\max _{x\in
\lcrc{-1}{1}} \absval{p(x)} \leq 1$. Prove that $\max _{x\in
\lcrc{-1}{1}} \absval{p'(x)} \leq 4$.
\end{pro}
\begin{pro}[Generalised Mean Value Theorem] Let $f, g$ be continuous
on $\lcrc{a}{b}$ and differentiable on $\loro{a}{b}$. Then there is
$c\in \loro{a}{b}$ such that $$ (f(b)-f(a))g'(c) =
(g(b)-g(a))f'(c).$$
\end{pro}
\begin{pro}[First L'H\^{o}pital Rule]
Let $I$ be an open interval (finite or infinite) having $c$ as an
endpoint (which may be finite or infinite). Assume $f, g$ are
differentiable on $I$, $g$ and $g'$ never vanish on $I$ and that
$\lim _{x\rightarrow c}f(x)=0=\lim _{x\rightarrow c}g(x)$. Prove
that  if $\lim _{x\rightarrow c} \dfrac{f'(x)}{g'(x)} = L$ (where
$L$ is finite or infinite), then  $\lim _{x\rightarrow c}
\dfrac{f(x)}{g(x)} = L$
\end{pro}
\begin{pro}[Second L'H\^{o}pital Rule]
Let $I$ be an open interval (finite or infinite) having $c$ as an
endpoint (which may be finite or infinite). Assume $f, g$ are
differentiable on $I$, $g$ and $g'$ never vanish on $I$ and that
$\lim _{x\rightarrow c}\absval{f(x)}=\lim _{x\rightarrow
c}\absval{g(x)}=+\infty$. Prove that if $\lim _{x\rightarrow c}
\dfrac{f'(x)}{g'(x)} = L$ (where $L$ is finite or infinite), then
$\lim _{x\rightarrow c} \dfrac{f(x)}{g(x)} = L$
\end{pro}
\begin{pro}
If $f'$ exists on an interval containing $c$, then
$$ f'(c) = \lim _{h\rightarrow 0} \dfrac{f(c+h)-f(c-h)}{2h}. $$
\end{pro}
\begin{pro}
If $f''$ exists on an interval containing $c$, then
$$ f''(c) = \lim _{h\rightarrow 0} \dfrac{f(c+h)+f(c-h)-2f(c)}{h^2}. $$
\end{pro}
\end{multicols}
\section{Extrema}
\begin{df}
Let $X\subseteqq \BBR$,  $f:X\rightarrow
\BBR$.\begin{enumerate}\item  We say that $f$ has a {\em local
maximum at $a$} if there exists a neighbourhood of $a$, $\N{a}$ such
that $\forall x\in \N{a}$, $f(x)\leq f(a)$.\item   We say that $f$
has a {\em local minimum at $a$} if there exists a neighbourhood of
$a$, $\N{a}$ such that $\forall x\in \N{a}$, $f(x)\geq f(a)$. \item
We say that $f$ has a {\em strict local maximum at $a$} if there
exists a neighbourhood of $a$, $\N{a}$ such that $\forall x\in
\N{a}$, $f(x)< f(a)$. \item  We say that $f$ has a {\em strict local
minimum at $a$} if there exists a neighbourhood of $a$, $\N{a}$ such
that $\forall x\in \N{a}$, $f(x)>f(a)$.\item  We say that $f$ has a
{\em local extremum at $a$} if $f$ has either a local maximum or a
local minimum at $a$.\item  We say that $f$ has a {\em strict local
extremum at $a$} if $f$ has either a strict local maximum or a
strict local minimum at $a$. The plural of extremum is {\em
extrema}.

\end{enumerate}
\end{df}
\begin{thm}
If $f:I\rightarrow \BBR$ is continuous on the interval $I$,
differentiable on $\interiorone{I}$, and if $f$ has a local extremum
at $a\in \interiorone{I}$, then $f'(a) = 0$.
\end{thm}

\begin{pf}
Suppose $f$ admits a local maximum at $a$. Let $h\neq 0$ be so small
that $a+h\in I$. Now
$$ h>0 \implies \dfrac{f(a+h)-f(a)}{h} \leq 0, \qquad h< 0 \implies \dfrac{f(a+h)-f(a)}{h} \geq 0. $$
Upon taking limits as $h\rightarrow 0$,  $f'(a)\leq 0$ and
$f'(a)\geq 0$, whence $f'(a)=0$.
\end{pf}
\begin{df}
Let $f:I\rightarrow \BBR$. The points $x\in I$ where $f'(x)=0$ are
called {\em critical points or stationary points} of $f$.
\end{df}
\begin{thm}
Let  $f:\lcrc{a}{b}\rightarrow \BBR$ be a twice differentiable
function having a critical point at $c\in\loro{a}{b}$. If $f''(c)<0$
then $f$ has a relative maximum at $x=c$, and if $f''(c)>0$ then $f$
has a relative minimum at $x=c$.
\end{thm}
\begin{pf}
Assume that $f'(c)=0 $ and $f''(c)<0$. Since $$\lim _{x\rightarrow
c} \dfrac{f'(x)}{x-c} = \lim _{x\rightarrow c}
\dfrac{f'(x)-f'(c)}{x-c} = f''(c)<0,
$$ there exists $\delta >0$ such that $f'(x)>0$ when $c-\delta <x<c$
and $f'(x)<0$ when $c<x<c+\delta$. Consequently, $f$ is strictly
increasing on $\loro{c-\delta}{c}$ and strictly decreasing on
$\loro{c}{c+\delta}$. Hence
$$ \absval{x-c}<\delta \implies f(x)\leq f(c), $$and so $x=c$ is a
local maximum. If $f''(c)>0$ then we apply what has been proved to
$-f$.
\end{pf}

\begin{thm}[Darboux's Theorem]\label{thm:darboux}
Let $f$ be differentiable on $\lcrc{a}{b}$ and suppose that
$f'(a)<C<f'(b)$. Then there exists $c\in\loro{a}{b}$ such that
$f'(c) = C$.
\end{thm}

\begin{pf}Put $g(x)=f(x)-Cx$. Then $g$ is differentiable on
$\lcrc{a}{b}$. Now $g'(a)=f'(a)-C<0$ so $g$ is strictly decreasing
at $x=a$. Similarly, $g'(b)=f'(b)-C>0$ so $g$ is strictly increasing
at $x=b$. Since $g$ is continuous on $\lcrc{a}{b}$, it attains a
minimum there, and by the above this minimum occurs neither at $a$
nor at $b$, but at some point $c\in\loro{a}{b}$, where $g'(c) =
f'(c)-C=0$, proving the theorem.
\end{pf}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Let $f$ be a polynomial with real coefficients of degree $n$ such
that $\forall x\in\BBR \quad f(x) \geq 0$. Prove that $$\forall
x\in\BBR \qquad f(x) + f'(x) + f''(x) + \cdots + f^{(n)}(x) \geq 0.
$$
\end{pro}
\begin{pro}
Put $f(0) = 1$, $f(x) = x^x$ for $x>0$. Find the minimum value of
$f$. \begin{answer}  We have $f'(x) = x^x(\log x + 1)$ whence $f'(x)
= 0 \implies x = e^{-1}$. Since $f'(x) < 0 $ for $0 < x < e^{-1}$
and $f'(x) > 0$ for $x> e^{-1}$, $x = e^{-1}$ is a local (relative)
minimum. Thus $f(x) \geq f(e^{-1}) =
\left(\dfrac{1}{e}\right)^{1/e}$.

\end{answer}
\end{pro}

\end{multicols}
\section{Convex Functions}
\begin{df}
Let $I \subseteqq \BBR$ be an interval. A function $f:I\rightarrow
\BBR$ is said to be {\em convex} if
$$\forall (a, b)\in I^2, \forall \lambda \in \lcrc{0}{1}, f(\lambda a + (1-\lambda)b)\leq \lambda f(a) + (1-\lambda)f(b).
$$ We say that $f$ is {\em concave} if $-f$ is convex.
\end{df}
\begin{rem}$f$ is convex if given any two points on its graph, the
straight line joining these two points lies above the graph of $f$.
See figure \ref{fig:convex}.
\end{rem}
\vspace{1cm}
\begin{figure}[h]
\centering
\begin{minipage}{7cm}
$$ \psset{unit=.8pc}
 \rput(0,-4){\parabola[linewidth=2pt,linecolor=red]{<->}(2.25,5.0625)(0,0)
 \psline[linewidth=2pt,linecolor=blue](2,4)(-1,1)\psdots[dotscale=1,dotstyle=*](2,4)(-1,1)(.5,2.5)(.5,.25)
 }
$$\vspace{1cm}\footnotesize \hangcaption{ A convex curve }\label{fig:convex}\end{minipage}
\begin{minipage}{7cm}
$$ \psset{unit=.8pc}
 \parabola[linewidth=2pt,linecolor=red]{<->}(-2.25,-5.0625)(0,0)
 \psline[linewidth=2pt,linecolor=blue](2,-4)(-1,-1)\psdots[dotscale=1,dotstyle=*](2,-4)(-1,-1)(.5,-2.5)(.5,-.25)
$$\vspace{1cm}\footnotesize \hangcaption{ A concave curve. }\label{fig:concave}\end{minipage}

\end{figure}

\begin{df}
Let $(x_1, x_2, \ldots , x_n)\in \BBR^n$ and let $\lambda _k\in
\lcrc{0}{1}$ be such that $\sum _{k=1} ^n \lambda _k =1$. The sum
$$ \sum _{k=1} ^n \lambda _k x_k $$ is called a {\em convex
combination} of the $x_k$.
\end{df}
\begin{thm}\label{thm:convex-combinations-in-intervals}
If $(x_1, x_2, \ldots , x_n)\in \lcrc{a}{b}^n$, then any convex
combination of the $x_k$ also belongs to $\lcrc{a}{b}$.
\end{thm}
\begin{pf}
Assume  $\lambda _k\in \lcrc{0}{1}$ be such that $\sum _{k=1} ^n
\lambda _k =1$. Since the $\lambda _k \geq 0$ we have
$$a\leq x_k \leq b \implies \lambda _ka \leq \lambda _kx_k \leq \lambda _kb.   $$
Adding, and bearing in mind that $\sum _{k=1} ^n \lambda _k =1$,
$$ \left(\sum _{k=1} ^n \lambda _k\right)a \leq \sum _{k=1} ^n \lambda _kx_k \leq \left(\sum _{k=1} ^n \lambda _k\right)b
\implies a \leq \sum _{k=1} ^n \lambda _kx_k \leq b, $$
proving the theorem.
\end{pf}

\begin{thm}[Jensen's Inequality]\label{thm:Jensen}Let $I \subseteqq \BBR$ be an interval and let  $f:I\rightarrow
\BBR$ be a convex function. Let $n\geq 1$ be an integer, $x_k\in I$,
and $\lambda _k\in \lcrc{0}{1}$ be such that $\sum _{k=1} ^n \lambda
_k =1$. Then $$f\left(\sum _{k=1} ^n \lambda _kx_k\right) \leq  \sum
_{k=1} ^n \lambda _kf(x_k). $$
\end{thm}
\begin{pf}
The proof is by induction on $n$. For $n=2$ we must  shew that given
 $(x_{1}, x_{2})\in \lcrc{a}{b}^2$, $$ f\left( \lambda _{1}x_{1}+\lambda
_{2}x_{2}\right) \leq \lambda _{1}f(x_{1})+\lambda _{2}f(x_{2}).$$
As $\lambda _{1}+\lambda _{2} =1$, we may put $\lambda = \lambda
_{2}=1-\lambda _{1}$ and so the above inequality becomes  $$f\left(
\lambda x_{1}+\left( 1-\lambda \right) x_{2}\right) \leq \lambda
f(x_{1})+\left( 1-\lambda \right) f(x_{2}),$$retrieving the
definition of convexity.
\bigskip

 Assume now that $f\left(
\sum_{k=1}^{n-1}\mu _{k}x_{k}\right) \leq \sum_{k=1}^{n-1}\mu
_{k}f\left( x_{k}\right) $, when $\sum_{k=1}^{n-1}\mu _{k}=1$, $\mu
_k\in\loro{0}{1}$.  We must prove that $f\left(
\sum_{k=1}^{n}\lambda _{k}x_{k}\right) \leq \sum_{k=1}^{n}\lambda
_{k}f\left( x_{k}\right) $, when $\sum_{k=1}^{n}\lambda _{k}=1$,
$\lambda _k\in\loro{0}{1}$.

\bigskip
If $\lambda _n = 1$ the assertion is trivial, since then $\lambda_1
= \cdots = \lambda _{n-1}=0$. So assume that $\lambda _n \neq 1$.
Observe that $\sum_{k=1}^{n-1}\frac{\lambda _{k}}{1-\lambda
_{n}}=\frac{\left(
\sum_{k=1}^{n}\lambda _{k}\right) -\lambda _{n}}{1-\lambda _{n}}=\frac{%
1-\lambda _{n}}{1-\lambda _{n}}=1$
so that  $\sum_{k=1}^{n-1}\frac{\lambda _{k}%
}{1-\lambda _{n}}x_{k}$ is a convex combination of the $x_k$ and
hence also belongs to  $\lcrc{a}{b}$, by Theorem
\ref{thm:convex-combinations-in-intervals}. Since $f$ is convex,

$$\begin{array}{lll}f\left( \sum_{k=1}^{n}\lambda _{k}x_{k}\right) & = & f\left(
\sum_{k=1}^{n-1}\lambda _{k}x_{k}+\lambda _{n}x_{n}\right)\\
& = & f\left( \left( 1-\lambda _{n}\right)
\sum_{k=1}^{n-1}\frac{\lambda _{k}}{1-\lambda _{n}} x_{k}+\lambda
_{n}x_{n}\right)\\
&  \leq &
 \left( 1-\lambda _{n}\right) f\left( \sum_{k=1}^{n-1}\frac{\lambda _{k}%
}{1-\lambda _{n}}x_{k}\right) +\lambda _{n}f\left( x_{n}\right)
\end{array}$$

By the inductive hypothesis, with $\mu_k =\frac{\lambda
_{k}}{1-\lambda _{n}}$, so that $\sum_{k=1}^{n-1}\mu _{k}=1$,

$$f\left( \sum_{k=1}^{n-1}\frac{\lambda _{k}}{1-\lambda
_{n}}x_{k}\right) \leq \sum_{k=1}^{n-1}\frac{\lambda _{k}}{1-\lambda
_{n}}f\left( x_{k}\right). $$Finally, we gather,
$$\begin{array}{lll}f\left( \sum_{k=1}^{n}\lambda _{k}x_{k}\right)
&\leq &  \left( 1-\lambda _{n}\right) f\left(
\sum_{k=1}^{n-1}\frac{\lambda _{k}}{1-\lambda _{n}} x_{k}\right)
+\lambda _{n}f\left( x_{n}\right) \\ &  \leq  &
 \left( 1-\lambda _{n}\right) \sum_{k=1}^{n-1}\frac{\lambda _{k}}{%
1-\lambda _{n}}f\left( x_{k}\right) +\lambda _{n}f\left(
x_{n}\right)\\
&  = & \sum_{k=1}^{n-1}\lambda _{k}f\left( x_{k}\right) +\lambda
_{n}f\left( x_{n}\right)\\
&  = & \sum_{k=1}^{n}\lambda _{k}f\left( x_{k}\right),\end{array} $$
proving the theorem.
\end{pf}

\begin{thm}\label{thm:convex-iff-Av-Ra-Ch-increasing}
Let $I \subseteqq \BBR$ be an interval and let $f:I\rightarrow
\BBR$. For $a\in I$ we put
$$\fun{T_a}{x}{\dfrac{f(x)-f(a)}{x-a}}{I\setminus\{a\}}{\BBR}.$$Then
$f$ is convex if and only if $\forall a\in I$, $T_a$ is increasing
over $I\setminus \{a\}$.
\end{thm}
\begin{pf}Let $a<b<c$ as in figure
\ref{fig:convex-iff-Av-Ra-Ch-increasing}. Consider the points $A(a,
f(a))$, $B(b, f(b))$, and $C(c, f(c))$. The slopes

$$m_{AB} = \dfrac{f(b)-f(a)}{b-a}, \qquad  m_{BC} = \dfrac{f(c)-f(b)}{c-b}, \qquad m_{AC} = \dfrac{f(c)-f(a)}{c-a},  $$
satisfy
$$ m_{AB}\leq m_{AC}, \qquad m_{AC}\leq m_{BC}, \qquad m_{AB}\leq m_{BC},  $$
and the theorem follows. An analytic proof may be obtained by
observing that from Theorem
\ref{thm:convex-combinations-in-intervals}, any \mbox{$\lambda a +
(1-\lambda)c$} lies in the interval $\lcrc{a}{c}$ for $\lambda \in
\lcrc{0}{1}$. Conversely, given $b\in \lcrc{a}{c}$, we may solve for
$\lambda$ the equation $$b = \lambda a + (1-\lambda)c \implies
\lambda = \dfrac{c-b}{c-a}\in \lcrc{0}{1}.$$Hence
\begin{equation}f(\lambda a + (1-\lambda)c)\leq \lambda f(a) + (1-\lambda)f(c)
\iff f(b) \leq \dfrac{c-b}{c-a}f(a)+\dfrac{b-a}{c-a}f(c) \iff
\dfrac{f(b)-f(a)}{b-a}\leq \dfrac{f(c)-f(b)}{c-b}.
\label{eq:conditions-for-convex}
\end{equation}This gives
\begin{equation}
\dfrac{f(b)-f(a)}{b-a} \leq  \dfrac{f(c)-f(a)}{c-a} \leq
\dfrac{f(c)-f(b)}{c-b} \label{eq:increasing-Ave-Ra-Ch}
\end{equation}from where the theorem follows.

\end{pf}
\vspace{2cm}
\begin{figure}[h]
$$\psset{unit=1.5pc,algebraic=true}\psaxes[linewidth=1.2pt, labels=none, ticks=none](-1,0)(-2.5,-2.5)(5,5)
\pscurve[linewidth=1.5pt,linecolor=red](.1,3)(2,1.5)(3,1.7)(4,2.2)(5,5)
\psline(.1,3)(5,5)\uput[l](.1,3){A}\uput[r](5,5){C}\uput[u](3,1.7){B}
\psline(.1,3)(3,1.7)\psline(3,1.7)(5,5)\psline(.1,3)(.1,0)\uput[d](.1,0){a}
\psline(3,
1.7)(3,0)\uput[d](3,0){b}\psline(5,5)(5,0)\uput[d](5,0){c}
$$\vspace{1cm}\footnotesize\hangcaption{Theorem
\ref{thm:convex-iff-Av-Ra-Ch-increasing}.}\label{fig:convex-iff-Av-Ra-Ch-increasing}
\end{figure}

\begin{thm}
Let $I \subseteqq \BBR$ be an interval and let  $f:I\rightarrow
\BBR$ be a convex function. Then $f$ is left and right
differentiable on every point of $\interiorone{I}$ and for
$(a,b,c)\in I^3$ with $a<b<c$,
$$  \dfrac{f(b)-f(a)}{b-a} \leq f'_-(b) \leq f'_+(b) \leq \dfrac{f(c)-f(b)}{c-b}.$$
\end{thm}
\begin{pf}
Since $f$ is convex, $\forall b\in \interiorone{I} $,
$\fun{T_b}{x}{\dfrac{f(x)-f(b)}{x-b}}{I\setminus\{b\}}{\BBR}$ is
increasing, by virtue of Theorem
\ref{thm:convex-iff-Av-Ra-Ch-increasing}. Thus $\forall u\in
\lcro{a}{b}$, $\forall v\in \lcro{b}{c}$
$$ T_b(a)\leq T_b(u)\leq  T_b(v)\leq T_b(c). $$This means that $T_b$
is increasing on $ \lcro{b}{c}$ and bounded below by $T_b(u)$. It
follows by Theorem \ref{thm:-x+y-mono} that $T_b(b+)$ exists, and so
$f$ is right-differentiable at $b$. Moreover,
$$ T_b(a)\leq T_b(u)\leq  f' _+(b)\leq T_b(c). $$ Similarly, $T_b$
is increasing and bounded above by $ f' _+(b)$. Appealing again to
Theorem \ref{thm:-x+y-mono}, $f$ is left-differentiable at $b$ and
$$ T_b(a)\leq f' _-(b)\leq  f' _+(b)\leq T_b(c). $$
\end{pf}
\begin{cor}
If $f$ is convex on an interval $I$, then $f$ is continuous on
$\interiorone{I}$.
\end{cor}
\begin{pf}
Given $b\in \interiorone{I}$, we know that $f$ is both left and
right differentiable at $b$ (though we may have $f' _-(b)< f'
_+(b)$). Regardless, this makes $f$ left and right continuous at
$b$: hence both $f(b-)=f(b)$ and $f(b+)=f(b)$. But then
$f(b-)=f(b+)$ and so $f$ is continuous at $b$.
\end{pf}
\begin{thm}\label{thm:f'increasing-means-convex}
Let $I \subseteqq \BBR$ be an interval and let  $f:I\rightarrow
\BBR$ be differentiable on $I$. Then $f$ is convex if and only if
$f'$ is increasing on $I$.
\end{thm}
\begin{pf}
\begin{enumerate}
\item[$\implies$] Assume $f$ is convex. Let $a<x<c$. By
(\ref{eq:increasing-Ave-Ra-Ch}),
$$ \dfrac{f(x)-f(a)}{x-a} \leq  \dfrac{f(c)-f(a)}{c-a} \leq
\dfrac{f(c)-f(x)}{c-x}. $$Taking limits as $x\rightarrow a+$,
$$ f'_+(a)\leq \dfrac{f(c)-f(a)}{c-a}. $$
Taking limits as $x\rightarrow c-$,
$$ \dfrac{f(c)-f(a)}{c-a}\leq f'_-(c). $$
Thus $f'_+(a)\leq f'_-(c)$. Since $f$ is differentiable,
$f'_+(a)=f'(a)$ and $f'_-(c)=f'(c)$, and so $f'(a)\leq f'(c)$
proving that $f'$ is increasing.
\item[$\Leftarrow$] Assume $f'$ is increasing and that $a<x<b$. By
the Mean Value Theorem, there exists $\alpha \in \loro{a}{x}$ and
$\alpha ' \in \loro{x}{b}$ such that
$$\dfrac{f(x)-f(a)}{x-a} = f'(\alpha), \qquad \dfrac{f(b)-f(x)}{b-x} = f'(\alpha '). $$
Since $f'(\alpha)\leq f'(\alpha ')$ we must have
$$\dfrac{f(x)-f(a)}{x-a} \leq \dfrac{f(b)-f(x)}{b-x},  $$and so $f$
is convex in view of (\ref{eq:conditions-for-convex}).
\end{enumerate}
\end{pf}
\begin{cor}\label{cor:convex-iff-second-deriv}
Let $I \subseteqq \BBR$ be an interval and let  $f:I\rightarrow
\BBR$ be twice differentiable on $I$. Then $f$ is convex if and only
if $f''\geq 0$.
\end{cor}
\begin{pf}
This follows from Theorems \ref{thm:increasing-iff-firstderiv-pos}
and \ref{thm:f'increasing-means-convex}.
\end{pf}

\begin{df}
An {\em inflexion point} is a point on the graph of a function where
the graph changes from convex to concave or viceversa.
\end{df}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}[Putnam 1991] Are there any polynomials $p(x)$ with real
coefficients of degree $n \geq 2$ all whose $n$ roots are distinct
real numbers and all whose $n-1$ zeroes of $p'(x)$ are the midpoints
between consecutive roots of $p(x)$?
\end{pro}

\begin{pro}
Prove that the inflexion points of $x\mapsto \dfrac{x}{\tan x}$ are
aligned.
\end{pro}

\begin{pro}
By considering $\fun{f}{x}{x^k - k(x - 1)}{[0;+\infty[}{\BBR} $ for
$0<k<1$ and using first and second derivative arguments, obtain a
new proof of Young's Inequality \ref{thm:youngs-ineq}.
\begin{answer}
 Let $0 < k < 1$, and consider the function
 $$\fun{f}{x}{x^k - k(x - 1)}{[0;+\infty[}{\BBR}. $$ Then $0 =
 f'(x) = kx^{k - 1} - k \Leftrightarrow x = 1$. Since $f''(x) = k(k - 1)x^{k - 2} <
 0$ for $0 < k < 1, x \geq 0$, $x = 1$ is a maximum point. Hence
 $f(x) \leq f(1)$ for $x \geq 0$, that is $x^k \leq 1 + k(x - 1)$.
 Letting $k = \dfrac{1}{p}$ and $x = \dfrac{a^p}{b^q}$ we deduce
 $$ \frac{a}{b^{q/p}} \leq 1  + \frac{1}{p}\left(\frac{a^p}{b^q} - 1\right).  $$
Rearranging gives
$$ ab \leq b^{1 + p/q} + \frac{a^pb^{1 + p/q - p}}{p} - \frac{b^{1 + p/q}}{p}  $$
from where we obtain the inequality.
\end{answer}
\end{pro}
\end{multicols}

\section{Inequalities Obtained Through Differentiation}
\begin{thm}Let $x>0$. Then $\dfrac{x^2}{2} < \exp
(x)$.\label{thm:x^2<exp(x)}
\end{thm}
\begin{pf}
Let $f(x)=\exp (x)-\dfrac{x^2}{2}$. Then $f'(x) = \exp (x) -x$ and
$f''(x) = \exp(x) -1$. Since $x>0$, $f''(x)>0$ and so $f'$ is
strictly increasing. Thus $f'(x)>f'(0)=1>0$ and so $f$ is
increasing. Thus $$f(x)>f(0)\implies \exp
(x)-\dfrac{x^2}{2}>0,$$proving the theorem.\end{pf}
\begin{thm}\label{lem:limit-x/exp(x)}$\lim _{x\to +\infty} \dfrac{x}{\exp
(x)}=0$.
\end{thm}
\begin{pf}
From Theorem \ref{thm:x^2<exp(x)}, for $x>0$,
$$ 0<\dfrac{x}{\exp (x)}< \dfrac{2}{x} \implies 0 \leq \lim _{x\to +\infty}\dfrac{x}{\exp (x)}   \leq  \lim _{x\to +\infty}\dfrac{2}{x}  =0,$$
and the theorem follows from the Sandwich Theorem.
\end{pf}
\begin{thm}\label{thm:limit-xa/exp(x)}
Let $\alpha\in \BBR$. Then $\lim _{x\to +\infty}
\dfrac{x^\alpha}{\exp (x)}=0$.
\end{thm}
\begin{pf}
If $\alpha <1$ then
$$\dfrac{x^\alpha}{\exp (x)} = \dfrac{x}{\exp (x)}\cdot x^{\alpha -1} \to 0 \cdot 0,
$$by Lemma \ref{lem:limit-x/exp(x)}. If $\alpha \geq 1$ then
$$\dfrac{x^\alpha}{\exp (x)} = \alpha ^{\alpha}\left(\dfrac{x/\alpha}{\exp (x/\alpha)}\right)^\alpha \to \alpha ^{\alpha}\cdot  0^\alpha=0,
$$by continuity and by Lemma \ref{lem:limit-x/exp(x)}.
\end{pf}
\begin{thm}
Let $x>0$. Then $\log x < x$.\label{thm:logx<x}
\end{thm}
\begin{pf}Put $f(x)=x-\log x$. Then $f'(x) = 1-\dfrac{1}{x}$. For
$x<1$, $f'(x) <0$, for $x=1$, $f'(x)=0$, and for $x>1$, $f'(x)>0$,
which means that $f$ has a minimum at $x=1$. Thus
$$f(x)>f(1) \implies x-\log x> 1.  $$Since $x-\log x>1$ then {\em a
fortiori} we must have $x-\log x>0$ and the theorem follows.\end{pf}
\begin{lem}\label{lem:limit-logx/x} $\lim _{x\to +\infty} \dfrac{\log
x}{x}=0$.
\end{lem}
\begin{pf}
From Theorem \ref{thm:logx<x}, $\log \sqrt{x} < \sqrt{x}$, that is,
$\log x < 2\sqrt{x}$. For  $x>1$, $\log x>0$ and hence,
$$x>1 \implies 0<\dfrac{\log x}{x} < \dfrac{2}{\sqrt{x}},  $$whence $\lim _{x\to +\infty} \dfrac{\log
x}{x}=0$ by the Sandwich Theorem.
\end{pf}
\begin{thm}\label{thm:limit-logx/xa}
Let $\alpha\in \loro{0}{+\infty}$. Then $\lim _{x\to +\infty}
\dfrac{\log x}{x^\alpha}=0$.
\end{thm}
\begin{pf}
If $\alpha >1$ then
$$\dfrac{\log x}{x^\alpha} = \dfrac{\log x}{x}\cdot x^{1-\alpha } \to 0 \cdot 0,
$$by Lemma \ref{lem:limit-logx/x}. If $0<\alpha \leq 1$ then
$$\dfrac{\log x}{x^\alpha} =\dfrac{\log x^\alpha }{\alpha x^\alpha}  \to \dfrac{1}{\alpha}\cdot 0=0,
$$by continuity and by Lemma \ref{lem:limit-logx/x}.
\end{pf}

\begin{thm}For $x\in \loro{0}{\frac{\pi}{2}}$, $\sin x < x < \tan
x$.
\end{thm}
\begin{pf}
Observe that we gave a geometrical argument for this inequality in
Theorem \ref{thm:limit-sinx/x}. First, let $f(x)= \sin x-x$. Then
$f'(x) = \cos x-1< 0$, since for $x\in \loro{0}{\frac{\pi}{2}}$ we
have $\cos x <1$. This means that $f$ is strictly
decreasing. Thus for all $x\in \loro{0}{\frac{\pi}{2}}$, $$f(0)>
f(x) \implies 0
> \sin x-x \implies \sin x < x,$$giving the first half of the
inequality.

\bigskip

For the second half, put $g(x)= \tan x -x$. Then $g'(x) = \sec ^2 x
-1$. Now, since $\absval{\cos x} < 1$ for $x\in
\loro{0}{\frac{\pi}{2}}$, $\sec^2 x > 1$. Hence $g'(x)>0$, and so
$g$ is strictly increasing. This gives $$ g(0)<g(x)\implies 0 < \tan
x -x \implies x<\tan x,
$$obtaining the second inequality.\end{pf}
\vspace{2cm}
\begin{figure}[h]
$$
\psset{unit=2pc}\psaxes[labels=none,ticks=none](0,0)(-.5,-.5)(2,1)
\psline[linewidth=1.5pt,linecolor=blue](0,0)(1.5707963267948966192313216916,1)\uput[d](1.5707963267948966192313216916,0){\pi/2}
\uput[l](0,1){1} \psplot[linewidth=1.5pt,linecolor=red,
algebraic=true]{0}{1.5707963267948966192313216916}{sin(x)}
$$
\vspace{1cm}\footnotesize\hangcaption{Jordan's Inequality}
\label{fig:jordan's-ineq}
\end{figure}
\begin{thm}[Jordan's Inequality] For $x\in
\loro{0}{\frac{\pi}{2}}$, \qquad $\dfrac{2}{\pi}x<\sin x < x$.
\end{thm}
\begin{pf}This inequality says that the straight line joining
$(0,0)$ to $\left(\frac{\pi}{2}, 1\right)$ lies below the curve
$y=\sin x$ for $x\in\loro{0}{\frac{\pi}{2}}$. See figure
\ref{fig:jordan's-ineq}. Put $ f(x) =\dfrac{\sin x}{x}$ for $x\neq
0$ and $f(0)=1$. Then $f'(x)= (\cos x)\left(\dfrac{x-\tan
x}{x^2}\right) <0$ since $\cos x
> 0$ and $x-\tan x < 0$ for $x\in
\loro{0}{\frac{\pi}{2}}$. Thus $f$ is strictly decreasing for $x\in
\loro{0}{\frac{\pi}{2}}$ and so
$$f(x)> f\left(\frac{\pi}{2}\right) \implies \dfrac{\sin x}{x} > \dfrac{2}{\pi},  $$
proving the theorem.\end{pf}

\begin{df}
If $w_1,w_2,\ldots,w_n$ are positive real numbers such that
$w_1+w_2+\cdots+w_n=1$, we define the \emph{$r$-th weighted power
mean} of the $x_i$ as:

$$M_w^r(x_1,x_2,\ldots,x_n)=\left({w_1x_1^r+w_2x_2^r+\cdots+w_nx_n^r}\right)^{1/r}.$$

When all the $w_i=\frac{1}{n}$ we get the standard power mean. The
weighted power mean is a continuous function of $r$, and taking
limit when $r\to0$ gives us
$$M_w^0=x_1^{w_1}x_2^{w_2}\cdots x_n^{w_n}.$$
\end{df}

\begin{thm}[Generalisation of the AM-GM Inequality]\label{thm:AMGM-ineq-generalised1} If $r<s$ then
$$M_w^r (x_1,x_2,\ldots,x_n)\leq M_w^s(x_1,x_2,\ldots,x_n).$$
\end{thm}
\begin{pf}
Suppose first that $0<r<s$ are real numbers, and let
$w_1,w_2,\ldots,w_n$ be positive real numbers such that
$w_1+w_2+\cdots+w_n=1$.

\bigskip

Put $t=\frac{s}{r}>1$ and $y_i=x_i^r$ for $1\leq i\leq n$. This
implies that $y_i^t=x_i^s$. The function $f:]0;+\infty[\rightarrow
]0;+\infty[$, $f(x)=x^t$ is strictly convex, since its  second
derivative  is $f''(x)=t(t-1)x^{t-2}>0$ for all $x\in
]0;+\infty[$.  By Jensen's inequality,
\begin{eqnarray*}
(w_1y_1+w_2y_2+\cdots+w_ny_n)^t&=&f(w_1y_1+w_2y_2+\cdots+w_ny_n)\\
&\le&w_1f(y_1)+w_2f(y_2)+\cdots+w_nf(y_n)\\
&=&w_1y_1^t+w_2y_2^t+\cdots+w_ny_n^t.
\end{eqnarray*}
with equality if and only if $y_1=y_2=\cdots=y_n$. By substituting
$t=\frac{s}{r}$ and $y_i=x_i^r$ back into this inequality, we get
$$
(w_1x_1^r+w_2x_2^r+\cdots+w_nx_n^r)^{s/r}\le
w_1x_1^s+w_2x_2^s+\cdots+w_nx_n^s
$$
with equality if and only if $x_1=x_2=\cdots=x_n$. Since $s$ is
positive, the function $x\mapsto x^{1/s}$ is strictly increasing, so
raising both sides to the power $1/s$ preserves the inequality:
$$
(w_1x_1^r+w_2x_2^r+\cdots+w_nx_n^r)^{1/r}\le
(w_1x_1^s+w_2x_2^s+\cdots+w_nx_n^s)^{1/s},
$$
which is the inequality we had to prove. Equality holds if and only
if all the $x_i$ are equal.

\bigskip
The cases $r<0<s$ and $r<s<0$ can be reduced to the case
$0<r<s$.\end{pf}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}\label{pro:Polya-AM-GM-proof} Complete the following steps (due to George P\'{o}lya)
in order to prove the AM-GM Inequality (Theorem
\ref{thm:AMGM-ineq}).
\begin{enumerate}
\item Prove that $\forall x\in\BBR , \  x \leq e^{x - 1}.$
\item Put $$A_k = \frac{na_k}{a_1 + a_2 + \cdots + a_n},$$ and $G_n =
a_1a_2\cdots a_n$. Prove that $$A_1A_2\cdots A_n =
\frac{n^nG_n}{(a_1 + a_2 + \cdots + a_n )^n},$$ and that
$$A_1 + A_2 + \cdots + A_n = n.$$
\item Deduce that
$$G_n \leq \left(\frac{a_1 + a_2 + \cdots +
a_n}{n}\right)^n.$$
\item Prove the AM-GM inequality by assembling the results above.
\end{enumerate}
\begin{answer}We have:
\begin{enumerate}
\item
Put $f:\BBR \rightarrow \BBR$, $f(x) = e^{x - 1} - x.$ Clearly $f(1)
= e^0 - 1 = 0$. Now,
$$f'(x) = e^{x - 1} - 1,$$
$$f''(x) = e^{x - 1}.$$
If $f'(x) = 0$ then $e^{x - 1} = 1$ implying that $x = 1$. Thus $f$
has a single minimum point at $x = 1$. Thus for all real numbers $x$
$$ 0 = f(1) \leq f(x) = e^{x - 1} - x,$$which gives the desired
result.
\item Easy Algebra!
\item Easy Algebra!
\item By the preceding results, we have
$$A_1 \leq \exp (A_1 - 1),$$
$$A_2 \leq \exp (A_2 - 1),$$
$$\vdots$$
$$A_n \leq \exp (A_n - 1).$$Since all the quantities involved are
positive, we may multiply all these inequalities together, to
obtain,
$$A_1A_2\cdots A_n \leq \exp (A_1 + A_2 + \cdots + A_n - n).$$In
view of the observations above, the preceding inequality is
equivalent to
$$\dfrac{n^nG_n}{(a_1 + a_2 + \cdots + a_n )^n} \leq \exp (n - n) = e^0 =
1.$$We deduce that
$$G_n \leq \left(\dfrac{a_1 + a_2 + \cdots +
a_n}{n}\right)^n,$$which is equivalent to
$$(a_1a_2\cdots a_n)^{1/n} \leq \dfrac{a_1 + a_2 + \cdots + a_n}{n}. $$
Now, for equality to occur, we need each of the inequalities $A_k
\leq \exp (A_k - 1)$ to hold. This occurs, in view of the preceding
lemma, if and only if $A_k = 1, \ \forall k$, which translates into
$a_1 = a_2 = \ldots = a_n$. This completes the proof.
\end{enumerate}
\end{answer}
\end{pro}
\end{multicols}

\section{Asymptotic Preponderance}
\begin{df}
Let $I\subseteqq \closR$ be an interval, and let $a\in I$. A
function $\alpha :I \rightarrow \BBR$ is said to be {\em
infinitesimal} as $x \rightarrow a$ if $\lim_{x\rightarrow a}
\alpha(x) = 0$. We say that $\alpha$ is {\em negligible} in relation
to $\beta$ as $x\rightarrow a$ or that $\beta$ is {\em preponderant}
in relation to $\alpha$ as $x\rightarrow a$, if $\forall \varepsilon
> 0, \exists \delta > 0$ such that
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{\alpha (x)} \leq \varepsilon\absval{\beta (x)}.  $$
We express the condition above with the notation $ \alpha (x) =
\smallo{x\to a}{\beta (x)}$ (read ``$\alpha$ of $x$ is small oh of
$\beta$ of $x$ as $x$ tends to $a$'').
\bigskip

Finally, we say that {\em $\alpha$ is Big Oh of $\beta$ around
$x=a$}---written $\alpha (x) = \bigo{x\to a}{\beta (x)}$, or $\alpha
(x)\ll{x\to a}{\beta (x)}$---if $\exists C>0$ and $\exists \delta>0$
such that $\forall x\in \loro{a-\delta}{a+\delta}$, $\absval{\alpha
(x)}\leq C\absval{\beta (x)}$.

\end{df}
\begin{rem}
Notice that $a$ above may be finite or $\pm \infty$. If $a$ is
understood, we prefer to write $\alpha (x)=\soo{\beta (x)}$ rather
than $\alpha (x)=\smallo{x\to a}{\beta (x)}$. Also
$$\alpha = \smallo{x\to a}{\beta} \iff \lim _{x \rightarrow a} \dfrac{\alpha (x)}{\beta (x)} = 0 \quad \mathrm{and} \quad \beta (a) = 0 \implies \alpha (a) =0.$$

\end{rem}


\begin{exa}
$\sin :\BBR \rightarrow [-1; 1]$ is infinitesimal as $x \rightarrow
0$, since $\lim_{x\rightarrow 0} \sin x = 0$.
\end{exa}
\begin{exa}
$f:\BBR\setminus\{0\} \rightarrow \BBR, x \mapsto \dfrac{1}{x}$ is
infinitesimal as $x \rightarrow +\infty$, since $\lim_{x\rightarrow
+\infty} \dfrac{1}{x} = 0$.
\end{exa}
\begin{exa}
 We have $x^2 = \soo{x}$ as $x\rightarrow 0$ since $$\lim _{x\rightarrow 0} \dfrac{x^2}{x} = \lim _{x\rightarrow 0} x =
 0.$$
\end{exa}
\begin{exa}
 We have $x = \soo{x^2}$ as $x\rightarrow +\infty$ since $$\lim _{x\rightarrow +\infty} \dfrac{x}{x^2} =
 \lim _{x\rightarrow +\infty} \dfrac{1}{x} =
 0.$$
\end{exa}
\begin{df} We write $ \alpha (x) =  \gamma (x) + \soo{\beta(x)}$ as $x
\rightarrow a$ if $ \alpha (x) -  \gamma (x) = \soo{\beta(x)}$ as $x
\rightarrow a$. Similarly,  $ \alpha (x) =  \gamma (x) +
\boo{\beta(x)}$ as $x \rightarrow a$ means that  $ \alpha (x) -
\gamma (x) = \boo{\beta(x)}$ as $x \rightarrow a$.
\end{df}
\begin{exa}
 We have $\sin x = x +  \soo{x}$ as $x \rightarrow
0$ since
 $$\lim _{x\rightarrow 0} \dfrac{\sin x - x}{x} = \lim _{x\rightarrow 0} \dfrac{\sin x}{x } -  \lim _{x\rightarrow 0} 1
 = 1 - 1 =
 0.$$
\end{exa}

\begin{thm}\label{thm:properties-soo-boo}
Let $f, g, \alpha , \beta, u, v$ be real-valued functions defined on
an interval containing $a\in \closR$. Let $\lambda \in \BBR$ be a
constant. Let $h$ be a real valued function defined on an interval
containing $b\in \closR$. Then
\begin{enumerate}
\item $f=\soo{g} \implies f=\boo{g}$.
\item $f=\soo{\alpha} \implies \lambda f=\soo{\alpha}$.
\item $f=\soo{\alpha}, g=\soo{\alpha} \implies f+g=\soo{\alpha}$.
\item $f=\soo{\alpha}, g=\soo{\beta} \implies fg=\soo{\alpha\beta}$.
\item $f=\boo{\alpha} \implies \lambda f=\boo{\alpha}$.
\item $f=\boo{\alpha}, g=\boo{\alpha} \implies f+g=\boo{\alpha}$.

\item $f=\boo{\alpha}, g=\boo{\beta} \implies fg=\boo{\alpha\beta}$.
\item $f=\boo{\alpha}, g=\soo{\beta} \implies fg=\soo{\alpha\beta}$.
\item $f=\boo{\alpha}, \alpha=\boo{\beta} \implies f=\boo{\beta}$.
\item $f=\soo{\alpha}, \alpha=\boo{\beta} \implies f=\soo{\beta}$.
\item $f=\boo{\alpha}, \alpha=\soo{\beta} \implies f=\soo{\beta}$.
\item $f=\soo{\alpha}, \lim _{x\to b}h(x) = a \implies f\circ h=\smallo{x\to b}{\alpha \circ h}$.
\item $f=\boo{\alpha}, \lim _{x\to b}h(x) = a \implies f\circ h=\bigo{x\to b}{\alpha \circ h}$.

\end{enumerate}
\end{thm}
\begin{pf}
These statements follow directly from the definitions.
\begin{enumerate}
\item  If $f=\soo{g}$ then $\forall \varepsilon >0$  there exists $\delta > 0$ such that $$x\in \loro{a-\delta}{a+\delta}\implies \absval{\dfrac{f(x)}{g(x)}-0}<\varepsilon \implies
\absval{f(x)}<\varepsilon \absval{g(x)} \implies f=\boo{g},$$using
$C=\varepsilon$ in the definition of Big Oh.
\item This follows by Theorem \ref{thm:algebra-of-fun-limits}.
\item  This follows by Theorem \ref{thm:algebra-of-fun-limits}.
\item Both $\lim _{x\to a}\dfrac{f(x)}{\alpha (x)} = 0$ and $\lim _{x\to a}\dfrac{g(x)}{\beta (x)} = 0$. Hence
$\lim _{x\to a}\dfrac{f(x)g(x)}{\alpha (x)\beta (x)}=\lim _{x\to
a}\dfrac{f(x)}{\alpha (x)} \cdot \lim _{x\to a}\dfrac{g(x)}{\beta
(x)} =0 \implies fg=\soo{\alpha\beta}$.
\item If $f=\boo{\alpha}$ then there is $\delta > 0$ and $C>0$
such that $$x\in \loro{a-\delta}{a+\delta} \implies
\absval{f(x)}\leq C\absval{\alpha (x)}\implies \absval{\lambda f(x)}\leq
C\absval{\lambda}\cdot \absval{\alpha (x)} \implies\lambda
f=\boo{\alpha}.$$
\item There exists $\delta _1>0, \delta _2 > 0$ and $C_1>0$, $C_2>0$ such that
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq C_1\absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{a-\delta _2}{a+\delta _2} \implies \absval{g(x)} \leq
C_2\absval{\alpha (x)}  .$$Thus if $\delta = \min (\delta _1, \delta
_2)$,
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{f(x)+g(x)}\leq \absval{f(x)}+\absval{g(x)} \leq C_1\absval{\alpha
(x)} +C_2\absval{\alpha (x)} = (C_1+C_2)\absval{\alpha (x)}\implies f+g=\boo{\alpha}.$$

\item There exists $\delta _1>0, \delta _2 > 0$ and $C_1>0$, $C_2>0$ such that
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq C_1\absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{a-\delta _2}{a+\delta _2} \implies \absval{g(x)} \leq
C_2\absval{\beta (x)}  .$$Thus if $\delta = \min (\delta _1, \delta
_2)$,
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{f(x)g(x)} =\absval{f(x)}\absval{g(x)} \leq C_1\absval{\alpha
(x)}\cdot C_2\absval{\beta (x)} = (C_1C_2)\absval{\alpha (x)\beta
(x)}\implies fg=\boo{\alpha\beta}.$$
\item  There exists $\delta _1>0, \delta _2 > 0$ and $C_1>0$,  such
that $\forall \varepsilon > 0$
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq C_1\absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{a-\delta _2}{a+\delta _2} \implies \absval{g(x)} \leq
\varepsilon\absval{\beta (x)}  .$$Thus if $\delta = \min (\delta _1,
\delta _2)$,
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{f(x)g(x)} =\absval{f(x)}\absval{g(x)} \leq C_1\absval{\alpha
(x)}\cdot \varepsilon\absval{\beta (x)} = \varepsilon
(C_1)\absval{\alpha (x)\beta (x)}\implies fg=\soo{\alpha\beta}.$$
\item There exists $\delta _1>0, \delta _2 > 0$ and $C_1>0$, $C_2>0$ such that
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq C_1\absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{a-\delta _2}{a+\delta _2} \implies \absval{\alpha (x)}
\leq C_2\absval{\beta (x)}  .$$Thus if $\delta = \min (\delta _1,
\delta _2)$,
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{f(x)}\leq C_1\absval{\alpha (x)} \leq C_1C_2\absval{\beta (x)} \implies
f=\boo{\beta}.$$
\item  There exists $\delta _1>0, \delta _2 > 0$ and $C>0$,  such
that $\forall \varepsilon > 0$
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq \varepsilon\absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{a-\delta _2}{a+\delta _2} \implies \absval{\alpha (x)}
\leq C\absval{\beta (x)}  .$$Thus if $\delta = \min (\delta _1,
\delta _2)$,
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{f(x)}\leq \varepsilon\absval{\alpha (x)} \leq C\varepsilon\absval{\beta (x)} \implies
f=\soo{\beta}.$$
\item  There exists $\delta _1>0, \delta _2 > 0$ and $C>0$,  such
that $\forall \varepsilon > 0$
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq C\absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{a-\delta _2}{a+\delta _2} \implies \absval{\alpha (x)}
\leq \varepsilon\absval{\beta (x)}  .$$Thus if $\delta = \min
(\delta _1, \delta _2)$,
$$x\in \loro{a-\delta}{a+\delta} \implies \absval{f(x)}\leq C\absval{\alpha (x)} \leq C\varepsilon\absval{\beta (x)} \implies
f=\soo{\beta}.$$


\item  There exists $\delta _1>0, \delta _2 > 0$  such
that $\forall \varepsilon > 0$
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq \varepsilon \absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{b-\delta _2}{b+\delta _2} \implies \absval{h(x)-a} \leq
\varepsilon \implies h(x) \in \loro{a-\varepsilon}{a+\varepsilon}
.$$Thus if $\delta = \min (\delta _1, \delta _2, \varepsilon)$,
$$x\in \loro{b-\delta}{b+\delta} \implies \absval{(f\circ h)(x)}\leq \varepsilon\absval{(\alpha\circ h) (x)}  \implies
f\circ h=\smallo{x\to b}{\alpha \circ h}.$$







\item There exists $\delta _1>0, \delta _2 > 0, C>0$  such
that $\forall \varepsilon > 0$
$$ x\in \loro{a-\delta _1}{a+\delta _1} \implies \absval{f(x)} \leq C \absval{\alpha (x)} \qquad \mathrm{and} \qquad
x\in \loro{b-\delta _2}{b+\delta _2} \implies \absval{h(x)-a} \leq
\varepsilon \implies h(x) \in \loro{a-\varepsilon}{a+\varepsilon}
.$$Thus if $\delta = \min (\delta _1, \delta _2, \varepsilon)$,
$$x\in \loro{b-\delta}{b+\delta} \implies \absval{(f\circ h)(x)}\leq C\absval{(\alpha\circ h) (x)}  \implies
f\circ h=\bigo{x\to b}{\alpha \circ h}.$$

\end{enumerate}
\end{pf}
\begin{rem}
In the above theorem, (8), (10), and (11) essentially say that
$\boo{\mathit{o}} =\soo{\mathit{O}}=\soo{\mathit{o}} = \mathit{o} $
and (9) says that $\boo{\mathit{O}}=\mathit{O}$.
\end{rem}


The following corollary is immediate.
\begin{cor}
Let $\alpha$ and $\beta$ be infinitesimal functions as $x\rightarrow
a$. Then the following hold.
\begin{enumerate}
\item The sum of two infinitesimals is an infinitesimal:
$$\soo{\beta(x)} + \soo{\beta(x)} = \soo{\beta(x)}. $$
\item The difference of two infinitesimals is an infinitesimal:
$$\soo{\beta(x)} - \soo{\beta(x)} = \soo{\beta(x)}. $$
\item $\forall c\in \BBR\setminus \{0\}, \soo{c\beta(x)}  =
\soo{\beta(x)}. $ \item $\forall n\in\BBN, n \geq 2, 1 \leq k \leq n
- 1, \ \soo{(\beta(x))^n}  = \soo{(\beta(x))^k}$.
\item $\soo{\soo{\beta(x)}} = \soo{\beta(x)}$.
\item $\forall n\in\BBN, n \geq 1, \
(\beta(x))^n\soo{\beta(x)} = \soo{(\beta(x))^{n + 1}}$. \item
$\forall n\in\BBN, n \geq 2, \dfrac{\soo{(\beta(x))^n}}{\beta(x)} =
\soo{(\beta(x))^{n - 1}}$. \item $\dfrac{\soo{\beta(x)}}{\beta(x)} =
\soo{1}$. \item If $c_k$ are real numbers, then $\soo{\sum_{k = 1}
^n c_k(\beta(x))^k} = \soo{\beta(x)}$. \item $(\alpha\beta)(x) =
\soo{\alpha(x)}$ and $(\alpha\beta)(x) = \soo{\beta(x)}$.
\item If $\alpha \sim \beta,$ then $(\alpha - \beta)(x) =
\soo{\alpha(x)}$ and $(\alpha - \beta)(x) = \soo{\beta(x)}$.
\end{enumerate}
\label{cor:small-oh-properties}
\end{cor}
\begin{thm}[Canonical small oh Relations] The following
relationships hold
\begin{enumerate}
\item $\forall (\alpha , \beta)\in \BBR^2$, $x^\alpha = \smallo{x\to +\infty}{x^\beta} \iff \alpha <
\beta$.
\item $\forall (\alpha , \beta)\in \BBR^2$, $x^\alpha = \smallo{x\to 0+}{x^\beta} \iff \alpha
>
\beta$.
\item $\log x = \smallo{x\to +\infty}{x}$.
\item $\forall (\alpha , \beta)\in \BBR^2, \beta > 0$,  $(\log x)^\alpha = \smallo{x\to +\infty}{x^\beta}$.
\item $\forall (\alpha , \beta)\in \BBR^2, \beta < 0$,  $\absval{\log x}^\alpha = \smallo{x\to 0+}{x^\beta}$.
\item  $\forall (\alpha , a)\in \BBR^2, a >1$, $x^\alpha = \smallo{x\to +\infty}{a^x}$
\item  $\forall (\alpha , a)\in \BBR^2, a >1$, $a^x = \smallo{x\to -\infty}{\absval{x}^\alpha}$

\end{enumerate}
\label{thm:canonical-oh-relations}
\end{thm}
\begin{pf}
\begin{enumerate}
\item Immediate.
\item Immediate.
\item This follows from Lemma \ref{lem:limit-logx/x}.

\item If $\alpha = 0$ then eventually $(\log x)^\alpha =1$ and so the assertion is immediate. If $\alpha
< 0$ the assertion is also immediate, since then $(\log x)^\alpha
\to 0$ as $x\to +\infty$.  If $\alpha > 0$, by Theorem
\ref{thm:limit-logx/xa}, $$ \dfrac{\log x}{x^{\beta /\alpha}} \to
0,$$ whence $$ \dfrac{(\log x)^\alpha}{x^\beta} = \left( \dfrac{\log
x}{x^{\beta /\alpha}} \right)^\alpha \to 0^\alpha = 0. $$
\item  If $x\to 0+$ then
$\dfrac{1}{x}\to +\infty$. Hence by the preceding part and by
continuity, as $x\to 0+$ and for $\gamma > 0$,
$$ \dfrac{\left(\absval{\log \dfrac{1}{x}}\right)^\alpha}{\left(\dfrac{1}{x}\right)^\gamma} \to 0.   $$
But $$\dfrac{\left(\absval{\log
\dfrac{1}{x}}\right)^\alpha}{\left(\dfrac{1}{x}\right)^\gamma} =
\dfrac{\left(\absval{-\log
x}\right)^\alpha}{\left(\dfrac{1}{x}\right)^\gamma} = x^\gamma
\absval{\log x}^\alpha,
$$and so $
\absval{\log x}^\alpha = \smallo{x\to 0+}{x^{-\gamma}}$, and so
putting $\beta = -\gamma < 0$ we have $ \absval{\log x}^\alpha=
\smallo{x\to 0+}{x^{\beta}}$.

\item   For $\alpha < 1$ we have
$$\dfrac{x^\alpha}{a^x} =\dfrac{x\log a}{\exp \left(x\log a\right)}\cdot \dfrac{x^{\alpha-1}}{\log a}\to 0 \cdot 0,  $$
since $\dfrac{x\log a}{\exp \left(x\log a\right)} \to 0$ by
continuity and Theorem \ref{thm:limit-xa/exp(x)}, and
$\dfrac{x^{\alpha-1}}{\log a} \to 0$ since $\alpha-1<0$. If $\alpha
> 1$ then
$$ \dfrac{x^\alpha}{a^x} = \left(\dfrac{x}{\left(a^{1/\alpha}\right)^x}\right)^\alpha
=\dfrac{\alpha ^\alpha}{(\log a)^\alpha } \cdot
\left(\dfrac{x\dfrac{\log a}{\alpha}}{\exp \left( x\dfrac{\log
a}{\alpha}\right)}\right)^\alpha \to \dfrac{\alpha ^\alpha}{(\log
a)^\alpha } \cdot 0^\alpha = 0,
$$by continuity and Theorem \ref{thm:limit-xa/exp(x)}.

\item If $\alpha > 0$, $a>1$ then $\absval{x}^\alpha \to +\infty$ but $a^x\to
0$ as $x\to -\infty$, hence there is nothing to prove. If $\alpha
=0$, again the result is obvious. Assume $\alpha < 0$. If $x\to
-\infty$ then $-x\to +\infty$ and so by the preceding part $$
\dfrac{\absval{x}^{-\alpha}}{a^{-x}} \to 0$$since the above result
is valid regardless of the sign of $\alpha$. Now
$$
\dfrac{a^x}{\absval{x}^\alpha}=\dfrac{\absval{x}^{-\alpha}}{a^{-x}},$$proving
the result.
\end{enumerate}
\end{pf}
\begin{exa}
In view of Corollary \ref{cor:small-oh-properties} and Theorem
\ref{thm:canonical-oh-relations}, we have
$$\soo{-2x^3 + 8x^2} = \soo{x},$$as $x\rightarrow 0.$
\end{exa}

\begin{exa}
In view of Corollary \ref{cor:small-oh-properties} and Theorem
\ref{thm:canonical-oh-relations}, we have
$$\soo{-2x^3 + 8x^2} = \soo{x^4},$$as $x\rightarrow +\infty.$
\end{exa}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Which one is faster as $x\to +\infty$, $(\log\log x)^{\log x}$ or
$(\log x)^{\log\log x}$?
\begin{answer}
$(\log\log x)^{\log x} = \exp ((\log x)(\log\log\log x)) $ and
$(\log x)^{\log\log x} = \exp ((\log\log x)^2)$. Now,
lexicographically,
$$ (\log\log x)^2 \ll (\log x)(\log\log\log x) \implies \exp((\log\log x)^2) \ll \exp ((\log x)(\log\log\log x))$$
and thus $(\log\log x)^{\log x}$ is faster.
\end{answer}
\end{pro}
\end{multicols}


\section{Asymptotic Equivalence}
\begin{df}
Let $I\subseteqq \closR$ be an interval, and let $a\in I$. We say
that $\alpha$ is {\em asymptotic} to a function $\beta:I \rightarrow
\BBR$ as $x\rightarrow a$, and we write $\alpha\sim \beta$, if
$\alpha - \beta = \smallo{a}{\beta}$.
\end{df}
\begin{rem}
If in a neighbourhood $\N{a}$ of $a$ $\beta \neq 0$ then $$ \alpha
\sim \beta \iff \left\{\begin{array}{l} \dfrac{\alpha}{\beta}\sim 1
\\ \beta (a)= 0 \implies \alpha (a) = 0\end{array}\right.
$$

\end{rem}
\begin{exa}
We have $\sin x \sim x$ as $x \rightarrow 0$, since
$\lim_{x\rightarrow 0} \dfrac{\sin x}{x} = 1$.
\end{exa}
\begin{exa}
We have $x^2 + x \sim x$ as $x \rightarrow 0$, since
$\lim_{x\rightarrow 0} \dfrac{x^2 + x}{x} = 1$.
\end{exa}
\begin{exa}
We have $x^2 + x \sim x^2$ as $x \rightarrow +\infty$, since
$\lim_{x\rightarrow +\infty} \dfrac{x^2 + x}{x^2} = 1$.
\end{exa}


\begin{thm}\label{thm:asymptotic-means-big-oh}
$$\alpha \sim \beta \implies \left\{\begin{array}{l} \alpha = \boo{\beta} \\ \beta = \boo{\alpha}\end{array}\right.  $$
\end{thm}
\begin{pf}
If $\alpha - \beta = \soo{\beta}$ there is a neighbourhood $\N{a}$
of $a$ such that $$\forall \varepsilon > 0, x\in\N{a} \implies
\absval{\alpha (x)-\beta (x)} \leq \varepsilon \absval{\beta (x)}.
$$In particular, for $\varepsilon = \dfrac{1}{2}$, we have
$$x\in\N{a} \implies
\absval{\alpha (x)-\beta (x)} \leq \dfrac{1}{2} \absval{\beta (x)}.
$$Hence
$$ x\in\N{a} \implies \absval{\alpha (x)} = \absval{\alpha (x)-\beta (x)+\beta
(x)} \leq  \absval{\alpha (x)-\beta (x)} + \absval{\beta (x)} \leq
\dfrac{3}{2} \absval{\beta (x)} \implies \alpha = \boo{\beta},
$$and
$$ x\in\N{a} \implies \absval{\beta (x)} = \absval{\beta (x)-\alpha
(x)+\alpha (x)} \leq  \absval{\beta (x)-\alpha (x)} + \absval{\alpha
(x)} \leq \dfrac{1}{2} \absval{\beta (x)} + \absval{\alpha (x)}
\implies \absval{\beta (x)} \leq 2\absval{\alpha (x) } \implies
\beta = \boo{\alpha}  .
$$

\end{pf}

\begin{thm}
The relation of asymptotic equivalence $\sim$ is an equivalence
relation on the set of functions defined on a neighbourhood of $a$.
\end{thm}

\begin{pf}
We have
\begin{enumerate}
\item[{\bf Reflexivity}] $\alpha - \alpha = 0 = \soo{\alpha}$.
\item[{\bf Symmetry}] $\alpha - \beta = \soo{\beta} \implies \beta =
\boo{\alpha}$ by Theorem \ref{thm:asymptotic-means-big-oh}. Now by
(10) of Theorem \ref{thm:properties-soo-boo},
$$\alpha - \beta = \soo{\beta} \quad \mathrm{and} \quad \beta =
\boo{\alpha} \implies \alpha - \beta = \soo{\alpha} \implies \beta
-\alpha = \soo{\alpha},
$$whence $\beta \sim \alpha$.
\item[{\bf Transitivity}] Assume $\alpha -\beta = \soo{\beta}$ and $\beta - \gamma =
\soo{\gamma}$. Then by Theorem \ref{thm:asymptotic-means-big-oh} we
also have $\beta = \boo{\gamma}$. Hence $\alpha - \beta =
\soo{\gamma}$ by (10) of Theorem \ref{thm:properties-soo-boo}.
Finally $\alpha - \beta = \soo{\gamma}$ and  $\beta - \gamma =
\soo{\gamma}$ give  $\alpha - \gamma = \soo{\gamma}$ by (3) of
Theorem \ref{thm:properties-soo-boo}.

\end{enumerate}
\end{pf}

The relationship between $\mathit{o}, \mathit{O},$ and $\sim$ is
displayed in figure \ref{fig:O-relations2}. \vspace{2cm}
\begin{figure}[h]
$$\psset{unit=1pc} \pscircle(-2,0){4}\pscircle(2,0){4}
\pscircle(0,0){1}\rput(0,0){f\sim g}
\pscircle(-3.8,-1){1.7}\rput(-3.8,-1){f=\soo{g}}
\pscircle(3.8,-1){1.7}\rput(3.8,-1){g=\soo{f}}
\rput(-3.3,2.5){f=\boo{g}}\rput(3.3,2.5){g=\boo{f}}
$$\vspace{1cm}\footnotesize\hangcaption{Diagram of Big Oh relations.} \label{fig:O-relations2}
\end{figure}

\begin{thm}
The relation of asymptotic equivalence $\sim$ possesses the
following properties.
\begin{enumerate}
\item $ \left\{\begin{array}{l} \alpha  \sim {\beta} \\ \gamma  \sim {\delta}\end{array}\right. \implies \alpha \gamma \sim
\beta\delta$.
\item  $\left\{\begin{array}{l} \alpha  \sim {\beta} \\ n\in \BBN \setminus
\{0\}\end{array}\right. \implies \alpha ^n \sim \beta ^n$ \item if
$\alpha \sim \beta$ and if there is a neighbourhood $\N{a}$ of $a$
where $\forall x\in \N{a}\setminus \{a\}, \beta (x)\neq 0$, then
$\dfrac{1}{\alpha}$ and $\dfrac{1}{\beta}$ are defined on
$\N{a}\setminus \{a\}$ and $\dfrac{1}{\alpha} \asympto{a}
\dfrac{1}{\beta}$.


\item $\left\{\begin{array}{l} \alpha = \soo{\beta} \\ \beta \sim
\gamma \end{array}\right. \implies \alpha =  \soo{\gamma}$.
\item $\left\{\begin{array}{l} \alpha \sim \beta \\ \beta =\soo{\gamma}
\end{array}\right. \implies \alpha  =\soo{\gamma}$.
\item if
$\alpha \sim \beta$ and if there is a neighbourhood $\N{a}$ of $a$
where $\forall x\in \N{a}\setminus \{a\}, \beta (x) > 0$, and if
$r\in \BBR$ then  $\alpha ^r \asympto{a} \beta ^r$.
\item {\bf (Dextral Composition)} If $\alpha \asympto{a} \beta$ and
if $\lim _{x\to b} \gamma (x)=a$, then $\alpha \circ \gamma
\asympto{a} \beta \circ \gamma$.

\end{enumerate}
\end{thm}
\begin{pf}
We prove the assertions in the given order.
\begin{enumerate}
\item Since $\alpha - \beta = \soo{\beta}$ and $\gamma - \delta = \soo{\delta}$ then $\alpha = \boo{\beta}$, and so
 $$\alpha \gamma - \beta \delta = \alpha (\gamma - \delta) - \delta (\beta -
\alpha) = \boo{\beta} \soo{\delta} -\delta
\soo{\beta}=\soo{\beta\delta}.$$
\item  This follows upon applying the preceding product rule $n-1$ times, using $\gamma = \alpha$ and $\delta =\beta$. \item
Observe that $$\dfrac{1}{\alpha} - \dfrac{1}{\beta} = \dfrac{\beta -
\alpha}{\alpha \beta} =\dfrac{\soo{\alpha}}{\alpha \beta} =
\soo{\dfrac{1}{\beta}},
$$upon using $\beta - \alpha = \soo{\alpha}$ and (8) of Corollary
\ref{cor:small-oh-properties}.


\item We have $\alpha = \soo{\beta}$ and $\beta - \gamma =
\soo{\gamma}$. This last implies that $\beta = \boo{\gamma}$ by
Theorem \ref{thm:asymptotic-means-big-oh}. Hence
$$\alpha = \soo{\beta} = \soo{\boo{\gamma}} = \soo{\gamma}.  $$
\item We have $\alpha  - \beta = \soo{\beta}$ and $\beta  =
\soo{\gamma}$. This last implies that $\alpha = \boo{\beta}$ by
Theorem \ref{thm:asymptotic-means-big-oh}. Hence
$$\alpha = \boo{\beta} = \boo{\soo{\gamma}} = \soo{\gamma}.  $$

\item Since $\beta$ is eventually strictly positive, so is $\alpha$.
Hence $\alpha \sim \beta \iff \dfrac{\alpha}{\beta}(x)\to 1$ as
$x\to a$. Since the function $x\mapsto x^r$ is continuous in
$\loro{0}{+\infty}$, $$\dfrac{\alpha}{\beta}(x)\to 1 \implies
\dfrac{\alpha ^r}{\beta ^r}(x)\to 1 \implies \alpha ^r \sim \beta
^r.$$

\item We have $\dfrac{\alpha (x)-\beta (x)}{\beta (x)}\to 0$ as $x\to
a$. Now if $\gamma (x)\to a$ as $x\to b$ then as $x\to b$,
$$\dfrac{\alpha (\gamma(x))-\beta (\gamma (x))}{\beta (\gamma (x))}\to 0.  $$
\end{enumerate}

\end{pf}
\begin{thm}[Exponential Composition] $\exp (\alpha) \asympto{a} \exp (\beta)\iff \alpha-\beta \asympto{a}
0$.
\end{thm}
\begin{pf}
We have $$\begin{array}{lll}\exp (\alpha) \asympto{a} \exp (\beta) &
\iff & \exp (\alpha) - \exp (\beta) =\soo{\exp (\beta)} \\
& \iff &  \left(\exp (-\beta)\right)\left(\exp (\alpha) - \exp
(\beta)\right) =  \left(\exp (-\beta)\right)\soo{\exp (\beta)} \\
& \iff & \exp (\alpha - \beta)  - 1 = \soo{1}\\
& \iff & \alpha - \beta = \soo{1}.
\end{array}$$
\end{pf}
\begin{rem}
The above theorem {\em does not say} that $\alpha \sim \beta
\implies \exp (\alpha)\sim \exp (\beta)$. That this last assertion
is false can be seen from the following counterexample: $x+1\sim x$
as $x\to +\infty$, but $\exp (x+1) = e\exp (x)$ is not asymptotic to $\exp
(x)$.
\end{rem}

\begin{thm}[Logarithmic Composition] Suppose  there is a neighbourhood of $a$ $\N{a}$ such that

$\forall x\in \N{a}\setminus \{a\}, \beta (x)>0 $. Suppose,
moreover, that $\alpha \asympto{a} \beta$ and that $\lim _{x\to a}
\beta (x)=l$ with $l\in \lcrc{0}{+\infty}\setminus \{1\}$. Then
$\log \circ \alpha \asympto{a}\log \circ \beta$.
\end{thm}
\begin{pf}
Either $l\in  \loro{0}{+\infty}\setminus \{1\} $ or $l=+\infty$ or
$l=0$.
\bigskip

In the first case, $\log \alpha(x)\to \log l$ and $\log \beta (x)
\to \log l$ as $x\to a$ hence
$$\log \alpha \sim \log l \sim \log \beta, \qquad \mathrm{as}\ x\to a. $$

\bigskip

In the second case $\beta (x)>1$  eventually, and thus $\log \beta
(x) \neq 0$. Hence
$$ \dfrac{\log \alpha (x)}{\log \beta (x)}-1 =   \dfrac{\log \alpha (x) - \log \beta (x)}{\log \beta (x)} =
 \dfrac{\log \dfrac{\alpha (x)}{\beta (x)} }{\log \beta (x)} \to \dfrac{\log 1}{+\infty} = 0, $$
since $\dfrac{\alpha (x)}{\beta (x)} \to 1$ and $\log \beta (x) \to
+\infty$ as $x\to a$.

\bigskip

The third case becomes the second case upon considering
$\dfrac{1}{\alpha}$ and $\dfrac{1}{\beta}$.
\end{pf}
\begin{thm}[Addition of Positive Terms]
If $\alpha \sim \beta$ and $\gamma \sim \delta$ and there exists a
neighbourhood of $a$ $\N{a}$ such that $\forall x\in \N{a}\setminus
\{a\}, \beta (x)>0, \delta (x) >0$ then $$\alpha + \gamma \sim \beta
+ \delta .
$$
\end{thm}
\begin{pf}
We have $\alpha - \beta = \soo{\beta}$ and $\gamma - \delta =
\soo{\delta}$. Hence
 $$\begin{array}{lll} (\alpha +\gamma) - (\beta + \delta) & = & (\alpha -
 \beta) + (\gamma - \delta) \\
 & = & \soo{\beta} + \soo{\delta}\\
 & = & \soo{\beta + \delta},
\end{array}$$which means $\alpha + \gamma \sim \beta
+ \delta$.
\end{pf}
\begin{thm}
The following asymptotic expansions hold as $x\to 0:$
\begin{enumerate}
\item \label{property:first}$\exp (x)-1 \sim x$ and thus $\exp (x)=1+  x+ \soo{x}$
\item $\log (1+x) \sim x$ and thus $\log (1+x)=x+ \soo{x}$
\item $\sin x \sim x$ and thus $\sin (x)=x+ \soo{x}$
\item $\tan x \sim x$ and thus $\tan (x)= x+ \soo{x}$
\item $\arcsin x \sim x $ and thus $\arcsin (x)= x+ \soo{x}$
\item $\arctan x \sim x $ and thus $\arctan (x)= x+ \soo{x}$
\item \label{property:penultimate} for $\alpha \in \BBR$ constant, $(1+x)^\alpha -1 \sim \alpha x
$ and thus $(1+x)^\alpha =1+  \alpha x+ \soo{x}$
\item \label{property:last} $1-\cos x \sim \dfrac{x^2}{2} $ and thus $\cos (x)=1-  \dfrac{x^2}{2}+ \soo{x^2}$
\end{enumerate}
\label{thm:baby-asymp-dev}\end{thm}
\begin{pf}
Results \ref{property:first}--\ref{property:penultimate} follow
from the fact that
$$f'(a) \neq 0, \quad \dfrac{f(x)-f(a)}{x-a} \to
f'(a) \implies f(x)-f(a)\sim f'(a)(x-a).$$Property
\ref{property:last} follows from the identity $1-\cos x =2\sin
^2\dfrac{x}{2}$.
\end{pf}
\begin{exa}
Since $\tan x = x + \soo{x}$, we have
$$\tan \dfrac{x^2}{2} =  \dfrac{x^2}{2} + \soo{\dfrac{x^2}{2}} = \dfrac{x^2}{2} + \soo{x^2},
$$as $x\rightarrow 0.$
Also,
$$(\tan x)^{3} = (x + \soo{x})^3 = x^3 + 3x^2\soo{x} +  3x\soo{x^2} + (\soo{x})^3 = x^3 + \soo{x^3}.$$
\end{exa}
\begin{exa}
Since $\cos x = 1  - \dfrac{x^2}{2} +  \soo{x^2}$, we have
$$\cos 3x^2 = 1  - \dfrac{9x^4}{2} +  \soo{x^4}.$$
\end{exa}
\begin{exa}
Find an asymptotic expansion of $\cot^2x$ of type $\soo{x^{-2}}$ as
$x\rightarrow 0$. \label{exa:cotangent}\end{exa}
\begin{solu} Since
$\tan x \sim x$ we have
$$\cot^2x \sim \dfrac{1}{x^2}.$$We can write this as $\cot^2x =
\dfrac{1}{x^2} + \soo{\dfrac{1}{x^2}}$.
\end{solu}
\begin{exa}
Calculate $$\lim_{x\rightarrow 0} \dfrac{\sin\sin\tan
\dfrac{x^2}{2}}{\log \cos 3x}.$$
\end{exa}
\begin{solu} We use theorems \ref{thm:baby-asymp-dev} and
\ref{cor:small-oh-properties}.
$$\begin{array}{lll}
\sin\sin\tan \dfrac{x^2}{2} & = & \sin\sin\left(\dfrac{x^2}{2} + \soo{x^2}\right) \\
& = & \sin\left(\dfrac{x^2}{2} + \soo{x^2} +
\soo{\dfrac{x^2}{2} + \soo{x^2}}\right) \\
& = & \sin \left(\dfrac{x^2}{2} + \soo{x^2}\right) \\
& = & \dfrac{x^2}{2} + \soo{x^2},
\end{array}$$and
$$\begin{array}{ccc}
\log \cos 3x & = & \log \left(1 - \dfrac{9x^2}{2} +
\soo{x^2}\right) \\
& = & - \dfrac{9x^2}{2} + \soo{x^2} + \soo{-
\dfrac{9x^2}{2} + \soo{x^2}} \\
& = & - \dfrac{9x^2}{2} + \soo{x^2} \\
\end{array}$$The limit is thus equal to
$$\lim_{x\rightarrow 0} \dfrac{\dfrac{x^2}{2} + \soo{x^2}}{- \dfrac{9x^2}{2} +
\soo{x^2}} = \lim_{x\rightarrow 0} \dfrac{\dfrac{1}{2} + \soo{1}}{-
\dfrac{9}{2} + \soo{1}} = -\dfrac{1}{9}.$$
\end{solu}
\begin{exa} Find $\lim_{x\rightarrow 0} (\cos x)^{(\cot^2x)}$.
\end{exa}
\begin{solu} By example \ref{exa:cotangent}, we have $\cot^2x =
\dfrac{1}{x^2} + \soo{\dfrac{1}{x^2}}.$ Also, $$\log \cos x = \log
\left(1 - \dfrac{x^2}{2} + \soo{x^2}\right) = - \dfrac{x^2}{2} +
\soo{x^2}.
$$ Hence
$$\begin{array}{ccc}(\cos x)^{\cot^2x}&  =  &
\exp\left((\cot^2x)\log \cos x\right) \\
& = & \exp\left(\left(\dfrac{1}{x^2} +
\soo{\dfrac{1}{x^2}}\right)\left(-
\dfrac{x^2}{2} + \soo{x^2}\right)\right)\\
& = & \exp(-\dfrac{1}{2} + \soo{1}) \\
& \rightarrow & e^{-1/2}, \end{array}$$as $x\rightarrow 0.$

\end{solu}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Prove that $\dfrac{\log  (1+2\tan x)}{\sin x} \to 2$ as $x\to 0$.
\end{pro}
\begin{pro}
Prove that $\left(1+\dfrac{1}{x}\right)^x \to e$ as $x\to +\infty$.
\end{pro}
\begin{pro}
Prove that $(\tan x)^{\cot 4x} \to e^{1/2}$ as $x\to
\dfrac{\pi}{4}$.
\end{pro}
\end{multicols}

\section{Asymptotic Expansions}
\begin{df}
Let $n\in \BBN$ and let $f:\N{0}\to \BBR$ where  $\N{0}$ is a
neighbourhood of  $0$. We say that $f$ admits an {\em asymptotic
expansion} of order $n$ about $x=0$ if there exists a polynomial $p$
of degree $n$ such that
$$ \forall x\in\N{0}, \quad f(x)=p(x)+\smallo{0}{x^n}. $$The
polynomial $p$ is called the {\em regular part of the asymptotic
expansion about $x=0$ of $f$}.
\end{df}
\begin{thm}
If $f$ admits an asymptotic expansion about $0$, its regular part is
unique.
\end{thm}
\begin{pf}
Assume $ f(x)=p(x)+\smallo{0}{x^n}$ and $
f(x)=q(x)+\smallo{0}{x^n}$, where $p(x) =p_nx^n+\cdots + p_1x+p_0$
and $q(x) =q_nx^n+\cdots + q_1x+q_0$ are polynomials of degree $n$.
If $p\neq q$, let $k$ be the smallest index for which $p_k\neq q_k$.
Then subtracting both expansions, as $x\to 0$,
$$p(x)-q(x)= \soo{x^n} \implies (p_k-q_k)x^k + (p_{k+1}-q_{k+1})x^{k+1}+\cdots + (p_n-q_n)x^n
=\soo{x^n} .
$$  But $(p_k-q_k)x^k + (p_{k+1}-q_{k+1})x^{k+1}+\cdots + (p_n-q_n)x^n \sim (p_k-q_k)x^k$ as $x\to 0$,
which is not $\soo{x^n}$ since $k \leq n$, a contradiction.
\end{pf}


\begin{df}
Let $n\in \BBN$, $a\in \BBR$, and let $f:\N{a}\to \BBR$ where
$\N{a}$ is a neighbourhood of  $a$. We say that $f$ admits an {\em
asymptotic expansion} of order $n$ about $x=a$ if there exists a
polynomial $p$ of degree $n$ such that
$$ \forall x\in\N{a}, \quad f(x)=p(x-a)+\smallo{a}{(x-a)^n}. $$The
polynomial $p$ is called the {\em regular part of the asymptotic
expansion about $x=a$ of $f$}.
\end{df}
\begin{df}
Let $n\in \BBN$, and let $f:\N{+\infty}\to \BBR$ where $\N{+\infty}$
is a neighbourhood of  $+\infty$. We say that $f$ admits an {\em
asymptotic expansion} of order $n$ about $+\infty$ if there exists a
polynomial $p$ of degree $n$ such that
$$ \forall x\in\N{+\infty}\cap \loro{0}{+\infty}, \quad f(x)=p\left(\dfrac{1}{x}\right)+\smallo{+\infty}{\dfrac{1}{x^n}}. $$The
polynomial $p$ is called the {\em regular part of the asymptotic
expansion about $+\infty$ of $f$}.
\end{df}
\begin{thm}
Let $f:\N{0}\rightarrow \BBR$ be a function with an asymptotic
expansion $f(x)=p(x) + \smallo{0}{x^n}$, where $p$ is a polynomial.
Then, if $f$ is even, then $p$ is even and if $f$ is odd, then $p$
is odd.
\end{thm}
\begin{pf}
Let $f(x)=p(x)+\soo{x^n}$ as $x\to 0$, where $p$ is a polynomial of
degree $n$. Then $f(-x)=p(-x) + \soo{x^n}$. If $f$ is even then
$$p(x)+\soo{x^n}=f(x)=f(-x)=p(-x)+\soo{x^n},$$ and so by uniqueness of
the regular part of an asymptotic expansion we must have
$p(x)=p(-x)$, so $p$ is even. Similarly if $f$ is odd then
$$-p(x)+\soo{x^n}=-f(x)=f(-x)=p(-x)+\soo{x^n},$$ and so by uniqueness of
the regular part of an asymptotic expansion we must have
$-p(x)=p(-x)$, so $p$ is odd.

\end{pf}


We want to expand the function $f$ in powers of $x-a$:
$$f(x) = a_0 + a_1(x-a) + a_2(x-a)^2 + \cdots + a_n(x-a)^n + \cdots , $$
and that we will truncate at the $n$-th term, obtaining thereby a
polynomial of degree $n$ in powers of $x-a$. We must determine what
the coefficients $a_k$ are, and what the remainder
$$f(x) - a_0 - a_1(x-a) - a_2(x-a)^2 - \cdots - a_n(x-a)^n = R(x) $$
is.  We hope that this remainder is $\smallo{a}{(x-a)^n}$. The
coefficients $a_k$ are easily found. For $0\leq k \leq n$ since $f$
is $n+1$ times differentiable, differentiating $k$ times,
$$f^{(k)}(x) = k!a_k + ((k+1)(k)\cdots 2)a_{k+1}(x-a) + ((k+2)(k+1)\cdots 3)a_{k+2}(x-a)^2 + \cdots + R^{(k)}(x),   \implies \dfrac{f^{(k)}(a)}{k!} = a_k,$$
as long as $R(a)=R'(a)=R''(a) = \cdots =R^{(n)}(a) = 0$. We write
our ideas formally in the following theorems.
\begin{thm}[Taylor-Lagrange Theorem] Let $I\subseteqq \BBR$, $I \neq \varnothing$ be an interval of $\BBR$ and let
 $f:I\rightarrow \BBR$ be
$n+1$ times differentiable in $I$. Then if $(x, a)\in I^2$, there
exists $c$ with $\inf (x, a)<c<\sup (x, a)$ such that
$$f(x) = f(a)+f'(a)(x-a) + \dfrac{f''(a)}{2!}(x-a)^2  +
\dfrac{f^{(3)}(a)}{3!}(x-a)^3+\cdots  +
\dfrac{f^{(n)}(a)}{n!}(x-a)^n+\dfrac{f^{(n+1)}(c)}{(n+1)!}(x-a)^{n+1}.
$$
\label{thm:Taylor-Lagrange}
\end{thm}
\begin{pf}
If $x=a$ then there is nothing to prove. If $x<a$ then replace
$x\mapsto f(x)$ with $x\mapsto f(-x)$, which then verifies the same
hypotheses given in the theorem. Thus it remains to prove the
theorem for $x>a$. Consider the function $\phi :
\lcrc{a}{x}\rightarrow \BBR$ with
$$ \phi (t) = f(x)- \sum_{k=0}^{n} f^{(k)}(t)\, \dfrac{(x-t)^k}{k!} -R\dfrac{(x-t)^{n+1}}{(n+1)!},  $$
where $R$ is a constant. Observe that $\phi (x)=0$. We now choose
the constant $R$  so that $\phi (a) =0$. Observe that $\phi$ is
differentiable and that it satisfies the hypotheses of Rolle's
Theorem on $\lcrc{a}{x}$. Therefore,  there exists $c\in
\loro{a}{x}$ such that $\phi ' (c) = 0$. Now
$$\phi '(t)=
- \sum_{k=1}^{n}\left( f^{(k+1)}(t) \dfrac{(x-t)^k}{k!}
-f^{(k)}(t)\dfrac{(x-t)^{k-1}}{(k-1)!} \right) +
R\dfrac{(x-t)^n}{n!} = -\dfrac{(x-t)^n}{n!}f^{(n+1)}(t) +
R\dfrac{(x-t)^n}{n!},  $$from where we gather, that $R =
f^{(n+1)}(c)$ and the theorem follows.
\end{pf}




\begin{cor}[Taylor-Young Theorem] Let $f:\N{a}\rightarrow \BBR$ be
$n+1$ times differentiable in $\N{a}$. Then $f$ admits the
asymptotic expansion of order $n$ about $a$:
$$f(x) = f(a)+f'(a)(x-a) + \dfrac{f''(a)}{2!}(x-a)^2  + \dfrac{f^{(3)}(a)}{3!}(x-a)^3+\cdots  + \dfrac{f^{(n)}(a)}{n!}(x-a)^n+\smallo{a}{(x-a)^n}.  $$
\label{cor:Taylor-Young}\end{cor}
\begin{pf}Follows at once from Theorem \ref{thm:Taylor-Lagrange}.
\end{pf}
The following theorem follows at once from Corollary
\ref{cor:Taylor-Young}.
\begin{thm}
Let $x \rightarrow 0$. Then
\begin{enumerate}
\item $\sin x = x  - \dfrac{x^3}{3!} + \dfrac{x^5}{5!} - \cdots + (-1)^n\dfrac{x^{2n + 1}}{(2n + 1)!} +  \soo{x^{2n + 2}}$. \\
 \item $\cos x = 1 -
\dfrac{x^2}{2!} + \dfrac{x^4}{4!} - \cdots +
(-1)^n\dfrac{x^{2n}}{(2n)!} + \soo{x^{2n + 1}}$.\item $\tan x = x +
\dfrac{x^3}{3} + \dfrac{2x^5}{15} +  \soo{x^5}$. \item $e^x = 1 + x
+ \dfrac{x^2}{2!} +  \dfrac{x^3}{3!} + \cdots + \dfrac{x^{n}}{n!} +
\soo{x^n}$
\item $\log  (1 + x) = x  - \dfrac{x^2}{2} + \dfrac{x^3}{3} -
\cdots  + (-1)^{n + 1}\dfrac{x^n}{n} + \soo{x^n}$. \item $(1 +
x)^{\tau} = 1 + \tau x   + \dfrac{\tau (\tau - 1)}{2}x^2 + \cdots +
\dfrac{\tau (\tau - 1)(\tau - 2) (\tau - 3)\cdots (\tau - n +
1)}{n!}x^n   + \soo{x^n}$.
\end{enumerate}
\label{thm:maclaurin}\end{thm}
\begin{exa}
Find an asymptotic development of $\log (2\cos x + \sin x)$ around
$x = 0$ of order $\soo{x^4}$.
\end{exa}
\begin{solu} By theorem \ref{thm:maclaurin},
$$\begin{array}{ccc}2\cos x + \sin x &  = & 2\left(1 - \dfrac{x^2}{2} + \dfrac{x^4}{24} + \soo{x^5}\right) +
 \left(x - \dfrac{x^3}{6} + \soo{x^4} \right) \\
 & = & 2 + x - x^2 - \dfrac{x^3}{6} + \dfrac{x^4}{12} +
 \soo{x^4}\\
 & = & 2\left(1 + \dfrac{x}{2} - \dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +
 \soo{x^4}\right) ,
\end{array}$$
and so,
$$\begin{array}{cccl}
\log (2\cos x + \sin x) & = &  & \log  2\left(1 + \dfrac{x}{2} -
\dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +
 \soo{x^4}\right) \\
 & = &  & \log 2 + \log \left(1 + \dfrac{x}{2} - \dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +
 \soo{x^4}\right) \\
 & = &  & \log 2 + \left(\dfrac{x}{2} - \dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +  \soo{x^4}\right) \\
 & &  - &
   \dfrac{1}{2}\left(\dfrac{x}{2} - \dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +
 \soo{x^4}\right)^2  \\
 & &  +  & \dfrac{1}{3}\left(\dfrac{x}{2} - \dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +
 \soo{x^4}\right)^3 \\
 & &  - &
 \dfrac{1}{4}\left(\dfrac{x}{2} - \dfrac{x^2}{2} - \dfrac{x^3}{12} + \dfrac{x^4}{24} +
 \soo{x^4}\right)^4  + \soo{x^4} \\

& = &  &  \log 2  +  \left(\dfrac{x}{2} -  \dfrac{x^2}{2} -
\dfrac{x^3}{12} + \dfrac{x^4}{24} \right)
- \dfrac{1}{2}\left(\dfrac{x^2}{4} -  \dfrac{x^3}{2} + \dfrac{x^4}{6} \right)        \\
&  & + & \dfrac{1}{3}\left(\dfrac{x^3}{8} -  \dfrac{3x^4}{8} \right)
- \dfrac{1}{4}\cdot\dfrac{x^4}{16} + \soo{x^4} \\
 & = &  & \log 2 + \dfrac{x}{2} - \dfrac{5x^2}{8} + \dfrac{5x^3}{24} - \dfrac{35x^4}{192} +
 \soo{x^4}
\end{array}$$
as $x \rightarrow 0$.
\end{solu}




\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small \begin{pro} Prove that the limit
$$\lim _{n\to +\infty} \left(1+\dfrac{1}{2}+\dfrac{1}{3}+\cdots + \dfrac{1}{n}\right)-\log n,
$$exists. The constant
$$\gamma =\lim _{n\to +\infty} \left(1+\dfrac{1}{2}+\dfrac{1}{3}+\cdots + \dfrac{1}{n}\right)-\log n
$$is called the {\em Euler-Mascheroni} constant. It is not known
whether $\gamma$ is irrational.
\end{pro}


\end{multicols}










\chapter{Integrable Functions}

\section{The Area Problem}


\begin{df}
Let $f:\lcrc{a}{b}\rightarrow \BBR$ be bounded, say with $m \leq
f(x)\leq M$ for all $x\in \lcrc{a}{b}$. Corresponding to each
partition $\curlyP = \{x_0, x_1, \ldots , x_n\}$ of $\lcrc{a}{b}$,
we define the {\em upper Darboux sum}
$$U(f, \curlyP) = \sum _{k=1} ^n (\sup _{x_{k-1} \leq x\leq x_k}f(x))(x_k-x_{k-1}),$$
and the {\em lower Darboux sum}
$$L(f, \curlyP) = \sum _{k=1} ^n (\inf _{x_{k-1} \leq x\leq x_k}f(x))(x_k-x_{k-1}).$$
Clearly
$$L(f, \curlyP) \leq U(f, \curlyP).    $$


 Finally, we put
$$ \overline{\dint} _a ^b f(x)\d{x} = \inf _{\curlyP\ \mathrm{is\ a \ partition\ of}\ \lcrc{a}{b}} U(f,
\curlyP),$$which we call the {\em upper Riemann integral of $f$} and
$$ \underline{\dint} _a ^b f(x)\d{x} = \sup _{\curlyP\
\mathrm{is\ a \ partition\ of}\ \lcrc{a}{b}} L(f, \curlyP),
 $$which we call the {\em lower Riemann integral of $f$}.
\end{df}
\begin{df}
Let $f:\lcrc{a}{b}\rightarrow \BBR$ be bounded. We say that {\em $f$
is Riemann integrable} if $ \overline{\dint} _a ^b
f(x)\d{x}=\underline{\dint} _a ^b f(x)\d{x} $. In this case,
we denote their common value by $\dint _a ^b f(x)\d{x} $ and call it
the {\em Riemann integral of $f$ over $\lcrc{a}{b}$}.
\end{df}

\begin{thm}\label{thm:refinement-ineqs-upper-lower-sums}
Let $f$ be a bounded function on $\lcrc{a}{b}$ and let
$\curlyP\subseteqq \curlyP'$ be two partitions of $\lcrc{a}{b}$.
Then
$$ L(f, \curlyP )\leq L(f, \curlyP ')\leq U(f, \curlyP ' )\leq U(f,
\curlyP ). $$
\end{thm}
\begin{pf}
Clearly it is enough to prove this when $\curlyP '$ has exactly one
more point than $\curlyP$. Let $$ \curlyP =\{x_0, x_1, \ldots ,
x_n\}
$$with $a=x_0<x_1<\cdots < x_{n-1}<x_n=b$. Let $\curlyP '$ have the
extra point $x_*$ with $x_i <x_*<x_{i+1}$.  Observe that we have
both  $\inf _{x_{i} \leq x\leq x_{i+1}}f(x) \leq \inf _{x_{i} \leq
x\leq x_{*}}f(x)$ and $\inf _{x_{i} \leq x\leq x_{i+1}}f(x) \leq
\inf _{x_{*} \leq x\leq x_{i+1}}f(x)$ since the larger interval may
contain smaller values of $f$. Then
$$\begin{array}{lll}\inf _{x_{i} \leq
x\leq x_{i+1}}f(x)(x_{i+1}-x_i) &= & \inf _{x_{i} \leq x\leq
x_{i+1}}f(x)(x_{i+1}-x_*+x_*-x_i)\\
& = &  \inf _{x_{i} \leq x\leq x_{i+1}}f(x)(x_{*}-x_i) +\inf _{x_{i}
\leq x\leq
x_{i+1}}f(x)(x_{i+1}-x_*)\\
& \leq  &  \inf _{x_{i} \leq x\leq x_{*}}f(x)(x_{*}-x_i) +\inf
_{x_{*} \leq x\leq x_{i+1}}f(x)(x_{i+1}-x_*).\\   \end{array}$$ Thus

$$\begin{array}{lll}
L(f, \curlyP) &  =  & (\inf _{x_{0} \leq x\leq x_1}f(x))(x_1-x_{0})
+ \cdots + (\inf _{x_{i} \leq x\leq x_{i+1}}f(x))(x_{i+1}-x_{i})  +
\cdots+ (\inf _{x_{n-1} \leq x\leq
x_n}f(x))(x_n-x_{n-1}) \\
&  \leq   & (\inf _{x_{0} \leq x\leq x_1}f(x))(x_1-x_{0}) + \cdots +
(\inf _{x_{i} \leq x\leq x_{*}}f(x))(x_{*}-x_{i}) + (\inf _{x_{*}
\leq x\leq x_{i+1}}f(x))(x_{i+1}-x_{*}) + \cdots+ (\inf _{x_{n-1}
\leq x\leq
x_n}f(x))(x_n-x_{n-1}) \\
& = &L(f, \curlyP ').
\end{array} $$
A similar argument shews that $ U(f, \curlyP ' )\leq U(f, \curlyP
)$. Then we have
$$ L(f, \curlyP )\leq L(f, \curlyP ')\leq U(f, \curlyP ' )\leq U(f,
\curlyP ), $$ proving the theorem.\end{pf}


\begin{thm} \label{thm:upper-lower-riemann-sums-ineq}
Let $f$ be a bounded function on $\lcrc{a}{b}$ and let $\curlyP _1$
and $\curlyP _2$ be any two partitions of $\lcrc{a}{b}$. Then
$$L(f, \curlyP _1)\leq U(f, \curlyP _2). $$
\end{thm}
\begin{pf}
Let $\curlyP' = \curlyP _1 \cup \curlyP _2$ be a common refinement
for  $\curlyP _1$ and $\curlyP _2$. By Theorem
\ref{thm:refinement-ineqs-upper-lower-sums},
$$ L(f, \curlyP _1) \leq  L(f, \curlyP _1\cup \curlyP _2)\leq U(f, \curlyP _1\cup \curlyP _2) \leq U(f,\curlyP _1),  $$
and
$$ L(f, \curlyP _2) \leq  L(f, \curlyP _1\cup \curlyP _2)\leq U(f, \curlyP _1\cup \curlyP _2) \leq U(f,\curlyP _2),  $$
whence the theorem follows.\end{pf}


\begin{thm}\label{thm:riemann-on-top-dominates-riemann-on-bottom}
Let $f$ be a bounded function on $\lcrc{a}{b}$. Then $
\underline{\dint} _a ^b f(x)\d{x}\leq \overline{\dint} _a ^b
f(x)\d{x} $.
\end{thm}
\begin{pf}
By Theorem \ref{thm:upper-lower-riemann-sums-ineq},
$$L(f, \curlyP _1)\leq U(f, \curlyP _2) \implies  \underline{\dint} _a ^b f(x)\d{x} = \sup _{\curlyP _1\
\mathrm{is\ a \ partition\ of}\ \lcrc{a}{b}}L(f, \curlyP _1)\leq
U(f, \curlyP _2),   $$ and so
$$\underline{\dint} _a ^b f(x)\d{x}   \leq
U(f, \curlyP _2).$$Taking now the infimum,
$$\underline{\dint} _a ^b f(x)\d{x}   \leq
\inf _{\curlyP _2\ \mathrm{is\ a \ partition\ of}\ \lcrc{a}{b}}U(f,
\curlyP _2) = \overline{\dint} _a ^b f(x)\d{x},$$and the result is
established.\end{pf}


\begin{thm}\label{thm:riemann-int-if-upper-lower-sums-small}
Let $f$ be a bounded function on $\lcrc{a}{b}$. Then $f$ is Riemann
integrable if and only if $\forall \varepsilon > 0$, $\exists
\curlyP$ a partition of $\lcrc{a}{b}$ such that  $$ U(f,
\curlyP)-L(f, \curlyP) < \varepsilon .
$$
\end{thm}
\begin{pf}
\begin{description}
\item[$\Leftarrow$] If for all $\varepsilon > 0$, $ U(f,
\curlyP)-L(f, \curlyP) < \varepsilon  $ then by Theorem
\ref{thm:riemann-on-top-dominates-riemann-on-bottom},
$$ L(f, \curlyP)\leq  \underline{\dint} _a ^b
f(x)\d{x} \leq  \overline{\dint} _a ^b f(x)\d{x} \leq  U(f, \curlyP)
\implies 0 \leq \overline{\dint} _a ^b f(x)\d{x} - \underline{\dint}
_a ^b f(x)\d{x} <\varepsilon,
$$ and so $\overline{\dint} _a ^b f(x)\d{x} = \underline{\dint} _a ^b
f(x)\d{x}$, which means that $f$ is Riemann-integrable.
\item[$\implies$] Suppose $f$ is Riemann integrable. By the
Approximation property of the supremum and infimum, for all
$\varepsilon > 0$ there exist partitions $\curlyP_1$ and $\curlyP_2$
such that
$$ U(f, \curlyP_2) -\int _a ^b f(x)\d{x}<\dfrac{\varepsilon}{2}, \qquad \int _a ^b f(x)\d{x}-L(f, \curlyP_1)<\dfrac{\varepsilon}{2}.  $$
Hence by taking $\curlyP = \curlyP_1 \cup \curlyP_2$ then
$$U(f, \curlyP)\leq U(f, \curlyP_2) <\int _a ^b f(x)\d{x} + \dfrac{\varepsilon}{2} < L(f, \curlyP_1) + \varepsilon < L(f, \curlyP)+\varepsilon,   $$
from where $ U(f, \curlyP)-L(f, \curlyP) < \varepsilon  $.
\end{description}

\end{pf}
\begin{exa}
\begin{itemize}
\item $ f(x)=
\begin{cases} 0& \text{$ x $ irrational,} \\ 1&
\text{$ x $ rational.}
\end{cases} \quad  x \in [0;1] $

Then $ U(f, \curlyP)=1, L(f, \curlyP)=0 $, for any partition
$\curlyP$, and so $ f $ is not Riemann integrable.

\item $ f(x)=
\begin{cases} 0& \text{$ x $ irrational,} \\
\frac{1}{q}& \text{$ x $ rational = $ \frac{p}{q} $ in lowest
terms.}
\end{cases}
\quad  x \in [0;1] $

is Riemann integrable with $$  \int_{0}^{1}f(x)\,\d{x} =0 $$
\end{itemize}
\end{exa}

\begin{df}
Let $f$ be a bounded function on $\lcrc{a}{b}$ and let $\curlyP =
\{x_0, x_1, \ldots , x_n\}$ be a partition of $\lcrc{a}{b}$. If
$t_k$ are selected so that $x_{k-1}\leq t_k\leq x_k$, then
$$ S(f, \curlyP) = \sum _{k=1} ^n f(t_k)(x_k-x_{k-1}) $$is called a
{\em Riemann sum of $f$ associated with $\curlyP$.}\end{df}



\begin{thm}\label{thm:integrable-if-dominated-by-linear-combination}
Let $f_1, f_2, \ldots , f_m$ be Riemann integrable over
$\lcrc{a}{b}$, and let $f:\lcrc{a}{b}\rightarrow \BBR$. If for any
subinterval $I \subseteqq \lcrc{a}{b}$ there exist strictly
positive numbers $a_1, a_2, \ldots , a_m$ such that
$$ \omega (f, I) \leq a_1\omega (f_1, I)+ a_2 \omega (f_2, I)+ \cdots + a_m\omega (f_m,I),
$$then $f$ is also Riemann integrable.
\end{thm}

\begin{pf}
Let $\curlyP = \{a=x_0<x_1<\cdots < x_n=b\}$ be a partition of
$\lcrc{a}{b}$ selected so that for all $j$, $$U(f_j, \curlyP)-L(f_j,
\curlyP) < \dfrac{\varepsilon}{a_1+a_2+\cdots + a_m}.$$ Using the
notation of the preceding theorem,
$$\begin{array}{lll}  U(f, \curlyP)-L(f, \curlyP) & = & Z(f,
\curlyP)\\
& = &  \sum _{k=1} ^n \omega (f, \lcrc{x_{k-1}}{x_k})(x_k-x_{k-1})
\\
 & \leq & \sum _{k=1} ^n  \sum _{j=1} ^m a_j\omega (f_j,
 \lcrc{x_{k-1}}{x_k})(x_k-x_{k-1}) \\
  & =&  \sum _{j=1} ^m a_j\sum _{k=1} ^n \omega (f_j,
 \lcrc{x_{k-1}}{x_k})(x_k-x_{k-1}) \\
& =&\sum _{j=1} ^m a_j\left(U(f_j, \curlyP)-L(f_j, \curlyP)\right)
\\
& < & \varepsilon,
\end{array}
$$and the theorem follows from  Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}.
\end{pf}

\begin{thm}[Algebra of Riemann Integrable Functions] Let $f$ and $g$
be Riemann integrable functions on $\lcrc{a}{b}$ and let $\lambda
\in \BBR$ be a constant. Then the following are also Riemann
integrable
\begin{enumerate}
\item $f+\lambda g$
\item $\absval{f}$
\item $fg$
\item provided $\inf _{x\in [a;b]} \absval{g(x)}>0$,  also $\dfrac{1}{g}$
\item provided $\inf _{x\in [a;b]} \absval{g(x)}>0$,  also $\dfrac{f}{g}$
\end{enumerate}\label{thm:algebra-of-int-functions}
\end{thm}

\begin{pf}
Since $$\absval{f(x)+\lambda g(x) -f(t)-\lambda g(t)} \leq
\absval{f(x)-f(t)}+\absval{\lambda}\absval{g(x)-g(t)}, \qquad
\mathrm{and} \qquad \absval{\absval{f(x)}-\absval{f(t)}} \leq
\absval{f(x)-f(t)},
$$we have
$$ \omega (f+\lambda g, I) \leq   \omega (f, I)+\absval{\lambda}  \omega (g, I) \qquad \mathrm{and} \qquad \omega (\absval{f}, I) \leq \omega (f, I),$$
from where the first two assertions follow, upon appealing to
Theorem \ref{thm:integrable-if-dominated-by-linear-combination}.

\bigskip


To prove the third assertion, put $a_1=\sup _{u\in
[a;b]}\absval{f(u)}$ and $a_2=\sup _{u\in [a;b]}\absval{g(u)}$. Then
$$\begin{array}{lll} \absval{f(x)g(x)-f(t)g(t)} & = & \absval{f(x)(g(x)-g(t))+g(t)(f(x)-f(t))} \\
& \leq & \absval{f(x)}\absval{g(x)-g(t)}+\absval{g(t)}\absval{f(x)-f(t)} \\
& \leq & \left(\sup _{u\in
[a;b]}\absval{f(u)}\right)\absval{g(x)-g(t)} + \left(\sup _{u\in
[a;b]}\absval{g(u)}\right) \absval{f(x)-f(t)}\\
& = & a_1\absval{g(x)-g(t)}+a_2\absval{f(x)-f(t)},\\
\end{array}$$
which gives $$\omega (fg, I) \leq a_1\omega (f, I) + a_2\omega (g,
I),
$$and so the third assertion follows from Theorem \ref{thm:integrable-if-dominated-by-linear-combination}.

\bigskip
 To prove the fourth assertion, with $m= \inf _{x\in [a;b]} \absval{g(x)}>0$, observe that we have
 $$\begin{array}{lll} \absval{\dfrac{1}{g(x)}-\dfrac{1}{g(t)}} & = &
 \dfrac{1}{\absval{g(x)g(t)}}\absval{g(x)-g(t)} \\
 & \leq & \dfrac{1}{m^2}\absval{g(x)-g(t)}, \\
 \end{array}$$and this gives $\omega (\dfrac{1}{g}, I)\leq \dfrac{1}{m^2}\omega (g,
 I)$. The fourth assertion now follows by again appealing to Theorem \ref{thm:integrable-if-dominated-by-linear-combination}.

 \bigskip

 The fifth assertion follows from the third and the fourth.\end{pf}

\begin{thm}\label{thm:additivity-of-integrals}
Let $f$ and $g$ be Riemann integrable functions on $\lcrc{a}{b}$ and
let $\lambda \in \BBR$ be a constant.  Then
$$ \dint _a ^b (f(x) + \lambda g(x))\d{x} = \dint _a ^b f(x) \d{x}+ \lambda\dint _a ^bg(x) \d{x}.  $$
\end{thm}

\begin{pf}
Let $\curlyP = \{a=x_0<x_1<\cdots < x_n=b\}$ be a partition of
$\lcrc{a}{b}$ and choose $t_k$ such that $t_k\in
\lcrc{x_{k-1}}{x_k}$. Then for any $\varepsilon > 0$ there exist
$\delta >0$ and $\delta'>0$ such that
$$\absval{\sum _{k=1} ^n f(t_k)(x_k-x_{k-1}) -\int _a ^b f(x)\d{x}}<\dfrac{\varepsilon}{2} \qquad \mathrm{if}\quad \norm{\curlyP}<\delta,  $$
$$\absval{\lambda\sum _{k=1} ^n g(t_k)(x_k-x_{k-1}) -\lambda \int _a ^b g(x)\d{x}}<\dfrac{\varepsilon}{2} \qquad \mathrm{if}\quad \norm{\curlyP}<\delta'.  $$
Hence, if $\norm{\curlyP}<\min (\delta , \delta')$,
$$\begin{array}{l}\absval{\sum _{k=1} ^n \left(f(t_k)+\lambda g(t_k)\right)(x_k-x_{k-1}) -\int _a ^b f(x)\d{x}-\lambda \int _a ^b
g(x)\d{x}} \\
 \qquad \leq   \absval{\sum _{k=1} ^n f(t_k)(x_k-x_{k-1}) -\int _a ^b f(x)\d{x}} +\absval{\lambda \sum _{k=1} ^n g(t_k)(x_k-x_{k-1}) -\lambda\int _a ^b g(x)\d{x}}     \\
  \qquad <  \varepsilon
\end{array}$$proving the theorem.\end{pf}

\begin{thm}\label{thm:monotonicity-of-integral-operator}
Let $f$ and $g$ be Riemann integrable functions on $\lcrc{a}{b}$
with  $f(x)\leq g(x)$ for all $x\in \lcrc{a}{b}$.  Then
$$ \dint _a ^b f(x)\d{x} \leq \dint _a ^bg(x) \d{x}.  $$
\end{thm}
\begin{pf}
The function $h=g-f$ is positive for all $x\in [a;b]$ and hence
$L(h, \curlyP)\geq 0$ for all partitions $\curlyP$. It is also
integrable by Theorem \ref{thm:additivity-of-integrals}. Thus
$$ \int _a ^b h(x)\d{x} = \underline{\int} _a ^b h(x)\d{x}\geq
0.$$But
$$ \int _a ^b h(x)\d{x}  \geq
0 \implies 0\leq \int _a ^b (g(x)-f(x))\d{x}=\int _a ^b
g(x)\d{x}-\int _a ^b f(x)\d{x},$$ and so $\dint _a ^b f(x)\d{x} \leq
\dint _a ^bg(x) \d{x}$, as claimed.
\end{pf}

\begin{thm}[Triangle Inequality for Integrals]
Let $f$ be a Riemann integrable function on $\lcrc{a}{b}$.  Then
$$ \absval{\dint _a ^b f(x)\d{x}} \leq \dint _a ^b\absval{f(x)} \d{x}.  $$
\end{thm}
\begin{pf}
By Theorem \ref{thm:algebra-of-int-functions}, $\absval{f}$ is
integrable. Now, since $-\absval{f}\leq f \leq \absval{f}$ we just
need to apply Theorem \ref{thm:monotonicity-of-integral-operator}
twice.
\end{pf}

\begin{thm}[Chasles' Rule]\label{thm:chasles} Let $f$ be a Riemann integrable function on
$\lcrc{a}{b}$ and let $c\in \loro{a}{b}$. Then $f$ is Riemann
integrable function on $\lcrc{a}{c}$ and   $\lcrc{c}{b}$. Moreover,
$$ \dint _a ^b f(x)\d{x} = \dint _a ^c f(x)\d{x} + \dint _c ^b f(x)\d{x}.
$$ Conversely, if $c\in \loro{a}{b}$ and $f$ is Riemann
integrable  on $\lcrc{a}{c}$ and   $\lcrc{c}{b}$ then $f$ is Riemann
integrable  on $\lcrc{a}{b}$ and
$$ \dint _a ^b f(x)\d{x} = \dint _a ^c f(x)\d{x} + \dint _c ^b f(x)\d{x}.
$$
\end{thm}

\begin{pf}
Consider the partitions
$$\curlyP = \{a=x_0<x_1<\cdots < x_m=c<x_{m+1}<\cdots < x_n=b\},
\quad \curlyP' = \{a=x_0<x_1<\cdots < x_m=c\},\quad \curlyP'' =
\{c=x_m<x_{m+1}<\cdots < x_n=b\}.
$$
where by virtue of Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}, given
$\varepsilon>0$, we choose $\curlyP$ so that
$$ U(f, \curlyP)-L(f, \curlyP)<\varepsilon. $$It follows that
$$ \left( U(f, \curlyP')-L(f, \curlyP')\right)+  \left( U(f, \curlyP'')-L(f, \curlyP'')\right)=  U(f, \curlyP)-L(f, \curlyP)<\varepsilon.$$
Hence $f$ is Riemann-integrable over both $[a;c]$ and $[c;b]$.
Observe that
$$0 \leq U(f, \curlyP') -\int _a ^c f(x)\d{x} <\varepsilon, \qquad  0 \leq U(f, \curlyP'') -\int _c ^b f(x)\d{x} <\varepsilon ,$$
$$0 \leq \int _a ^c f(x)\d{x} -L(f, \curlyP') <\varepsilon, \qquad  0 \leq \int _c ^b f(x)\d{x}- L(f, \curlyP'')  <\varepsilon ,$$
and upon addition,
$$ 0 \leq U(f, \curlyP)- \left(\int _a ^c f(x)\d{x}+\int _c ^b f(x)\d{x}\right) < 2\varepsilon,  $$
$$ 0 \leq \left(\int _a ^c f(x)\d{x}+\int _c ^b f(x)\d{x}\right)-L(f, \curlyP) < 2\varepsilon,  $$
whence
$$ \dint _a ^b f(x)\d{x} = \dint _a ^c f(x)\d{x} + \dint _c ^b f(x)\d{x},$$as required.
\end{pf}
\begin{thm}[Converse of Chasles' Rule]\label{thm:chasles-converse} Let $f$ be a function defined
on the interval $[a;b]$ and let $c\in]a;b[$. If $f$ is
Riemann-integrable on $[a;c]$ and $[c;b]$ then it is also Riemann
integrable in  $[a;b]$ and $$ \dint _a ^b f(x)\d{x} = \dint _a ^c
f(x)\d{x} + \dint _c ^b f(x)\d{x}. $$
\end{thm}
\begin{pf}
Since $f$ is Riemann-integrable on both subintervals, it is bounded
there, and so it is bounded on the larger subinterval. By  Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}, given $\varepsilon
> 0$ there exist partitions $\curlyP'$ and $\curlyP''$ such that
$$U_{[a;c]}(f, \curlyP')-L_{[a;c]}(f, \curlyP')<\varepsilon, \qquad U_{[c;b]}(f, \curlyP'')-L_{[c;b]}(f, \curlyP'')<\varepsilon.  $$
The above inequalities also hold in the refinement $\curlyP =
\curlyP' \cup \curlyP''$, and
$$U(f, \curlyP)= U_{[a;c]}(f, \curlyP)+U_{[c;b]}(f, \curlyP), \qquad L(f, \curlyP)= L_{[a;c]}(f, \curlyP)+L_{[c;b]}(f, \curlyP).  $$
We then deduce that
$$\begin{array}{lll}U(f, \curlyP)-L(f, \curlyP) & = &  \left(U_{[a;c]}(f, \curlyP)+U_{[c;b]}(f, \curlyP)\right)- \left(L_{[a;c]}(f, \curlyP)+L_{[c;b]}(f, \curlyP)\right)\\
 & = &  \left(U_{[a;c]}(f, \curlyP)-L_{[a;c]}(f, \curlyP)\right)+ \left(U_{[c;b]}(f, \curlyP)-L_{[c;b]}(f, \curlyP)\right)\\
& < & 2\varepsilon,
\end{array}$$and so $f$ is Riemann integrable in $[a;b]$ by virtue of Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}.
 Now
$$ \begin{array}{lll}\int _a ^b f(x)\d{x} & \leq & U(f, \curlyP) \\
& < & L(f, \curlyP)+2\varepsilon\\
& = & L_{[a;c]}(f, \curlyP)+L_{[c;b]}(f, \curlyP)+2\varepsilon\\
& \leq & \int _a ^c f(x)\d{x}+\int _c ^b f(x)\d{x}+2\varepsilon, \\
 \end{array}$$
 and similarly
$$ \begin{array}{lll}\int _a ^b f(x)\d{x} & \geq & L(f, \curlyP) \\
& > & U(f, \curlyP)-2\varepsilon\\
& = & U_{[a;c]}(f, \curlyP)+U_{[c;b]}(f, \curlyP)-2\varepsilon\\
& \geq & \int _a ^c f(x)\d{x}+\int _c ^b f(x)\d{x}-2\varepsilon, \\
 \end{array}$$
 hence
 $$\int _a ^c f(x)\d{x}+\int _c ^b f(x)\d{x}-2\varepsilon \leq \int _a ^b f(x)\d{x}\leq \int _a ^c f(x)\d{x}+\int _c ^b f(x)\d{x}+2\varepsilon,  $$
and since $\varepsilon>0$ is arbitrary, this gives the desired equality between integrals.\end{pf}

\begin{thm}
Let $f$ be Riemann integrable over $[a;b]$ and let $g:\lcrc{\inf
_{u\in [a;b]}f(u)}{\sup _{u\in [a;b]}f(u)}\rightarrow \BBR$ be
continuous. Then $g\circ f$ is Riemann integrable on $[a;b]$.
\end{thm}
\begin{pf}
Since $g$ is uniformly continuous on the compact interval
$\lcrc{\inf _{u\in [a;b]}f(u)}{\sup _{u\in [a;b]}f(u)}$, for given
$\varepsilon > 0$ we may find $\delta'$ such that
$$ (s,t)\in \lcrc{\inf
_{u\in [a;b]}f(u)}{\sup _{u\in [a;b]}f(u)}^2; \quad
\absval{s-t}<\delta' \implies \absval{g(s)-g(t)}<\varepsilon.
$$Let $\delta = \min (\delta', \varepsilon)$. Since $f$ is
Riemann-integrable, we may choose a partition $\curlyP =
\{a=x_0<x_1<\cdots < x_n =b\}$ such that
\begin{equation}\label{eq:lower-upper-composition}U(f,
\curlyP)-L(f,\curlyP)<\delta ^2, \end{equation} by virtue of Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}. Let
$$ m_k = \inf _{x_{k-1}\leq x \leq x_k}f(x); \quad M_k = \sup _{x_{k-1}\leq x \leq x_k}f(x);  $$
$$ m_k ^* = \inf _{x_{k-1}\leq x \leq x_k}(g\circ f)(x); \quad M_k ^* = \sup _{x_{k-1}\leq x \leq x_k}(g\circ f)(x).  $$
We split the set of indices $\{1,2,\ldots , n\}$ into two classes:
$$A = \{k: 1\leq k \leq n,  M_k-m_k <\delta \}; \qquad B =  \{k: 1\leq k \leq n,  M_k-m_k \geq \delta \}. $$
If $k\in A$ and $x_{k-1}\leq x \leq y \leq x_k$, then
$$\absval{f(x)-f(y)} \leq M_k - m_k < \delta \leq \delta' \implies \absval{(g\circ f)(x)-(g\circ f)(y)}< \varepsilon,  $$
whence $M_k ^* -m_k ^* \leq \varepsilon$. Therefore
$$\sum _{k\in A} \left(M_k ^* -m_k ^*\right) (x_{k}-x_{k-1}) \leq \varepsilon \sum _{k=1} ^n (x_k - x_{k-1}) = \varepsilon (b-a).  $$
If $k\in B$ then $M_k-m_k \geq \delta$ and by virtue of
(\ref{eq:lower-upper-composition}),
$$\delta \sum _{k\in B} (x_{k}-x_{k-1}) \leq \sum _{k\in B} (M_k-m_k)(x_{k}-x_{k-1}) \leq
\sum _{1\leq k \leq n} (M_k-m_k)(x_{k}-x_{k-1}) = U(f, \curlyP)-L(f,
\curlyP)<\delta ^2,
$$whence
$$\sum _{k\in B} (x_{k}-x_{k-1}) <\delta \leq \varepsilon.  $$
Upon assembling all these inequalities, and letting $M = \sup
_{t\in\lcrc{\inf _{u\in [a;b]}f(u)}{\sup _{u\in [a;b]}f(u)}}
\absval{g(t)}$, we obtain
$$\begin{array}{lll}
U(g\circ f, \curlyP)-L(g\circ f, \curlyP) & = & \sum _{k\in A}
\left(M_k ^* -m_k ^*\right)(x_{k}-x_{k-1}) +\sum _{k\in B} \left(M_k
^* -m_k ^*\right)(x_{k}-x_{k-1})\\
& \leq & \varepsilon (b-a) +2M\sum _{k\in
B}(x_{k}-x_{k-1}) \\
& \leq & \varepsilon (b-a) + 2M\varepsilon \\ & = &  \varepsilon
(b-a+2M),
\end{array}$$whence the result follows from Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}.
\end{pf}


\begin{df}
If $b<a$ we define $\int _a ^bf(x)\d{x} = -\int _b ^a f(x)\d{x}$.
Also, $\int _a ^af(x)\d{x}=0. $
\end{df}


\begin{thm}A  function $f$ on $\lcrc{a}{b}$ is Riemann integrable on
$\lcrc{a}{b}$ if and only if its set of discontinuities forms a set
of Lebesgue  measure
$0$.\label{thm:riemann-int-iff-continuous-almost-everywhere}
\end{thm}

\begin{pf}
\begin{description}
\item[$\implies$] Given $\gamma > 0$ and $\delta > 0$, put $\varepsilon = \gamma
\delta$. Let $f$ be Riemann integrable. There is a partition
$\curlyP = \{a=x_0< x_1< \cdots < x_n=b\}$ such that $$ U(f,
\curlyP)-L(f, \curlyP)< \varepsilon.
$$Let $x\in\loro{x_i}{x_{i+1}}$ be such that $\omega (f, x)\geq
\gamma$. Then $$\sup _{\loro{x_i}{x_{i+1}}}f(x) - \inf
_{\loro{x_i}{x_{i+1}}}f(x) \geq \gamma.  $$Now observe that
$$\{x\in[a;b]: \omega (f, x)\geq \gamma\} \subseteqq \left( \bigcup _{\sup f-\inf f \geq \gamma} \loro{x_i}{x_{i+1}}\right) \cup \{x_0, x_1, \ldots , x_n\}. $$
Hence
$$\begin{array}{lll} \mu \left(\{x\in[a;b]: \omega (f, x)\geq
\gamma\}\right)& \leq & \sum  _{\sup _{]x_i;x_{i+1}[}f(x) -
\inf _{]x_i;x_{i+1}[}f(x) \geq \gamma}\absval{x_{i+1}-x_i}\\
& \leq & \dfrac{1}{\gamma}\sum _i \absval{x_{i+1}-x_i}\left(\sup
_{\loro{x_i}{x_{i+1}}}f(x) - \inf _{\loro{x_i}{x_{i+1}}}f(x)\right)
\\
& \leq & \dfrac{1}{\gamma} \left(U(f, \curlyP)-L(f,
\curlyP)\right)\\
& < & \dfrac{\varepsilon}{\gamma}\\
& = & \delta.
\end{array}$$
Since $\delta > 0$ was arbitrary, we get $\mu
\left(\{x\in[a;b]: \omega (f, x)\geq \gamma\}\right) = 0$ for each
$\gamma > 0$, and taking the union over $\gamma = \dfrac{1}{K}$, $K \geq 1$,
we obtain $\mu \left(\{x\in[a;b]: \omega (f, x)> 0\}\right) = 0$,
which means that the set of discontinuities is a set of measure $0$.

\item[$\Leftarrow$]

Conversely,  assume $\meas{\{x\in[a;b]: \omega (f, x)> 0\}} = 0$. We
can write
$$\{x\in[a;b]: \omega (f, x)>
0\} = \bigcup _{K \geq 1}  \{x\in[a;b]: \omega (f, x)>
\dfrac{1}{K}\}.
$$
Fix $K$ large enough so that $\dfrac{1}{K}<\varepsilon$. Since
$\meas{\{x\in[a;b]: \omega (f, x)\geq \dfrac{1}{K}\}}=0$, we can
find open intervals $I_j(K)$ such that
$$\{x\in[a;b]: \omega (f, x)\geq \dfrac{1}{K}\} \subseteqq \bigcup _{j\geq 1}I_j(K), \qquad \sum _{j\geq 1}\meas{I_j(K)}<\varepsilon.   $$
It is easy to shew that $\left\{x\in[a;b]: \omega (f, x)>
\dfrac{1}{K}\right\}$ is closed and bounded and hence compact, so we
may find a finite subcover with
$$ \{x\in[a;b]: \omega (f, x)> \dfrac{1}{K}\} \subseteqq I_{j_1}\cup I_{j_2}\cup \cdots \cup I_{j_N}.  $$
Now
$$\lcrc{a}{b}\setminus \left(I_{j_1}\cup I_{j_2}\cup \cdots \cup I_{j_N}\right)
$$is a finite disjoint union of closed intervals, say $J_1\cup J_2\cup \cdots \cup
J_M$. If $x\in J_i$ then $\omega (f, x)<\dfrac{1}{K}$. Thus on each
of the $J_i$ we may find so fine a partition that $\omega (f,
L)<\dfrac{1}{K}$ for every interval $L$ of such a partition. All these
partitions and the endpoints of  the $I_{j_k}$ form a partition, say
$\curlyP$. Write ${\cal S} = S_1\cup S_2 \cup \cdots \cup S_M$ for
the intervals of the partition $\curlyP$ that are not the $I_{j_k}$.
Observe that $\omega (f, S_k) < \dfrac{1}{K}$. Then
$$\begin{array}{lll} U(f, \curlyP)-L(f, \curlyP) & = & \sum _{I_{j_k}} \left(\sup
_{I_{j_k}}f - \inf _{I_{j_k}}f\right)\left(\meas{I_{j_k}}\right) +
 \sum _{S_k} \left(\sup
_{S_k}f - \inf _{S_k}f\right)\left(\meas{S_k}\right)\\
 & \leq
& 2\sup _{[a;b]} \absval{f}\sum _{k=1} ^N \meas{I_{j_k}}+ \dfrac{1}{K}\sum _{S_k} \meas{S_k} \\
& \leq & 2\sup _{[a;b]} \absval{f}\varepsilon +  (b-a)\varepsilon\\
& = & \left(2\sup _{[a;b]} \absval{f} +  (b-a)\right)\varepsilon. \\
\end{array}$$
This proves the theorem.
\end{description}
\end{pf}


\begin{cor}
Every continuous function $f$ on $[a;b]$ is Riemann integrable on
$\lcrc{a}{b}$.
\end{cor}

\begin{pf}
This is immediate from Theorem
\ref{thm:riemann-int-iff-continuous-almost-everywhere}.
\end{pf}
\begin{cor}
Every monotonic function $f$ on $\lcrc{a}{b}$ is Riemann integrable
on $\lcrc{a}{b}$.
\end{cor}

\begin{pf}
Since a countable set has measure $0$, and since the set of
discontinuities of a monotonic function is countable (Theorem
\ref{thm:discontinuities-of-monotone-functions}), the result is
immediate.\end{pf}



\begin{pspicture}[plotpoints=200]( -0.5 , -3)(10 ,2.5)%
\psStep[algebraic , fillstyle=solid , fillcolor= yellow ](0.001,9.5){40}{2* sqrt(x)*cos(ln(x))*sin(x)}%
\psStep[algebraic ,StepType= Riemann , fillstyle= solid , fillcolor=blue ](0.001 ,9.5) {40}{2*sqrt(x)* cos(ln(x))* sin(x)}%
\psaxes{->}(0 ,0)(0 , -2.75)(10 ,2.5) \psplot [ algebraic , linecolor= white]{0.001}{9.75}{2* sqrt(x)* cos(ln(x))* sin(x)}%
\uput [90](6 ,1.2){$f(x)=2\cdot\sqrt{x}\cdot\cos {(\log{x})}\cdot\sin{x}$}%
\end{pspicture}

\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}\label{pro:epsilon-delta-riemann-integrability}
Let $f$ be a bounded function on $\lcrc{a}{b}$. Then $f$ is Riemann
integrable if and only if $\forall \varepsilon > 0$, $\exists \delta
> 0$ such that for all partitions $\curlyP$ of $\lcrc{a}{b}$,
$$ \norm{\curlyP}< \delta \implies  U(f, \curlyP)-L(f,
\curlyP) < \varepsilon .
$$
\begin{answer}
\begin{description}
\item[$\Leftarrow$] This follows directly from Theorem \ref{thm:riemann-int-if-upper-lower-sums-small}.
\item[$\implies$] If $f$ is Riemann integrable, let $\varepsilon >
0$ and let $\curlyP' = \{a=y_0< y_1 < \ldots < y_m=b\}$ be a
partition with $m+1$ points such that
$$U(f, \curlyP')-L(f, \curlyP')<\dfrac{\varepsilon}{2}.
$$
As $f$ is bounded, there is $M>0$ such that $\forall x\in
\lcrc{a}{b}, \quad\absval{f(x)}\leq M$. Take $\delta =
\dfrac{\varepsilon}{8mM}$ and consider now an arbitrary partition
$\curlyP = \{a=x_0< x_1 < \ldots < x_n=b\}$ with norm
$\norm{\curlyP}<\delta$. Put $\curlyP'' = \curlyP \cup \curlyP'$.
Arguing as in Theorem \ref{thm:refinement-ineqs-upper-lower-sums},
we obtain
$$L(f, \curlyP'')-L(f, \curlyP)< 2mM\norm{\curlyP}<2mM\delta=\dfrac{\varepsilon}{4}.  $$
Since by Theorem \ref{thm:upper-lower-riemann-sums-ineq} $L(f,
\curlyP')\leq L(f, \curlyP'')$ we gather
$$ L(f, \curlyP')-L(f, \curlyP)<\dfrac{\varepsilon}{4}. $$
In a similar fashion we establish that
$$ U(f, \curlyP)-U(f, \curlyP')<\dfrac{\varepsilon}{4}, $$
and upon assembling the inequalities,
$$U(f, \curlyP)-L(f, \curlyP)< U(f, \curlyP')-L(f, \curlyP')+\dfrac{\varepsilon}{2}<\varepsilon,
$$since we had assumed that $ U(f, \curlyP')-L(f, \curlyP')<\dfrac{\varepsilon}{2}$.
\end{description}\end{answer}

\end{pro}
\begin{pro}
Let $f$ be a bounded function on $\lcrc{a}{b}$. Then $f$ is Riemann
integrable on $\lcrc{a}{b}$ if and only if
$$ \lim _{\norm{\curlyP}\rightarrow 0}S(f, \curlyP) $$exists and is
finite. In this case we write $ \lim _{\norm{\curlyP}\rightarrow
0}S(f, \curlyP) = \dint _a ^b f(x)\d{x}$.
\begin{answer}
\begin{description}
\item[$\implies$] Assume $f$ is Riemann-integrable. For $\varepsilon >
0$ let $\delta > 0$ be chosen so that the conditions of Problem
\ref{pro:epsilon-delta-riemann-integrability} be fulfilled. By
definition of a Riemann sum,
$$L(f, \curlyP)\leq S(f, \curlyP)\leq U(f, \curlyP),  $$and
therefore
$$ U(f, \curlyP)< L(f, \curlyP)+\varepsilon \leq \underline{\int} _a ^b f(x)\d{x} + \varepsilon = \int _a ^b f(x)\d{x} + \varepsilon $$
and
$$L(f, \curlyP) > U(f, \curlyP) - \varepsilon \geq \overline{\int} _a ^b f(x)\d{x} - \varepsilon =\int  _a ^b f(x)\d{x} - \varepsilon .
$$These inequalities give
$$ \absval{S(f, \curlyP) - \int  _a ^b f(x)\d{x}}< \varepsilon ,
$$whence $ \lim _{\norm{\curlyP}\rightarrow 0}S(f, \curlyP) =   \int  _a ^b f(x)\d{x}. $
 \item[$\Leftarrow$] Suppose that $\lim _{\norm{\curlyP}\rightarrow 0}S(f, \curlyP)
 =L$,  existing and  finite. Given $\varepsilon > 0$ there is $\delta >
 0$ such that $\norm{\curlyP}<\delta$ implies
 \begin{equation}\label{eq:ineq-lower-upper-darboux} L-\dfrac{\varepsilon}{3}< S(f, \curlyP)< L + \dfrac{\varepsilon}{3}.
 \end{equation}
Now, choose $\curlyP = \{a=x_0<x_1<\cdots < x_n=b\}$. By letting
$t_k$ range over $\lcrc{x_{k-1}}{x_k}$ we gather, from
(\ref{eq:ineq-lower-upper-darboux})
$$L-\dfrac{\varepsilon}{3} \leq L(f, \curlyP) \leq U(f,\curlyP) \leq L+\dfrac{\varepsilon}{3}, $$
whence $$U(f,\curlyP)-L(f,\curlyP)\leq \dfrac{2}{3}\varepsilon <
\varepsilon,
$$meaning that $f$ is Riemann-integrable over $\lcrc{a}{b}$ by Theorem \ref{thm:riemann-int-if-upper-lower-sums-small}. Thus
$$L(f, \curlyP) \leq \int _a ^b f(x)\d{x} \leq U(f, \curlyP),  $$and
so $\lim _{\norm{\curlyP}\rightarrow 0}S(f, \curlyP) = \dint _a ^b
f(x)\d{x}$.
\end{description}
\end{answer}
\end{pro}

\begin{pro}
Let $f$ be bounded on $\lcrc{a}{b}$. Then $f$ is Riemann integrable
on $\lcrc{a}{b}$ if and only if for every $\varepsilon > 0,
\varepsilon ' >0$ there is a partition $\curlyP$ of $\lcrc{a}{b}$
such that $$ \sum _{k=1} ^n (x_k-x_{k-1})\chi_{\{x\in[a;b]:\omega
(f, [x_{k-1}; x_k]) \geq \varepsilon' \}}<\varepsilon .
$$Here $\chi_E(\cdot)$ is the \emph{indicator function} of a set
$E$, defined as
$$ \chi _E(x) = \left\{\begin{array}{ll}1 & \mathrm{if}\ x\in E \\
0 & \mathrm{if}\ x\not\in E \\ \end{array}\right. .$$
\begin{answer}
\begin{description}
\item[$\implies$]
Let $\curlyP = \{a=x_0<x_1<\cdots < x_n=b\}$ be a partition of
$\lcrc{a}{b}$. Set
$$Z(f, \curlyP) = \sum _{k=1} ^n \omega (f, \lcrc{x_{k-1}}{x_k})(x_k-x_{k-1}) = U(f, \curlyP)-L(f, \curlyP), \qquad \Omega = \sup _{x\in [a;b]}f(x)-\inf _{x\in [a;b]}f(x). $$
Let $$ \delta = \sum _{k=1} ^n (x_k-x_{k-1})\chi_{\{x\in[a;b]:\omega
(f, [x_{k-1}; x_k]) \geq \varepsilon' \}}. $$Then $Z(f, \curlyP)
\geq \delta\varepsilon'$. Since we are assuming that $f$ is
Riemann-integrable, there exists a partition $\curlyP$ (by Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}) such that $$ Z(f,
\curlyP) \leq \varepsilon'\varepsilon.
$$ Thus we have
$\delta \varepsilon'<\varepsilon\varepsilon'$ from where $\delta <
\varepsilon$.

 \item[$\Leftarrow$] Assume there is a partition $\curlyP$ for which
$\delta < \varepsilon $. In the intervals $I=[x_{k-1}; x_k]$ where
$\omega (f, I)\geq \varepsilon'$ the oscillation of $f$ is at most
$\Omega$, and in the remaining intervals (the sum of whose lengths is
$b-a-\delta$), the oscillation is less than $\varepsilon'$. Hence
$$Z(f, \curlyP) \leq \delta\Omega + (b-a-\delta)\varepsilon'. $$
Choose now
$$ \varepsilon' =\dfrac{\varepsilon ''}{2(b-a)}  , \qquad \delta = \dfrac{\varepsilon ''}{2\Omega}.
$$Since $b-a-\delta\leq b-a$,  $$Z(f, \curlyP) \leq \delta\Omega + (b-a-\delta)\varepsilon'
\leq \dfrac{\varepsilon''}{2} + \dfrac{\varepsilon''}{2}=
 \varepsilon '',$$
whence $f$ is Riemann-integrable by Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}.
\end{description}
\end{answer}
\end{pro}

\end{multicols}

\section{Integration}
\begin{thm}[First Fundamental Theorem of Calculus] Let $f:\lcrc{a}{b}\rightarrow
\BBR$ be Riemann integrable on $\lcrc{a}{b}$. If there exists a
differentiable function $F:\lcrc{a}{b}\rightarrow \BBR$ such that
$F'=f$ then
$$ \dint _a ^b f(x)\d{x}=F(b)-F(a). $$
\end{thm}
\begin{pf}
Given $\varepsilon > 0$, in view of Theorem
\ref{thm:riemann-int-if-upper-lower-sums-small}, there is a
partition $\curlyP = \{a=x_0<x_1< \cdots < x_n=b\}$ such that
$$U(f, \curlyP)-L(f,\curlyP)<\varepsilon.  $$Since $F$ is
differentiable on $[a;b]$, it is continuous on $[a;b]$. Applying the
Mean Value Theorem to each partition subinterval $[x_{k-1};x_k]$, we
obtain $c_k\in  ]x_{k-1};x_k[$ such that $$F(x_k)-F(x_{k-1}) =
f(c_k)(x_k-x_{k-1}).
$$This gives
$$F(b)-F(a) = \sum _{1\leq k \leq n} \left(F(x_k)-F(x_{k-1})\right) = \sum _{1\leq k \leq n} f(c_k)\left(x_k-x_{k-1}\right),  $$
and since $\inf _{u\in [x_{k-1};x_k]}f(u) \leq f(c_k)\leq \sup
_{u\in [x_{k-1};x_k]}f(u)$, we deduce that
$$L(f, \curlyP)\leq F(b)-F(a) \leq U(f, \curlyP).  $$
Furthermore, we know that $L(f, \curlyP)\leq \int _a ^b f(x)\d{x}
\leq U(f, \curlyP).$ Hence, combining these two last inequalities,
$$\absval{F(b)-F(a)-\int _a ^b f(x)\d{x}}<\varepsilon,  $$and the
theorem follows.\end{pf}

\begin{thm}[Second Fundamental Theorem of Calculus] Let $f:\lcrc{a}{b}\rightarrow
\BBR$ be Riemann integrable on $\lcrc{a}{b}$, and let
$$F(x) =  \dint _a ^x f(t)\d{t}, \qquad x\in \lcrc{a}{b}. $$Then $F$
is continuous on $\lcrc{a}{b}$. Moreover, if $f$ is continuous at
$c\in\loro{a}{b}$, then $F$ is differentiable at $c$ and $F'(c) =
f(c)$.
\end{thm}
\begin{pf}
There is $M>0$ such that $\forall x\in [a;b], \quad
\absval{f(x)}\leq M$. Now, if $a \leq x < y \leq b$ with
$\absval{x-y}<\dfrac{\varepsilon}{M}$, then
$$ \absval{F(y)-F(x)} = \absval{\int _x ^y f(t)\d{t}} \leq \int _x ^y \absval{f(t)}\d{t}\leq \int _x ^y M\d{t} =M(y-x)<\varepsilon. $$
Thus $F$ is continuous on $[a;b]$ and by Heine's Theorem, uniformly
continuous on $[a;b]$. Now, take $u\in ]a;b[ $, and observe that
$$x\neq u \implies \dfrac{F(x)-F(u)}{x-u} = \dfrac{1}{x-u} \int _u ^x f(t)\d{t}.$$
Moreover,
$$f(u) =   \dfrac{1}{x-u} \int _u ^x f(u)\d{t},$$and therefore,
$$  \dfrac{F(x)-F(u)}{x-u} -f(u) = \dfrac{1}{x-u} \int _u ^x \left(f(t)-f(u)\right)\d{t}. $$
Since $f$ is continuous at $u$, there is $\delta > 0$ such that
$$t\in [a;b], \absval{t-u}<\delta \implies \absval{f(t)-f(u)}<\varepsilon.  $$
This gives
$$\absval{\dfrac{F(x)-F(u)}{x-u} -f(u)}<\varepsilon  $$ for $x\in
]a;b[$ with $\absval{x-u}< \delta$. From this it follows that $F'(u)
= f(u)$.\end{pf}
\begin{thm}[Young's Inequality for Integrals] Let $f$ be a strictly
increasing continuous function on $\lcro{0}{+\infty}$ and let
$f(0)=0$. If $A>0$ and $B>0$ then
$$AB \leq \dint _0 ^A f(x)\d{x} + \dint _0 ^B f^{-1}(x)\d{x}.  $$
\label{thm:young's-ineq-integrals}
\end{thm}
\begin{pf}
The inequality is evident from Figure
\ref{fig:young's-ineq-integrals}. The rectangle of area $AB$ fits
nicely in the areas under the curves $y=f(x), x\in [0;A]$ and
$x=f^{-1}(y), y\in [0;B]$.
\end{pf}


\begin{thm}[H\"{o}lder's Inequality for Integrals]
Let $p>1$ and put $\dfrac{1}{q} = 1-\dfrac{1}{p}$. If $f$ and $g$
are Riemann integrable on $\lcrc{a}{b}$ then
$$  \absval{\dint _a ^bf(x)g(x)\d{x}}  \leq \left(\dint _a ^b \absval{f(x)}^p\d{x} \right)^{1/p}
\left(\dint _a ^b \absval{g(x)}^q\d{x} \right)^{1/q}.$$
\end{thm}
\begin{pf}
First observe that all of $\absval{fg}, \absval{f}^p$ and
$\absval{g}^q$ are Riemann-integrable, in view of Theorem
\ref{thm:algebra-of-int-functions}. Now, with  $f(x) = x^{p-1}$ in
Young's Inequality (Theorem \ref{thm:young's-ineq-integrals}), we
obtain, \begin{equation}\label{eq:young} AB \leq \dfrac{A^{p}}{p} +
\dfrac{B^{1/(p-1)+1}}{1/(p-1)+1} =
\dfrac{A^{p}}{p}+\dfrac{B^q}{q}.\end{equation} If any of the
integrals in the statement of the theorem is zero, the result is
obvious. Otherwise put $A^p=\int _a ^b \absval{f(x)}^p\d{x}$,
$B^q=\int _a ^b \absval{g(x)}^q\d{x}$. Then by (\ref{eq:young}),
$$ \dfrac{\absval{f(x)g(x)}}{AB} \leq \dfrac{A^{-p}\absval{f(x)}^p}{p}+ \dfrac{B^{-q}\absval{g(x)}^q}{q}. $$
Integrating throughout the above inequality,
$$ \dfrac{1}{AB}\int _a ^b \absval{f(x)g(x)}\d{x}\leq \dfrac{1}{pA^p}\int _a ^b \absval{f(x)}^p\d{x}
+ \dfrac{1}{qB^q}\int _a ^b \absval{g(x)}^q\d{x} =
\dfrac{1}{p}+\dfrac{1}{q}=1,
$$whence the theorem follows.
\end{pf}

\vspace{2cm}
\begin{figure}[h]
\centering \psset{unit=1pc}
\psaxes[labels=none,ticks=none]{->}(0,0)(0,0)(5,5) \uput[d](1,0){A}
 \uput[l](0,4){B}
 \psline[linestyle=dotted](1,0)(1,1.420735492)
 \psline[linestyle=dotted](0,4)(4.487398067,4)
\psplot[linewidth=2pt,algebraic]{0}{4.487398067}{x + sin(x)/2}
\pscustom[fillstyle=solid,fillcolor=red]{\psplot[algebraic]{0}{1}{x+sin(x)/2}\psline(1,1.420735492)(1,0)(0,0)}
\pscustom[fillstyle=solid,fillcolor=blue]{\psplot[algebraic]{1}{4.487398067}{x+sin(x)/2}\psline(4.487398067,4)(0,4)(0,0)}
\vspace{1cm} \hangcaption{Young's Inequality (Theorem
\ref{thm:young's-ineq-integrals}).}\label{fig:young's-ineq-integrals}
\end{figure}
\vspace{1cm}
\begin{thm}
Let $f:\lcrc{a}{b}\rightarrow \BBR$. Then
\begin{enumerate}
\item If $f$ is continuous on $\lcrc{a}{b}$, $\forall x\in \lcrc{a}{b},  f(x)\geq
0$, $\exists c\in \lcrc{a}{b}$ with $f(c)>0$ then $\dint _a ^b
f(x)\d{x}>0$.
\item If $f,g$ are continuous on $\lcrc{a}{b}$, $\forall x\in \lcrc{a}{b}, f(x)\leq
g(x)$, and  $\exists c\in \lcrc{a}{b}$ with $f(c)<g(c)$ then $\dint
_a ^b f(x)\d{x}<\dint _a ^b g(x)\d{x}$.
\end{enumerate}
\end{thm}
\begin{pf}
The second part follows from the first by considering $g-f$. Let us
prove the first part. \bigskip

 Assume first that $c\in ]a;b[$. Then there is a neighbourhood $]c-\delta; c+\delta[ \subseteqq
 ]a;
 b[$ of $c$, with $\delta > 0$, such that $\forall x\in ]c-\delta; c+\delta[, f(x) \geq
 \dfrac{f(c)}{2}$. Therefore
 $$ \dint _a ^b
f(x)\d{x}\geq  \dint _{c-\delta} ^{c+\delta} f(x)\d{x}>\dint
_{c-\delta} ^{c+\delta} \dfrac{f(c)}{2}\d{x} = \delta f(c)>0. $$ If
$c=a$ then we consider a neighbourhood of the form $]a;a+\delta[$,
and similarly if $c=b$, we consider a neighbourhood of the form
$]b-\delta;b[$.
\end{pf}
\begin{thm}[First Mean Value Theorem for Integrals]
Let $f, g$ be continuous on $\lcrc{a}{b}$, with $g$ of constant sign
on $\lcrc{a}{b}$. Then there exists $c\in \loro{a}{b}$ such that
$$\dint _a ^b f(x)g(x)\d{x} = f(c)\dint _a ^b g(x)\d{x}.  $$
\label{thm:first-mvt-integrals}
\end{thm}
\begin{pf}
If $g$ is identically $0$, there is nothing to prove. Similarly, if
$f$ is constant in $[a;b]$ there is nothing to prove. Otherwise, $g$
is always strictly positive or strictly negative in the interval
$[a;b]$. Let
$$m =\inf _{x\in [a;b]}f(x); \quad M =\sup _{x\in [a;b]}f(x).   $$
Then
$$ m< \dfrac{\int _a ^b f(x)g(x)\d{x}}{\int _a ^bg(x)\d{x}} <M.
$$By the Intermediate Value Theorem, there is $c\in ]a;b[$ such that
$$ f(c) =  \dfrac{\dint _a ^b f(x)g(x)\d{x}}{\dint _a ^bg(x)\d{x}} ,$$
proving the theorem.\end{pf}
\begin{thm}[Integration by Parts]\label{thm:do-me-by-parts}
Let $f, g$ be differentiable functions on $[a;b]$ with $f'$ and $g'$
integrable on $[a;b]$. Then
$$ \int _a ^b f'(x)g(x)\d{x} +  \int _a ^b f(x)g'(x)\d{x} = f(x)g(x)\Big| _a ^b = f(b)g(b)-f(a)g(a). $$


\end{thm}
\begin{pf}
This follows at once from the Product Rule for Derivatives and the
Second Fundamental Theorem of Calculus,
since
$$ (fg)' = f'g+fg' \implies  f(b)g(b)-f(a)g(a) = \int _a ^b \left(f(x)g(x)\right)'\d{x} = \int _a ^b f'(x)g(x)\d{x} +  \int _a ^b f(x)g'(x)\d{x}.$$
\end{pf}

\begin{cor}[Repeated Integration by
Parts]\label{cor:baby-do-me-by-parts} Let $n\in\BBN$. If the
$n+1$-th derivatives $f^{(n+1)}$ and $g^{(n+1)}$ are continuous on
$[a;b]$ then
$$\int _a ^b f(x)g^{(n+1)}(x)\d{x} = \left(f(x)g^{(n)}(x)-f'(x)g^{(n-1)}(x)+f''(x)g^{(n-2)}(x) - \cdots + (-1)^n f^{(n)}(x)g(x)\right)\Big| _a ^b + (-1)^{n+1}\int _a ^b f^{(n+1)}(x)g(x)\d{x}.  $$
\end{cor}
\begin{pf}
Follows by inducting on $n$ and applying Theorem
\ref{thm:do-me-by-parts}.
\end{pf}

\begin{thm}[Integration by Substitution] Let $g$ be a differentiable
function on an open interval  $I$ such that $g'$ is continuous on
$I$. If $f$ is continuous on $g(I)$ then $f\circ g$ is continuous on
$I$ and $\forall (a, b)\in I^2$,
$$ \dint _a ^b (f\circ g)(x)g'(x)\d{x} = \dint _{g(a)} ^{g(b)} f(u)\d{u}. $$

\end{thm}
\begin{pf}
Fix $c\in I$ and put $F(x) = \dint _c ^x f(u)\d{u}$. By The Second
Fundamental Theorem of Calculus, $\forall x\in I, F'(x) = f(x)$.
Furthermore, let $t(x) = F(g(x))$. By The Chain Rule, $t' = (F'\circ
g)g' = (f\circ g)g'$. Therefore
$$\begin{array}{lll}
\dint _a ^b (f\circ g)(x)g'(x)\d{x} & = & \dint _a ^b t'(x)\d{x}
\\
& = & t(b)-t(a) \\
& = & F(g(b))-F(g(a))\\
& = & \dint _c ^{g(b)}f(u)\d{u} -\dint _c ^{g(a)}f(u)\d{u}\\
& = & \dint _{g(a)} ^{g(b)}f(u)\d{u},
\end{array}$$as was to be shewn.
\end{pf}

\begin{thm}[Second Mean Value Theorem for Integrals]
Let $f, g$ be continuous on $\lcrc{a}{b}$, with $g$ monotonic on
$\lcrc{a}{b}$. Then there exists $c\in \loro{a}{b}$ such that
$$\dint _a ^b f(x)g(x)\d{x} = g(a)\dint _a ^c f(x)\d{x} + g(b)\dint _c ^b f(x)\d{x}.  $$
\end{thm}
\begin{pf}
Put $F(x) = \int _a ^x f(t)\d{t}$. Then $F'(x) = f(x)$. Hence
$$\int _a ^b f(x)g(x)\d{x} =\int _a ^b F'(x)g(x)\d{x} = F(x)g(x)\Big| _a ^b -\int _a ^b F(x)g'(x)\d{x}
$$and therefore $$ \int _a ^b f(x)g(x)\d{x} = F(b)g(b)-F(a)g(a) - \int _a ^b
F(x)g'(x)\d{x}.   $$By the First Mean Value Theorem for Integrals
and by the First Fundamental Theorem of Calculus, there is a $c\in
]a;b[$ such that
$$\int _a ^b F(x)g'(x)\d{x} = F(c)\int _a ^bg'(x)\d{x} = F(c)(g(b)-g(a)).  $$
Assembling all the above,
$$\begin{array}{lll}\int _a ^b f(x)g(x)\d{x} &= &  F(b)g(b)-F(a)g(a) -F(c)(g(b)-g(a)) \\
&= & g(b)(F(b) -F(c))+g(a)(F(c)-F(a))\\
& = &g(b)\int _c ^b f(x)\d{x} + g(a)\int _a ^c f(x)\d{x},\end{array}
$$as desired.\end{pf}

\begin{thm}[Generalisation of the AM-GM
Inequality]\label{thm:AMGM-ineq-generalised2} Let $a_i\geq 0$,
$p_i\geq 0$ with $p_1+p_2+\cdots + p_n=1$. Then
$$G= a_1 ^{p_1} a_2 ^{p_2}\cdots a_n ^{p_n}\leq  p_1a_1+p_2a_2+\cdots + p_na_n=A.   $$
(Here we interpret $0^0=1$.)
\end{thm}
\begin{pf}
There is a subindex $k$ such that $a_k\leq G\leq a_{k+1}$. Hence
$$\sum _{i=1} ^k p_i\dint _{a_i} ^G
\left(\dfrac{1}{x}-\dfrac{1}{G}\right)\d{x} + \sum _{i=k+1} ^n
p_i\dint ^{a_i} _G \left(\dfrac{1}{G}-\dfrac{1}{x}\right)\d{x} \geq
0,  $$as all the integrands are $\geq 0$. Upon rearranging
$$ \sum _{i=1} ^n p_i\dint _G ^{a_i}
\dfrac{1}{x}\d{x} \leq   \sum _{i=1} ^n p_i\dint _G ^{a_i}
\dfrac{1}{G}\d{x} \implies  \sum _{i=1} ^n p_i(\log a_i-\log G) \leq
\sum _{i=1} ^n p_i\cdot \dfrac{a_i-G}{G} \implies 0 \leq
\dfrac{A}{G}-1,$$obtaining the inequality $G\leq A$.
\end{pf}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Let $p$ be a polynomial of degree at most $4$ such that
$p(-1)=p(1)=0$ and $p(0)=1$. If $p(x) \leq 1$ for $x\in [-1;1]$,
find the largest value of $\int _{-1} ^1 p(x) \d{x}$.
\begin{answer}
$\dfrac{8}{5}$
\end{answer}
\end{pro}
\begin{pro}
Compute $ \dint _0 ^3 x\floor{x}\d{x}$.
\begin{answer}
$$\begin{array}{lll}
 \dint _0 ^3 x\floor{x}\ \ \d{x} &  = & \int _0 ^1 x\floor{x}\ \ \d{x} + \int _1 ^2 x\floor{x}\ \ \d{x}  +
\int _2 ^3 x\floor{x}\ \ \d{x} \\
& = & 0\int _0 ^1 x \ \ \d{x} + 1\int _1 ^2 x \ \ \d{x} + 2\int _2
^3 x \ \ \d{x} \\ & = & \dfrac{x^2}{2}\Big| _1 ^2 + x^2\Big| _2 ^3
\\ & = & (2 - \frac{1}{2}) + (9-4) \\ & = & \dfrac{13}{2}.
\end{array}$$
\end{answer}
\end{pro}
\begin{pro}
Let $f$ be a differentiable function such that $$f(x + h) - f(x) =
e^{x+h}-h-e^x$$ and $f(0) = 3$. Find $f(x)$.
\begin{answer}
We have $$f'(x) = \lim _{h \rightarrow 0}\dfrac{f(x + h) - f(x)}{h}
= \lim _{h \rightarrow 0} \dfrac{e^{x+h}-h-e^x}{h} = \lim _{h
\rightarrow 0} \dfrac{e^{x+h}-e^x}{h} - \lim _{h \rightarrow 0}
\dfrac{h}{h} = e^x -1,$$whence $f(x) = e^x - x + C$. Since  $3 =
f(0) = e^0 - 0 + C\implies C = 2$, we deduce that $f(x) = e^x - x +
2$.
\end{answer}
\end{pro}
\begin{pro}
Let $f$ be a continuous function such that $f(x)f(a - x) = 1$ and
let  $a
> 0$. Find $\dint _0 ^a \dfrac{1}{f(x) + 1}\d{x}$.
\begin{answer}
Put $I = \int _0 ^a \frac{1}{f(x) + 1}{\rm d}x$. We have $$I = \int
_0 ^a \frac{1}{f(u) + 1}{\rm d}u =\int _0 ^a \frac{f(u)f(a-u)}{f(u)
+ f(u)f(a-u)}{\rm d}u = \int _0 ^a \frac{f(a-u)}{1 + f(a-u)}{\rm d}u
= -\int _a ^0 \frac{f(v)}{1 + f(v)}{\rm d}v = \int _0 ^a
\frac{f(u)}{1 + f(u)}{\rm d}u  ,$$whence
$$2I = \int _0 ^a \frac{f(u)}{1+f(u)}{\rm d}u +    \int _0 ^a \frac{f(a-u)}{1
+ f(a-u)}{\rm d}u = \int _0 ^a \frac{2 + f(u) + f(a-u)}{2 + f(u) +
f(a-u)}{\rm d}u = a,
$$and so $I = \dfrac{a}{2}$.
\end{answer}
\end{pro}
\begin{pro}
Let $f$ be a Riemann integrable function over every bounded interval
and such that $f(a+b)=f(a)+f(b)$ for all $(a, b)\in\BBR^2$.
Demonstrate that $f(x)=xf(1)$.
\begin{answer}Observe first that $f(0+0)=f(0)+f(0)$ and so $f(0)=0$.
Integrate $f(u+y)=f(u)+f(y)$  for $u\in[0;x]$, keeping $y$ constant,
getting
$$\int _0 ^{x}f(u+y)\d{u}=\int _0 ^{x}f(u)\d{u}+ \int _0 ^{x}f(y)\d{u} = \int _0 ^{x}f(u)\d{u}+xf(y). $$
Also, by substitution, $$\int _0 ^{x}f(u+y)\d{u} = \int _y
^{y+x}f(u)\d{u} = \int _0 ^{y+x}f(u)\d{u}-\int _0 ^yf(u)\d{u}.$$
Hence \begin{equation}\label{eq:Cauchy-int-equation} xf(y) = \int _0
^{y+x}f(u)\d{u}-\int _0 ^yf(u)\d{u}- \int _0
^{x}f(u)\d{u}.\end{equation}Exchanging $x$ and $y$:
\begin{equation}\label{eq:Cauchy-int-equation2} yf(x) = \int _0
^{y+x}f(u)\d{u}-\int _0 ^xf(u)\d{u}- \int _0
^{y}f(u)\d{u}.\end{equation} From (\ref{eq:Cauchy-int-equation}) and
(\ref{eq:Cauchy-int-equation2}) we gather that $xf(y)=yf(x)$. If
$xy\neq 0$ then $\dfrac{f(x)}{x} = \dfrac{f(y)}{y}$. This means that
$\dfrac{f(x)}{x}$ is constant, and so for $x\neq 0$, $f(x)=cx$
for some constant $c$. Since $f(0)=0$, $f(x)=cx$ for all $x$. Taking
$x=1$, $f(1)=c$.
\end{answer}


\end{pro}

\begin{pro}
Compute $\dint _0 ^2 x\floor{x^2}\d{x}$.
\end{pro}
\begin{pro}Find $\dint _{-1} ^2 |x^2-1| \ \ \d{x} $.
\begin{answer}We have
$$\begin{array}{lll}
\dint _{-1} ^2 |x^2-1| \ \ \d{x}  & = & \int _{-1} ^1 (1-x^2) \ \
\d{x} + \int _{1} ^2 (x^2-1) \ \ \d{x} \\
& = & (x - \frac{x^3}{3})\Big| _{-1} ^1 +  (\frac{x^3}{3} - x)\Big| _1 ^2 \\
& = & 2(1 - \frac{1}{3}) + (\frac{8}{3} -2) - (\frac{1}{3}-1)\\
& = & \dfrac{4}{3} + \dfrac{2}{3} + \dfrac{2}{3} \\
& = & \dfrac{8}{3}
\end{array}$$
\end{answer}
\end{pro}
\begin{pro}
Let $n$ be a fixed integer. Let $f:\BBR \rightarrow \BBR$ be given
by
$$f(x)\quad = \quad \left\{\begin{array}{ll}x & \mathrm{if}\ x \leq 0\\ 2^n& \mathrm{if}\ 2^n-2^{n-2} <x \leq 2^{n+1}-2^{n-1} \end{array}\right.  $$ Prove that $\dint _0 ^{2^n} f(x)\d{x} =\dint _0 ^{2^n} x\d{x}=2^{2n-1}$.
\end{pro}
\begin{pro}[Putnam 1938] Evaluate the limit $$ \lim _{t\rightarrow 0} \dfrac{\dint _0 ^t
(1+\sin 2x)^{1/x}\d{x}}{t}.
$$
\end{pro}
\begin{pro}
Find the value of $\dint _0 ^1 \max (x^2, 1-x)\d{x}$.
\end{pro}



\begin{pro}Let $a>0$.
Let $f$ be a continuous function on $\lcrc{0}{a}$ such that
$f(x)+f(a-x)$ does not vanish on $\lcrc{0}{a}$. Evaluate $\dint _0
^a\dfrac{f(x)\d{x}}{f(x)+f(a-x)}$.
\end{pro}
\begin{pro}Let $a>0$.
Let $F$ be a differentiable function such that $\forall x\in
\lcrc{0}{a}$ $F'(a-x)=F'(x)$. Evaluate $\dint _0 ^a F(x)\d{x}$.
\end{pro}
\begin{pro}Let $n \geq 0$ be an integer.
Let $a$ be the unique  differentiable function such that $\forall
x\in \BBR$ $$(a(x))^{2n+1}+a(x)=x.$$ Evaluate $\dint _0 ^x
a(t)\d{t}$.
\end{pro}

\begin{pro}
Find $\dint _0 ^{\pi/2} \dfrac{\sin x\d{x}}{\sin x + \cos x}$.
\end{pro}
\begin{pro}
Find $\dint _0 ^{\pi/2} \dfrac{1\d{x}}{1 + (\tan x)^{\sqrt{2}}}$.
\end{pro}
\begin{pro}
Find $\dint  \dfrac{1}{x\sqrt{x^2-1}} \d{x}$.
\begin{answer} Put $u = \sqrt{x^2-1}; u^2 = x^2-1$ so that
$2u\mathrm{d}u = 2x\d{x}$ and $\dfrac{\d{x}}{x} =
\dfrac{x\d{x}}{x^2} = \dfrac{u\mathrm{d}u}{u^2 +1}$. Thus
$$\dint  \dfrac{1}{x\sqrt{x^2-1}} \d{x} = \dint  \dfrac{u}{(u^2 + 1)u} \mathrm{d}u = \dint  \dfrac{1}{u^2 + 1} \mathrm{d}u
= \arctan u + C = \arctan\sqrt{x^2-1} + C.  $$
\end{answer}
\end{pro}

\begin{pro} Find $\dis{\dint  \dfrac{1}{1 + \sqrt{x + 1}} \d{x}}$. \begin{answer} Put $u = \sqrt{x+1}; u^2 = x + 1$; from where $\d{x} =
2u\mathrm{d}u$. Whence
$$ \dint  \dfrac{1}{1 + \sqrt{x + 1}} \d{x}= \dint  \dfrac{2u}{1 + u} \ \mathrm{d}u =
\dint \left(2- \dfrac{2}{1 + u}\right) \ \mathrm{d}u = 2u - 2\log|1
+ u| + C = 2\sqrt{1+x}-2\log |1+\sqrt{1+x}| + C.$$
\end{answer}
\end{pro}


\begin{pro} Find $\dis{\dint  \dfrac{x^{1/2}}{x^{1/2}-x^{1/3}} \d{x}}$. \begin{answer} Put $x = u^6; \d{x} =6u^5\mathrm{d}u$, giving
$$ \begin{array}{lll}
 \dint  \dfrac{x^{1/2}}{x^{1/2}-x^{1/3}} \d{x}  & = & \dint
\dfrac{(u^3)(6u^5)}{u^3-u^2} \mathrm{d}u\\ & = & \dint
\dfrac{6u^6}{u-1} \mathrm{d}u \\ & = & 6\dint  \left(u^5 + u^4 + u^3
+ u^2 + u + 1 + \dfrac{1}{u-1}\right) \mathrm{d}u \\ & = &
6\left(\dfrac{u^6}{6}+\dfrac{u^5}{5}+\dfrac{u^4}{4}+\dfrac{u^3}{3}+\dfrac{u^2}{2}+u
+ \log|u-1|\right) + C \\
& = &
x+\dfrac{6x^{5/6}}{5}+\dfrac{3x^{2/3}}{2}+2x^{1/2}+3x^{1/3}+6x^{1/6}
+ 6\log|x^{1/6}-1| + C.
\end{array}
$$
\end{answer}
\end{pro}


\begin{pro} Find $\dis{\dint  \dfrac{a^{2x}}{\sqrt{a^x + 1}} \d{x}}$, $a>
0$. \begin{answer} Put $u^2 = a^x + 1; 2u\mathrm{d}u = (\log a)a^x
\d{x}$ and so
$$\dint  \dfrac{a^{2x}}{\sqrt{a^x + 1}} \d{x} = \dint  \dfrac{2u(u^2-1)}{u\log a} \mathrm{d}u
=\dint  \dfrac{2u^2-2}{\log a} \mathrm{d}u = \dfrac{2u^3}{3\log a} -
\dfrac{2u}{\log a} + C = \dfrac{2(a^x+1)^{3/2}}{3\log a} -
\dfrac{2(a^x+1)^{1/2}}{ \log a} + C.$$
\end{answer}
\end{pro}



\begin{pro} Find $\dis{\dint
\dfrac{1}{(e^x - e^{-x})^2} \d{x}}$. \begin{answer} Observe that
$(e^x - e^{-x})^2 = (e^{-x}(e^{2x} - 1))^2 = e^{-2x}(e^{2x} - 1)^2$,
and so$$\dint  \dfrac{1}{(e^x - e^{-x})^2} \d{x} = \dint
\dfrac{e^{2x}}{(e^{2x} - 1)^2} \d{x} = \dint \dfrac{1}{2u^2}
\mathrm{d}u = -\dfrac{1}{2u} + C = -\dfrac{1}{2(e^{2x}-1)} + C,
$$on putting $u = e^{2x}-1$.

\end{answer}
\end{pro}
\begin{pro} Prove that $\dis{\dint  _1 ^{5} \dfrac{\lfloor x \rfloor}{x}
\d{x}} = 4\log(5) - 3 \log(2) - \log(3)$. \begin{answer} We have
$$\begin{array}{lll} \dint  _1 ^{5} \dfrac{\lfloor x \rfloor}{x}
\d{x}  & =  & \dint  _1 ^{2} \dfrac{\lfloor x \rfloor}{x} \d{x} +
\dint  _2 ^{3} \dfrac{\lfloor x \rfloor}{x} \d{x} + \dint  _3 ^{4}
\dfrac{\lfloor x \rfloor}{x} \d{x} + \dint  _4 ^5 \dfrac{\lfloor x
\rfloor}{x}
\d{x}  \\
& =  & \dint  _1 ^{2} \dfrac{1}{x} \d{x} +  \dint  _2 ^{3}
\dfrac{2}{x} \d{x} + \dint  _3 ^{4} \dfrac{3}{x} \d{x} + \dint _4 ^5
\dfrac{4}{x}
\d{x}  \\
& = & (\log 2 - \log 1) + 2(\log 3 - \log 2) + 3(\log 4 - \log 3) +
4(\log 5 - \log 4) \\ &  = & 4\log(5) - 3 \log(2) - \log(3)
.\end{array}
$$

\end{answer}
\end{pro}



\begin{pro}  Find $\dint  e^{e^x+x} \ \d{x}$.
\begin{answer} Put $u= e^x$, etc.
$$\dint   e^{e^x+x}\d{x} = \dint  e^x e^{e^x} \d{x}=\dint  e^{e^x} \d{e^x} = e^{e^x}+C$$

\end{answer}
\end{pro}
\begin{pro}  Find $\dis{\dint  \tan x \log (\cos x) \ \d{x}}$.\\
\begin{answer}Put $u = \log (\cos x)$, etc.
$$\dint  \tan x \log (\cos x)\d{x} = \dint  (\log (\cos x))\d{(-\log (\cos(x)))} = -\dfrac{(\log (\cos x))^2}{2}+C$$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{\log\log x}{x\log x} \ \d{x}}$.\\
\begin{answer} Put $u = \log\log x$, etc.
$$\dint   \dfrac{\log\log x}{x\log x} \d{x} = \dint  \log\log x\d{\left(\log\log x\right)} = \dfrac{\left(\log\log x\right)^2}{2} +C $$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{x^{18}-1}{x^3-1} \ \d{x}}$.\\
\begin{answer} Carry out the long division.
$$\dint   \dfrac{x^{18}-1}{x^3-1} \d{x} = \dint  (x^{15}+x^{12}+x^9+x^6+x^3+1)\d{x} = \dfrac{x^{16}}{16}+\dfrac{x^{13}}{13} +
\dfrac{x^{10}}{10} +\dfrac{x^7}{7}+\dfrac{x^4}{4}+x+C $$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{1}{x^{8}+x} \ \d{x}}$.\\
\begin{answer}After an algebraic trick, put $u=1+x^{-7}$, etc.
$$\dint   \dfrac{1}{x^{8}+x}\d{x} = \dint  \dfrac{x^{-8}}{1+x^{-7}}\d{x} =
-\frac{1}{7}\dint  \dfrac{\d{(1+x^{-7})}}{1+x^{-7}} =
-\frac{1}{7}\log |1+x^{-7}|+C$$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{4^x}{2^x + 1} \ \d{x}}$.\\
\begin{answer}Put $u = 2^x+1$
$$ \dint  \dfrac{2^x2^x}{2^x+1}\d{x} =\frac{1}{\log 2}\dint  \dfrac{2^x}{2^x+1}\d{(2^x+1)} =
\frac{1}{\log 2}\dint  \dfrac{u-1}{u}\d{u} =\frac{1}{\log 2}\left(u
- \log |u|\right) +C =\frac{1}{\log 2}\left(2^x+1 - \log
|2^x+1|\right) +C
$$
\end{answer}
\end{pro}

\begin{pro}  Find $\dint  \dfrac{x^2}{(x+1)^{10}} \ \d{x}$.\\
\begin{answer}Put $u=x+1$. Then $x^2= (u-1)^2 = u^2-2u+1$, and hence
$$ \begin{array}{lll}\dint \dfrac{x^2}{(x+1)^{10}} \d{x} & =&
 \dint  \dfrac{u^2-2u+1}{u^{10}} \d{u}\\ & = & \dint  u^{-8} -2u^{-9} +u^{-10} \d{u} \\ &
 = &
-\frac{u^{-7}}{7} +\frac{u^{-8}}{4}-\frac{u^{-9}}{9}+C\\
&= &-\frac{(x+1)^{-7}}{7}
+\frac{(x+1)^{-8}}{4}-\frac{(x+1)^{-9}}{9}+C\\\end{array}$$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{1}{1 + e^x} \ \d{x}}$.
\begin{answer}Algebraic trick, and then $u=e^{-x}+1$, etc.
$$ \dint  \dfrac{1}{1 + e^x} \d{x} = \dint  \dfrac{e^{-x}}{e^{-x}+1}\d{x} = -\dint  \dfrac{1}{e^{-x}+1} \d{(e^{-x}+1)} = -\log |e^{-x}+1|+C  $$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{1}{1-\sin x} \ \d{x}}$.\\
\begin{answer}
$$ \dint   \dfrac{1}{1-\sin x} \d{x} =  \dint   \dfrac{1+\sin x}{1-\sin^2 x} \d{x} = \dint   \dfrac{1+\sin x}{\cos^2 x} \d{x}
=\dint  \sec^2 x + \sec x\tan x \d{x} = \tan x +\sec x + C$$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \sqrt{1+\sin 2x} \ \d{x}}$.\\
\begin{answer}
$$ \begin{array}{lll}\dint  \sqrt{1+\sin 2x}\d{x} & = &
 \dint  \sqrt{\sin^2x+2\sin x\cos x+\cos^2x}\d{x} \\ &= &
 \dint  \sqrt{(\sin x + \cos x)^2}\d{x}\\ & = & \dint  |\sin x + \cos x|\d{x}\\
 &  = & \mp\cos x \pm\sin x +C \end{array}$$
\end{answer}
\end{pro}

\begin{pro}  Find $\dis{\dint  \dfrac{x}{\sqrt{1-x^4}} \ \d{x}}$.
\begin{answer} Put $u =x^2$, etc.
$$ \dint   \dfrac{x}{\sqrt{1- (x^2)^2}} \d{x} = \frac{1}{2} \dint  \dfrac{1}{\sqrt{1-u^2}}\d{u} = \frac{1}{2}\arcsin u +C = \frac{1}{2}\arcsin x^2 +C $$
\end{answer}
\end{pro}


\begin{pro} Find $\dint \sec^4x\d{x}$.\\
\begin{answer}
We have
$$ \begin{array}{lll}\dint \sec^4x\d{x} & = & \dint \sec^2x (\tan^2x+1)\d{x} \\
& = & \dint \sec^2x \tan^2x\d{x}+\dint \sec^2x \d{x} \\
& = & \dint (\tan x)^2\d{(\tan x)} + \dint \sec^2x\d{x} \\
& = & \dfrac{\tan^3x}{3} + \tan x +C.
\end{array}.
$$
\end{answer}

\end{pro}
\begin{pro} Find $\dint \sec^5x\d{x}$.\\
\begin{answer}
 We have
$$ \begin{array}{lll}\dint \sec^5x \d{x} & = &  \dint \sec^3x\sec^2x\d{x}\\
& = &  \dint \sec^3x\d{(\tan x)} \\
& = & \sec^3x\tan x - \dint \tan x \d{(\sec^3x)} \\
 & = & \sec^3x\tan x - 3\dint \tan^2 x\sec^2x\sec x \d{x} \\
  & = & \sec^3x\tan x - 3\dint (\sec^2x-1)\sec^3x \d{x} \\

& = & \sec^3x\tan x - 3\dint \sec^5x\d{x}+3\dint \sec^3x \d{x} \\
\end{array} $$ The above implies that
$$ \begin{array}{lll}\dint \sec^5x \d{x}
& = & \dfrac{\tan x\sec^3x}{4} + \frac{3}{4}\dint \sec^3x \d{x} \\
 & = & \dfrac{\tan x\sec^3x}{4} +
\dfrac{3\tan x\sec x}{8} + \dfrac{3}{8}\log |\sec x + \tan x|+C,
\\
\end{array} $$upon recalling from class that
$$ \dint \sec^3x \d{x} = \dfrac{\tan x\sec x}{2} + \dfrac{1}{2}\log |\sec x + \tan x|+C  $$
   \end{answer}

\end{pro}
\begin{pro} Find $\dint e^{x^{1/3}}\d{x}$.\\
\begin{answer}
First put $t = x^{1/3}$, then $t^3 = x \implies 3t^2\d{t} =\d{x}$.
Thus
$$ \begin{array}{lll}\dint e^{x^{1/3}}\d{x} & = & \dint 3t^2e^{t}\d{t}\\
& = & 3t^2e^t -6te^t - 6e^t +C \\
& = &  3x^{2/3}e^{x^{1/3}} -6x^{1/3}e^{x^{1/3}} -6e^{x^{1/3}} +C,
\end{array} $$where the penultimate step results from tabular
integration by parts.
\end{answer}

\end{pro}
\begin{pro} Find $\dint \log (x^2+1)\d{x}$.\\
\begin{answer}
We have
$$ \begin{array}{lll}\dint \log (x^2+1)\d{x} & = & x\log (x^2+1) - \dint x\d{(\log(x^2+1))} \\
& = & x\log (x^2+1) - 2\dint \dfrac{x^2}{x^2+1}\d{x} \\
& = & x\log (x^2+1) - 2\dint \dfrac{x^2+1-1}{x^2+1}\d{x} \\
& = & x\log (x^2+1) - 2\dint \left(1-\dfrac{1}{x^2+1}\right)\d{x} \\
& = & x\log (x^2+1) - 2(x-\arctan x)+C \\
\end{array}
$$
\end{answer}

\end{pro}
\begin{pro} \label{pro:xecos}Find $\dint xe^x\cos x\d{x}$.\\
\begin{answer}
 Put
$$I = \dint xe^x\cos x := (Ax+B)e^x\cos x + (Cx+D)e^x\sin x+ K.
$$Differentiating both sides,
$$ xe^x\cos x = Ae^x\cos x + (Ax+B)e^x\cos x -(Ax+B)e^x\sin x + Ce^x\sin x + (Cx+D)e^x\sin x + (Cx+D)e^x\cos x. $$
Equating coefficients,
$$\begin{array}{lll} xe^x\cos x & : & 1 = A+C \\
xe^x\sin x & : & 0 = -A+C \\
e^x\cos x & : & 0 = A+B+D \\
e^x\sin x & : & 0 = -B+C+D\\\end{array}  $$ From the first two
equations $C = \frac{1}{2}, A = \frac{1}{2}.$ Then the third and
fourth equations become $-\frac{1}{2} = B+D; -\frac{1}{2} = -B+D$,
whence $D=-\frac{1}{2}$, and $B=0$. We conclude that
$$\dint xe^x\cos x = \dfrac{x}{2}e^x\cos x + \left(\dfrac{x-1}{2}\right)e^x\sin x+ K.
$$
\end{answer}

\end{pro}
\begin{pro} Find $\dint x^{2/3}\log x\d{x}$.\\
\begin{answer}
We will do this one two ways: first, by making the substitution $$t
= \log x\implies e^t = x \implies  e^t\d{t}=\d{x}.$$Observe also
that $x^{2/3} = e^{2t/3}$. Then
$$ \begin{array}{lll} \dint x^{2/3}\log x\d{x}& = & \dint te^{2t/3}e^t\d{t} \\
& = & \dfrac{3t}{5}e^{5t/3} - \dfrac{9}{25}e^{5t/3} + C \\
& = & \dfrac{3(\log x)}{5}x^{5/3} - \dfrac{9}{25}x^{5/3}+C.
\end{array}
$$
{\em Aliter:} By directly integrating by parts,
$$ \begin{array}{lll} \dint x^{2/3}\log x\d{x}& = & \dint \log x\d{\left(\dfrac{3x^{5/3}}{5}\right)} \\
& = & \dfrac{3x^{5/3}}{5}\log x - \dfrac{3}{5}\dint x^{5/3}\d{(\log x)}\\
& = & \dfrac{3(\log x)}{5}x^{5/3} - \dfrac{3}{5}\dint x^{2/3}\d{ x}\\
& = & \dfrac{3(\log x)}{5}x^{5/3} - \dfrac{9}{25}x^{5/3}+C,
\end{array}
$$as before.

\end{answer}

\end{pro}
\begin{pro} Find $\dint \sin (\log x)\d{x}$.\\
\begin{answer}
This integral can be done multiple ways. For example, you may
integrate by parts directly and then ``solve'' for the integral.
Another way is the following. Start by putting $$t = \log x\implies
e^t = x \implies e^t\d{t}=\d{x}.$$ Then
$$ \dint \sin (\log x)\d{x} = \dint e^t\sin t\d{t},$$an integral that
we found in class. We will find it again, using a method similar of
problem \ref{pro:xecos}. Put $$I = \dint e^t\cos t\d{t} := Ae^t\cos
t + Be^t\sin t + K.$$ Differentiating both sides
$$ e^t\cos t = Ae^t\cos t -Ae^t\sin t + Be^t\sin t + Be^t\cos t.  $$
Equating coefficients,
$$\begin{array}{lll}  e^t\cos t& : & 1 = A+B \\
e^t\sin t & : & 0 = -A + B\end{array}  $$ and so $A = B =
\frac{1}{2}$. We have thus
$$ \begin{array}{lll}\dint \sin (\log x)\d{x} & = & \dint e^t\sin t\d{t}  \\
& = & \frac{1}{2}e^t\cos t + \frac{1}{2}e^t\sin t +K \\
& = & \frac{1}{2}x\cos \log x + \frac{1}{2}x\sin \log x +K. \\
\end{array}$$

\end{answer}

\end{pro}
\begin{pro} Find $\dint \dfrac{\log\log x}{x}\d{x}$.\\
\begin{answer}
Put $t = \log\log x \implies e^{e^t} =x \implies e^te^{e^t}\d{t} =
\d{x}$. Hence
$$ \begin{array}{lll}\dint \dfrac{\log\log x}{x}\d{x} & = & \dint \dfrac{te^te^{e^t}}{e^{e^t}}\d{t} \\
& = & te^t-e^t +C \\
& = & (\log x)(\log\log x) - (\log x) +C,   \end{array} $$where the
penultimate equality follows from a tabular integration by parts.
\end{answer}

\end{pro}
\begin{pro}[$\dint \sec x\d{x}$ in three ways]
A traditional indefinite integral is
$$\dint \sec x \d{x} = \log (\tan x + \sec x) +C.$$Justify this formula.

\bigskip
Now, prove that $\dfrac{1}{\cos x} = \dfrac{\cos x}{2(1+\sin x)} +
\dfrac{\cos x}{2(1-\sin x)}.$ Use this to find a second formula for
$\dint \sec x\d{x}$.

\bigskip
A third way is as follows.  Using $\sin 2\theta = 2\sin\theta \cos
\theta$ shew that $\dint \csc x \d{x} = \log |\tan \frac{x}{2}| +
C$. Now use $\csc (\frac{\pi}{2}+x) = \sec x$ to find yet another
formula for $\dint \sec x\d{x}$.
\begin{answer}
Observe that
$$\dint \sec x \d{x} =\dint \dfrac{\sec x \tan x + \sec^2x}{\tan x + \sec x} \d{x}
= \dint \d{\left(\log (\tan x + \sec x)\right)} =  \log (\tan x +
\sec x) +C,$$

\bigskip
For the second way, simple algebra will yield the identity. We have
$$ \begin{array}{lll} \dint \sec x\d{x}& = & \dint \dfrac{\cos x}{2(1+\sin x)}\d{x} + \dint \dfrac{\cos x}{2(1-\sin
x)}\d{x} \\
& = & \frac{1}{2}\log |1+\sin x| -\frac{1}{2}\log |1-\sin x| +C\\
& = & \dfrac{1}{2}\log \Big|\dfrac{1+\sin x}{1-\sin x}\Big| + C
\end{array}.
$$

\bigskip For the third way, we have
$$\begin{array}{lll} \dint \csc x \d{x}& = & \dint \dfrac{1}{\sin x} \d{x}\\
& = & \dint \dfrac{1}{2\sin \frac{x}{2}\cos \frac{x}{2}} \d{x}
\\
& = & \dint \dfrac{\cos \frac{x}{2}}{2\sin \frac{x}{2}\cos ^2
\frac{x}{2}} \d{x}\\
& = & \dint \dfrac{\sec^2 \frac{x}{2}}{2\tan \frac{x}{2}} \d{x}\\
& \stackrel{u = \tan \frac{x}{2}}{=} & \dint \dfrac{\d{u}}{u}\\
& = & \log |\tan \frac{x}{2}|+C.
\end{array}$$
Thus $$\dint \sec x\d{x} = \dint \csc (\frac{\pi}{2}+x) \d{x} =\dint
\csc (\frac{\pi}{2}+x) \d{(\frac{\pi}{2}+x)} = \log \Big|\tan
(\frac{\pi}{4}+\frac{x}{2})\Big|+C.   $$
\end{answer}
\end{pro}
\begin{pro} Find $\dint (\arcsin x)^2\d{x}$. \\
\begin{answer}
 Putting $t=\arcsin x$ we have
$$ \sin t = x \implies \cos t \d{t} = \d{x},  $$whence
$$\begin{array}{lll}\dint (\arcsin x)^2\d{x} & = & \dint t^2\cos t\d{t} \\
& = & t^2\sin t + 2t\cos t - 2\sin t + C \\
& = & (\arcsin x)^2x + 2(\arcsin x)\cos (\arcsin x) - 2x +C \\
 & = & (\arcsin x)^2x + 2(\arcsin x)\sqrt{1-x^2} - 2x +C \\
\end{array}
$$

\end{answer}

\end{pro}
\begin{pro} Find $\dint  \dfrac{\d{x}}{\sqrt{x + 1} + \sqrt{x - 1}}$. \\
\begin{answer}
We have
$$ \begin{array}{lll} \dint  \dfrac{\d{x}}{\sqrt{x + 1} + \sqrt{x - 1}}& = &
\dint  \dfrac{(\sqrt{x + 1} - \sqrt{x - 1})\d{x}}{2} \\
& = & \dfrac{1}{3}(x+1)^{3/2} - \dfrac{1}{3}(x-1)^{3/2}+C
\end{array}.
$$
\end{answer}

\end{pro}
 \begin{pro} $\dint x\arctan x\d{x}$. \\
\begin{answer}
We have
$$ \begin{array}{lll}\dint x\arctan x\d{x} & = &  \dint \arctan x\d{\left(\dfrac{x^2}{2}\right)} \\
& = &\dfrac{x^2}{2}\arctan x - \dint \dfrac{x^2}{2}\d{(\arctan x)} \\
& = &\dfrac{x^2}{2}\arctan x - \dint \frac{1}{2}\dfrac{x^2}{1 + x^2}\d{x} \\
& = &\dfrac{x^2}{2}\arctan x - \dint \frac{1}{2}\dfrac{x^2+1-1}{1 + x^2}\d{x} \\
& = &\dfrac{x^2}{2}\arctan x - \dfrac{x}{2} + \frac{1}{2}\arctan x+C \\

\end{array}.
$$
\end{answer}

\end{pro}
\begin{pro} Find $\dint \sqrt{\tan x} \d{x} $.\\
\begin{answer}
Put $u=\sqrt{\tan x}$ and so $u^2 = \tan x$, $2u\d{u} = \sec^2x\d{x}
= (\tan^2x+1)\d{x} = (u^4+1)\d{x}$. Hence the integral becomes
$$\dint \sqrt{\tan x} \d{x} = 2\dint \dfrac{u^2}{u^4+1}\d{u}.  $$To
decompose  the above fraction into partial fractions observe (Sophie
Germain's trick) that $u^4+1 = u^4+2u^2+1 -2u^2=
(u^2+u\sqrt{2}+1)(u^2-u\sqrt{2}+1)$ and hence
$$\begin{array}{lll} \dint \sqrt{\tan x} \d{x} & = & 2\dint \dfrac{u^2}{u^4+1}\d{u} \\
& = & -\dfrac{\sqrt{2}}{2}\dint \dfrac{u}{u^2+u\sqrt{2}+1}\d{u}+
\dfrac{\sqrt{2}}{2}\dint \dfrac{u}{u^2-u\sqrt{2}+1}\d{u}\\
& = & -\dfrac{\sqrt{2}}{4}\log (u^2+u\sqrt{2}+1) +
\dfrac{\sqrt{2}}{4}\log (u^2-u\sqrt{2}+1) +
\dfrac{\sqrt{2}}{2}\arctan (\sqrt{2}u+1)-\dfrac{\sqrt{2}}{2}\arctan
(-\sqrt{2}u+1)+C\\
& = &-\dfrac{\sqrt{2}}{4}\log (\tan x+\sqrt{2\tan x}+1) +
\dfrac{\sqrt{2}}{4}\log (\tan x-\sqrt{2\tan x}+1) \\ & & \qquad  +
\dfrac{\sqrt{2}}{2}\arctan (\sqrt{2\tan
x}+1)-\dfrac{\sqrt{2}}{2}\arctan (-\sqrt{2\tan x}+1)+C
\end{array}$$
\end{answer}

\end{pro}
\begin{pro} Find $\dint \dfrac{2x+1}{x^2(x-1)} \d{x}$.\\
\begin{answer}
Put $$\dfrac{2x+1}{x^2(x-1)} = \dfrac{A}{x} + \dfrac{B}{x^2} +
\dfrac{C}{x-1} \implies 2x+1 = Ax(x-1) + B(x-1) + Cx^2.
$$Letting $x=1$ we get $3=C$. Letting $x=0$ we get $1 = -B\implies
B=-1$. To get $A$ observe that equating the coefficients of $x^2$ on
both sides we get $0 = A+C$, whence $A=-3$. Thus
$$\begin{array}{lll}\dint \dfrac{2x+1}{x^2(x-1)} \d{x} & = &-3\dint \dfrac{1}{x}\d{x} - \dint \dfrac{1}{x^2}\d{x}
+3\dint \dfrac{1}{x-1}\d{x}\\
& = & -3\log |x| +\dfrac{1}{x} + 3\log |x-1| +C \\
& = & 3\log \Big|\frac{x-1}{x}\Big| + \dfrac{1}{x}+C.
 \end{array}$$
\end{answer}

\end{pro}
\begin{pro} Find $\dint \log (x + \sqrt{x}) \d{x}$.\\
\begin{answer}Integrating by parts,
$$\begin{array}{lll}\dint \log (x + \sqrt{x}) \d{x} & =  &x\log (x + \sqrt{x})-\dint x \d{\log(x + \sqrt{x})} \\
& = &  x\log (x + \sqrt{x}) -\dint \dfrac{x(1 + \dfrac{1}{2\sqrt{x}})}{x+\sqrt{x}} \d{x}\\
& = &x\log (x + \sqrt{x}) -\dint \left(  1
-\dfrac{1}{2}\cdot\dfrac{\sqrt{x}}{x+\sqrt{x}} \right) \d{x}
\\
& = & x\log (x + \sqrt{x}) -x + \dfrac{1}{2}\dint
\dfrac{\sqrt{x}}{x+\sqrt{x}} \d{x}
\\
& \stackrel{u = \sqrt{x}}{=} & x\log (x + \sqrt{x}) -x + \dint
\dfrac{u^2}{u^2+u} \d{u}
\\
& \stackrel{u = \sqrt{x}}{=} & x\log (x + \sqrt{x}) -x + \dint
1-\dfrac{1}{u+1} \d{u}
\\

& = & x\log (x + \sqrt{x}) -x + u - \log (u+1)+C
\\
& = & x\log (x + \sqrt{x}) -x + \sqrt{x}-\log (\sqrt{x}+1)+C
\\
\end{array}$$
\end{answer}
\end{pro}

\begin{pro} Find $\dint \dfrac{1}{x^4+1} \d{x}$.\\
\begin{answer}
We use Sophie Germain's trick to factor $$x^4+1 = x^4+2x^2+1-2x^2 =
(x^2+1)^2 - 2x^2 = (x^2-\sqrt{2}x+1)(x^2+\sqrt{2}x+1),$$and seek the
partial fraction decomposition
$$\dfrac{1}{x^4+1} = \dfrac{Ax+B}{x^2-\sqrt{2}x+1} +\dfrac{Cx+D}{x^2+\sqrt{2}x+1} \implies 1 = (Ax+B)(x^2+\sqrt{2}x+1) + (Cx+D)(x^2-\sqrt{2}x+1).  $$
Equating coefficients
$$\begin{array}{lll} x^3 & : & 0 = A +C \\
x^2 & : & 0 =  B +D + \sqrt{2}(A -C) \\
x & : & 0 = A+ C+\sqrt{2}(B-D) \\
x^0 & : & 1 = B +D \\
\end{array}$$From the first and third equation it follows that
$A=-C$ and that $B=D$. From the fourth equation $B=D = \frac{1}{2}$
and from the second equation $A = -\dfrac{1}{2\sqrt{2}} = -C$. Hence
we must integrate
$$\begin{array}{lll}\dint \dfrac{1}{x^4+1} \d{x} & = &
\dint\dfrac{\sqrt{2}x+2}{4(x^2+\sqrt{2}x+1)}\d{x} -
\dint\dfrac{\sqrt{2}x-2}{4(x^2-\sqrt{2}x+1)}\d{x} \\
& = &
\dfrac{\sqrt{2}}{8}\dint\dfrac{2x+\sqrt{2}}{x^2+\sqrt{2}x+1}\d{x} +
\dfrac{1}{4}\dint\dfrac{1}{x^2+\sqrt{2}x+1}\d{x} -
\dfrac{\sqrt{2}}{8}\dint\dfrac{2x-\sqrt{2}}{x^2-\sqrt{2}x+1}\d{x} +
\dfrac{1}{4}\dint\dfrac{1}{x^2-\sqrt{2}x+1}\d{x} \\
& = & \dfrac{\sqrt{2}}{8}\log (x^2 +
x\sqrt{2}+1)-\dfrac{\sqrt{2}}{8}\log (x^2 - x\sqrt{2}+1) +
\dfrac{1}{2}\dint \dfrac{\d{x}}{(x\sqrt{2}+1)^2+1}
+\dfrac{1}{2}\dint \dfrac{\d{x}}{(-x\sqrt{2}+1)^2+1} \\
& = & \dfrac{\sqrt{2}}{8}\log (x^2 +
x\sqrt{2}+1)-\dfrac{\sqrt{2}}{8}\log (x^2 - x\sqrt{2}+1)
+\dfrac{\sqrt{2}}{4}\arctan (x\sqrt{2}+1)-\dfrac{\sqrt{2}}{4}\arctan
(-x\sqrt{2}+1)+C
\end{array}$$
\end{answer}

\end{pro}
\begin{pro} Find $\dint \dfrac{1}{x^3+1} \d{x}$.\\
\begin{answer}
We begin by observing that $$\dfrac{1}{x^3+1} = \dfrac{A}{x+1} +
\dfrac{Bx+C}{x^2-x+1} \implies 1 = A(x^2-x+1) + (Bx+C)(x+1).
$$Letting $x=-1$ we obtain $1 = 3A \implies A = \frac{1}{3}$. Letting $x=0$ we
obtain $1 = A + C \implies C = 1-A =  \dfrac{2}{3}$. Finally, we
must have $A+B = 0$, since the coefficient of $x^2$ must be zero.
Thus $B = -\frac{1}{3}$. We must then integrate
$$\begin{array}{lll} \dint \dfrac{\d{x}}{3(x+1)} - \dint \dfrac{x-2}{3(x^2-x+1)}\d{x}&  = & \frac{1}{3}\log |x+1|
- \dint \dfrac{x-\frac{1}{2}}{3\left((x-\frac{1}{2})^2 + \frac{3}{4}\right)}\d{x}
+\dfrac{1}{2}\dint \dfrac{\d{x}}{(x-\frac{1}{2})^2
+ \frac{3}{4}}\\
& = & \frac{1}{3}\log |x+1| -\frac{1}{6}\log |(x-\frac{1}{2})^2 +
\frac{3}{4}| +\dfrac{2}{3}\dint \dfrac{1}{\frac{4}{3}(x-\frac{1}{2})^2 + 1}\\
& = & \frac{1}{3}\log |x+1| -\frac{1}{6}\log |(x-\frac{1}{2})^2 +
\frac{3}{4}| +\dfrac{2}{3}\cdot \dfrac{\sqrt{3}}{2}\arctan
\frac{2}{\sqrt{3}}(x-\frac{1}{2}) \\
& = & \frac{1}{3}\log |x+1| -\frac{1}{6}\log |x^2-x+1|
+\dfrac{\sqrt{3}}{3}\arctan
\frac{2}{\sqrt{3}}(x-\frac{1}{2}) \\

\end{array}$$
\end{answer}

\end{pro}

\begin{pro}
Demonstrate that for all strictly positive integers $n$,
$$\left(1 + \frac{1}{n}\right)^{n}\left(1 + \frac{1}{4n}\right) <e<\left(1 + \frac{1}{n}\right)^{n}\left(1 + \frac{1}{2n}\right),
$$that is, $e$ is contained in the second quarter of the interval
$\lcrc{\left(1 + \frac{1}{n}\right)^{n}}{\left(1 +
\frac{1}{n}\right)^{n+1}}$.
\end{pro}

\end{multicols}


\section{Riemann-Stieltjes Integration}

\section{Euler's Summation Formula}

\chapter{Sequences and Series of Functions}
\section{Pointwise Convergence}
\begin{df}
We say that a sequence of functions $\seq{f_n}{n=1}{+\infty}$
$f_n:I\rightarrow \BBR$ defined on an interval $I\subseteqq \BBR$
{\em converges pointwise to the function $f$} if $\forall x\in I$,
$\forall \varepsilon >0$ $\exists N>0 $ (depending on $\varepsilon$
and on $x$) such that
$$ n\geq N \implies \absval{f_n(x)-f(x)}<\varepsilon. $$
\end{df}
\begin{exa}
The sequence of functions $x\mapsto x^n, n=1,2,\ldots$ converges
pointwise on the interval $\lcrc{0}{1}$ to the function
$f:\lcrc{0}{1}\rightarrow \{0,1\}$ with $$ f(x) =
\left\{\begin{array}{ll} 0 & \mathrm{if}\ x\in \lcro{0}{1}\\ 1 &
\mathrm{if}\ x=1\\
\end{array}\right.
$$
\end{exa}


\section{Uniform Convergence}
\begin{df}
We say that a sequence of functions $\seq{f_n}{n=1}{+\infty}$
$f_n:I\rightarrow \BBR$ defined on an interval $I\subseteqq \BBR$
{\em converges uniformly to the function $f$} if $\forall x\in I$,
$\forall \varepsilon >0$ $\exists N>0 $ (depending only on
$\varepsilon$) such that
$$ n\geq N \implies \absval{f_n(x)-f(x)}<\varepsilon. $$
In this case we write  $f_n \unif f$.
\end{df}


\begin{thm}
Let $\seq{f_n}{n=1}{+\infty}$ be a sequence of functions defined
over a common domain $I$. If there exists a numerical sequence
$\seq{a_n}{n=1}{+\infty}$ with $a_n\to 0$ as $\ngroes$, and a
function $f$ defined over $I$ such that eventually$$
\absval{f_n(x)-f(x)}\leq a_n,
$$then $f_n \unif f$.
\end{thm}




\begin{thm}
If the sequence of continuous functions $\seq{f_n}{n=1}{+\infty}$
$f_n:I\rightarrow \BBR$ defined on an open interval $I\subseteqq
\BBR$ converges uniformly to $f$ on $I$, then $f$ is continuous on
$I$. Moreover, if $x_0\in I$ then we may exchange the limits, as in
$$ \lim _{\ngroes} \left( \lim _{x\rightarrow x_0}f_n(x) \right) =  \lim _{x\rightarrow x_0} \left( \lim _{\ngroes}f_n(x) \right) = \lim _{x\rightarrow x_0} f(x). $$
\end{thm}
\begin{thm}
If the sequence of integrable functions $\seq{f_n}{n=1}{+\infty}$
$f_n:I\rightarrow \BBR$ defined on an open interval $I\subseteqq
\BBR$ converges uniformly to $f$ on $I$, then $f$ is integrable on
$I$. Moreover, if $(a, b)\in I^2$ then we may exchange the limit
with the integral, as in
$$ \lim _{\ngroes} \left( \int _a ^b f_n(x)\d{x} \right) =  \int _a ^b  \left( \lim _{\ngroes}f_n(x) \right)\d{x} = \int _a ^b f(x)\d{x}. $$
\end{thm}











\section{Integrals and Derivatives of Sequences of Functions}




\section{Power Series}
A {\em power series} about $x=a$ is a series of the form
$$f(x) =\sum _{n=0} ^{+\infty} a_n(x-a)^n.$$This is a function of
$x$, and truncating it gives polynomial approximations to $f$. The
goal is to approximate ``decent'' functions about a given point
$x=a$.

\bigskip

These expansions don't necessarily make sense for all $x$. The
region where the power series converges is called the {\em interval
of convergence.}


\begin{exa}
Find the interval of convergence of the series
$\displaystyle{\sum_{n=1}^\infty \dfrac{2^n(x-3)^n}{\sqrt{n}}}$.
\end{exa}
\begin{solu}
By the ratio test, the series will converge if

$$\Big|\dfrac{2^{n+1}(x-3)^{n+1}}{\sqrt{n+1}}\cdot
\dfrac{\sqrt{n}}{2^n(x-3)^n}\Big| = 2\sqrt{\dfrac{n}{n+1}}|x-3|
\rightarrow r <1,
$$
that is when $$ 2|x-3|<1 \implies \dfrac{5}{2}<x<\dfrac{7}{2}.$$

The series converges absolutely when $\dfrac{5}{2}<x<\dfrac{7}{2}.$
We must also test the endpoints. At $x=\dfrac{5}{2}$ the series is
$\sum_{n=1}^\infty \dfrac{(-1)^n}{\sqrt{n}}$, which converges
conditionally by Leibniz's Test.  At $x=\dfrac{7}{2}$ the series is
$\sum_{n=1}^\infty \dfrac{1}{\sqrt{n}}$, which diverges.
\end{solu}
\section{Maclaurin Expansions to know by inspection}
\begin{itemize}
\item $$e^x = 1 + x + \dfrac{x^2}{2!} +\dfrac{x^3}{3!}+ \cdots $$
\item The sine is an odd function: $$\sin x =  x - \dfrac{x^3}{3!} +\dfrac{x^5}{5!}-\dfrac{x^7}{7!}+   \cdots $$
\item The cosine is an even function: $$\cos x = 1 - \dfrac{x^2}{2!} +\dfrac{x^4}{4!}-\dfrac{x^6}{6!}+   \cdots $$
\item If $a$ is a real constant,
$$\left(1+x\right)^a  = 1 + ax + \dfrac{a(a-1)}{2!}x^2 + \dfrac{a(a-1)(a-2)}{3!}x^3 + \dfrac{a(a-1)(a-2)(a-3)}{4!}x^4+\cdots $$
\end{itemize}

\begin{exa}
Expand $f(x) = \cos x$ around $x=1$.
\end{exa}
\begin{solu}
We have
$$\begin{array}{lll}\cos x  & = & \cos (x-1+1) \\
& = & \cos (x-1)\cos 1 - \sin (x-1)\sin 1 \\
& = & (\cos 1)\left(1-\dfrac{(x-1)^2}{2!}+
\dfrac{(x-1)^4}{4!}-\cdots\right) - (\sin
1)\left((x-1)-\dfrac{(x-1)^3}{3!}+ \dfrac{(x-1)^5}{5!}-\cdots\right)
\end{array}$$
\end{solu}




\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small
\begin{pro}
Given a finite collection of closed squares of total area $3$, prove
that they can be arranged to cover the unit square.
\end{pro}
\begin{pro}
Given a finite collection of closed squares of total area
$\frac{1}{2}$, prove that they can be arranged to cover the unit
square, with no overlaps.
\end{pro}


\end{multicols}

\section{Comparison Tests}



\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Let $\{a_n\}_{n=1} ^\infty$ be a sequence of real numbers satisfying
$0 < a_n <1$ for all $n$. Assume that $\sum _{n=1} ^\infty a_n$
diverges but  $\sum _{n=1} ^\infty a_n ^2$ converges. Let $f$ be a
function defined on $\lcrc{0}{1}$ whose second derivative exists and
is bounded on $\lcrc{0}{1}$. Prove that if  $\sum _{n=1} ^\infty
f(a_n)$ converges, so does  $\sum _{n=1} ^\infty\absval{f(a_n)}$.
\end{pro}


\end{multicols}

\section{Taylor Polynomials}


\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small


\begin{pro}
Evaluate $\dint _0 ^1 (\log x)(\log (1-x))\d{x}$.
\end{pro}

\begin{pro}
Evaluate the infinite series $\sum _{n=1} ^\infty \arctan
\dfrac{2}{n^2}$.
\end{pro}
\begin{pro}
Find the sum of the infinite series $$1-\dfrac{1}{4}+\dfrac{1}{6}
-\dfrac{1}{9}+\dfrac{1}{11}-\dfrac{1}{14}+\cdots .$$
\end{pro}
\end{multicols}

\section{Abel's Theorem}
\subsection*{Homework}\addcontentsline{toc}{subsection}{Homework}
\begin{multicols}{2}\columnseprule 1pt \columnsep
25pt\multicoltolerance=900\small

\begin{pro}
Put $$a_n=1-\dfrac{1}{2}+\dfrac{1}{3} -\dfrac{1}{4}+\cdots
+\dfrac{(-1)^{n+1}}{n}-\log 2 .$$Prove that $\sum _{n=1} ^\infty
a_n$ converges and find its sum.
\begin{answer}

\end{answer}
\end{pro}
\begin{pro}
Evaluate the sum $$\sum _{n=1} ^\infty
\dfrac{1+\dfrac{1}{2}+\dfrac{1}{3}+\cdots +\dfrac{1}{n}}{n(n+1)}.$$
\begin{answer}

\end{answer}
\end{pro}
\begin{pro}
Evaluate the sum $$\sum _{n=0} ^\infty
\left(\dfrac{1}{4n+1}+\dfrac{1}{4n+3}-\dfrac{1}{2n+2}\right).$$
\begin{answer}

\end{answer}
\end{pro}
\begin{pro}
Evaluate the limit $$\lim _{\alpha \rightarrow
0}\dfrac{1}{\alpha}\cdot \dint _0 ^\pi \tan\left(\alpha\sin
x\right)\d{x}.$$
\begin{answer}

\end{answer}
\end{pro}


\end{multicols}


\Closesolutionfile{calculillo}

\appendix
\renewcommand{\chaptername}{Appendix}
\chapter{Answers and Hints}\addcontentsline{toc}{section}{Answers and Hints}\markright{Answers and Hints}
 {\tiny\input{calculillo1}}

\begin{thebibliography}{9}
\bibitem[Apo]{Apo} Apostol, T. M., {\bf Calculus}, Vol 1 \& 2, 2nd
ed., Waltham: Xerox, 1967.


\bibitem[Har]{Har}Hardy, G. H., {\bf Pure Mathematics}, 10th ed.,
New York: Cambridge University Press, 1952.
\bibitem[Kla]{Kla} Klambauer, Gabriel, {\bf Aspects of Calculus},
New York: Springer-Verlag, 1986.

\bibitem[Lan]{Lan} Landau, E., {\bf Differential and Integral
Calculus}, New York, Chelsea Publishing Company, 1950.

\bibitem[Olm]{Olm} Olmstead, J. M. H., {\bf Calculus with Analytic
Geometry},  Vol 1 \& 2, New York: Appleton-Century-Crofts, 1966.

\bibitem[Spi]{Spi} Spivak, Michael {\bf Calculus}, 3rd ed.,
Houston, Texas: Publish or Perish, Inc., 1994.


\end{thebibliography}





\end{document}
