diff --git a/docs/generate_api_navbar_and_symbols.py b/docs/generate_api_navbar_and_symbols.py
index 18fe10f8d..048ef52ef 100644
--- a/docs/generate_api_navbar_and_symbols.py
+++ b/docs/generate_api_navbar_and_symbols.py
@@ -29,210 +29,9 @@
'parent_pages': [],
'child_pages': [],
},
- {
- 'page': 'data.tex',
- 'title': 'Data',
- 'parent_pages': [],
- 'child_pages': [],
- },
- {
- 'page': 'model.tex',
- 'title': 'Model',
- 'parent_pages': [],
- 'child_pages': [
- 'model-compositionality.tex',
- 'model-development.tex',
- ],
- },
- {
- 'page': 'model-compositionality.tex',
- 'title': 'Compositionality',
- 'parent_pages': [
- 'model.tex'
- ],
- 'child_pages': [],
- },
- {
- 'page': 'model-development.tex',
- 'title': 'Development',
- 'parent_pages': [
- 'model.tex'
- ],
- 'child_pages': [],
- },
- {
- 'page': 'inference.tex',
- 'title': 'Inference',
- 'parent_pages': [],
- 'child_pages': [
- 'inference-classes.tex',
- 'inference-compositionality.tex',
- 'inference-data-subsampling.tex',
- 'inference-development.tex',
- ],
- },
- {
- 'page': 'inference-classes.tex',
- 'title': 'Classes',
- 'parent_pages': [
- 'inference.tex'
- ],
- 'child_pages': [],
- },
- {
- 'page': 'inference-compositionality.tex',
- 'title': 'Compositionality',
- 'parent_pages': [
- 'inference.tex'
- ],
- 'child_pages': [],
- },
- {
- 'page': 'inference-data-subsampling.tex',
- 'title': 'Data Subsampling',
- 'parent_pages': [
- 'inference.tex'
- ],
- 'child_pages': [],
- },
- {
- 'page': 'inference-development.tex',
- 'title': 'Development',
- 'parent_pages': [
- 'inference.tex'
- ],
- 'child_pages': [],
- },
- {
- 'page': 'criticism.tex',
- 'title': 'Criticism',
- 'parent_pages': [],
- 'child_pages': [],
- },
- {
- 'page': 'reference.tex',
- 'title': 'Reference',
- 'parent_pages': [],
- 'child_pages': [],
- },
]
-def generate_navbar(page_data):
- """Return a string. It is the navigation bar for ``page_data``."""
- def generate_top_navbar():
- # Create top of navbar. (Note this can be cached and not run within a loop.)
- top_navbar = """\\begin{abstract}
-\section{API and Documentation}
-\\begin{lstlisting}[raw=html]
-
-
"""
- for page_data in PAGES:
- title = page_data['title']
- page_name = page_data['page']
- parent_pages = page_data['parent_pages']
- if len(parent_pages) == 0 and \
- page_name not in ['index.tex', 'reference.tex']:
- top_navbar += '\n'
-      top_navbar += '<a class="button" href="/api/' + \
-          page_name.replace('.tex', '') + '">'
-      top_navbar += title
-      top_navbar += '</a>'
-
- top_navbar += '\n'
-    top_navbar += ''
- return top_navbar
-
- page_name = page_data['page']
- title = page_data['title']
- parent_pages = page_data['parent_pages']
- child_pages = page_data['child_pages']
-
- navbar = generate_top_navbar()
- # Create bottom of navbar if there are child pages for that section.
- if len(child_pages) > 0 or len(parent_pages) > 0:
- if len(parent_pages) > 0:
- parent = parent_pages[0]
- parent_page = [page_data for page_data in PAGES
- if page_data['page'] == parent][0]
- pgs = parent_page['child_pages']
- else:
- pgs = child_pages
-
- navbar += '\n'
-    navbar += ''
-
- navbar += '\n'
- navbar += """
-\end{lstlisting}
-\end{abstract}"""
-
- # Set primary button in navbar. If a child page, set primary buttons
- # for both top and bottom of navbar.
- search_term = '" href="/api/' + page_name.replace('.tex', '') + '">'
- navbar = navbar.replace(search_term, ' button-primary' + search_term)
- if len(parent_pages) > 0:
- parent = parent_pages[0]
- search_term = '" href="/api/' + parent.replace('.tex', '') + '">'
- navbar = navbar.replace(search_term, ' button-primary' + search_term)
-
- return navbar
-
-
-def generate_models():
- import edward.models as module
- from edward.models import RandomVariable
- objs = [getattr(module, name) for name in dir(module)]
- objs = [obj for obj in objs
- if (isinstance(obj, type) and
- issubclass(obj, RandomVariable) and
- obj != RandomVariable
- )
- ]
- objs = sorted(objs, key=lambda cls: cls.__name__)
-
- links = [('@{{ed.models.{}}}').format(cls.__name__) for cls in objs]
- return '\n\item'.join(links)
-
-
-def generate_criticisms():
- import edward.criticisms as module
- objs = [getattr(module, name) for name in dir(module)]
- objs = [obj for obj in objs
- if (hasattr(obj, '__call__') or
- isinstance(obj, type))
- ]
- objs = sorted(objs, key=lambda cls: cls.__name__)
-
- links = [('@{{ed.criticisms.{}}}').format(cls.__name__) for cls in objs]
- return '\n\item'.join(links)
-
-
-def generate_util():
- import edward.util as module
- objs = [getattr(module, name) for name in dir(module)]
- objs = [obj for obj in objs
- if (hasattr(obj, '__call__') or
- isinstance(obj, type))
- ]
- objs = sorted(objs, key=lambda cls: cls.__name__)
-
- links = [('@{{ed.util.{}}}').format(cls.__name__) for cls in objs]
- return '\n\item'.join(links)
-
-
def get_tensorflow_version():
import tensorflow
return str(getattr(tensorflow, '__version__', ''))
@@ -248,24 +47,9 @@ def get_tensorflow_version():
path = os.path.join(out_dir, 'api', page_name)
print(path)
- # Generate navigation bar.
- navbar = generate_navbar(page_data)
-
+ # TODO do for /api/ed
# Insert autogenerated content into page.
document = open(path).read()
- assert '{{navbar}}' in document, \
- ("File found for " + path + " but missing {{navbar}} tag.")
- document = document.replace('{{navbar}}', navbar)
-
- if '{{models}}' in document:
- document = document.replace('{{models}}', generate_models())
-
- if '{{criticisms}}' in document:
- document = document.replace('{{criticisms}}', generate_criticisms())
-
- if '{{util}}' in document:
- document = document.replace('{{util}}', generate_util())
-
if '{{tensorflow_version}}' in document:
document = document.replace('{{tensorflow_version}}',
get_tensorflow_version())
diff --git a/docs/generate_api_toc.py b/docs/generate_api_toc.py
index dfd00d90d..20d8902af 100644
--- a/docs/generate_api_toc.py
+++ b/docs/generate_api_toc.py
@@ -23,9 +23,6 @@
toc = ''
for entry in data_map['toc']:
title = entry['title']
- if title == 'ed':
- continue
-
section = entry['section']
assert section[0]['title'] == 'Overview'
path = section[0]['path']
diff --git a/docs/parser/generate_lib.py b/docs/parser/generate_lib.py
index be95ec7c3..8c3ca2158 100644
--- a/docs/parser/generate_lib.py
+++ b/docs/parser/generate_lib.py
@@ -165,10 +165,10 @@ def write_docs(output_dir, parser_config, yaml_toc):
symbol_to_file[full_name] + '\n')
# Write a global index containing all full names with links.
- # with open(os.path.join(output_dir, 'index.md'), 'w') as f:
- # f.write(
- # parser.generate_global_index('Edward', parser_config.index,
- # parser_config.reference_resolver))
+ with open(os.path.join(output_dir, 'overview.md'), 'w') as f:
+ f.write(
+ parser.generate_global_index('Edward', parser_config.index,
+ parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
diff --git a/docs/tex/api/criticism.tex b/docs/tex/api/criticism.tex
deleted file mode 100644
index 32fd286ac..000000000
--- a/docs/tex/api/criticism.tex
+++ /dev/null
@@ -1,44 +0,0 @@
-\title{Criticism}
-
-{{navbar}}
-
-\subsubsection{Criticism}
-
-We can never validate whether a model is true. In practice, ``all
-models are wrong'' \citep{box1976science}. However, we can try to
-uncover where the model goes wrong. Model criticism helps justify the
-model as an approximation or point to good directions for revising the
-model.
-For background, see the criticism \href{/tutorials/criticism}{tutorial}.
-
-Edward explores model criticism using
-\begin{itemize}
- \item point evaluations, such as mean squared error or
- classification accuracy;
- \item posterior predictive checks, for making probabilistic
- assessments of the model fit using discrepancy functions.
-\end{itemize}
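-
-For example, a point evaluation and a posterior predictive check might
-look as follows (a minimal sketch; \texttt{x}, \texttt{z}, \texttt{beta},
-\texttt{qz}, and \texttt{qbeta} are assumed to come from a model and a
-completed inference, as on the inference page):
-
-\begin{lstlisting}[language=Python]
-x_post = ed.copy(x, {z: qz, beta: qbeta})
-
-ed.evaluate('log_likelihood', data={x_post: x_train})
-ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x_post]), data={x_post: x_train})
-\end{lstlisting}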
-
-% \subsubsection{Developing new criticism techniques}
-
-% Criticism is defined simply with utility functions. They take random
-% variables as input and output NumPy arrays.
-% Criticism techniques are simply functions which take as input data,
-% the probability model and variational model (binded through a latent
-% variable dictionary), and any additional inputs.
-
-% \begin{lstlisting}[language=Python]
-% def criticize(data, latent_vars, ...)
-% ...
-% \end{lstlisting}
-
-% Developing new criticism techniques is easy. They can be derived from
-% the current techniques or built as a standalone function.
-
-\begin{center}\rule{3in}{0.4pt}\end{center}
-
-\begin{itemize}
- \item {{criticisms}}
-\end{itemize}
-
-\subsubsection{References}\label{references}
diff --git a/docs/tex/api/index.tex b/docs/tex/api/index.tex
index 971f01845..ced76268a 100644
--- a/docs/tex/api/index.tex
+++ b/docs/tex/api/index.tex
@@ -1,6 +1,6 @@
\title{API}
-{{navbar}}
+\section{API and Documentation}
Edward's design reflects the building blocks for probabilistic
modeling. It defines interchangeable components, enabling rapid
@@ -36,6 +36,6 @@
Navigate modules enabling this analysis above.
See the
-\href{/api/reference}{reference page} for a list of the API.
+\href{/api/overview}{overview page} for a list of the API.
\subsubsection{References}\label{references}
diff --git a/docs/tex/api/inference.tex b/docs/tex/api/inference.tex
deleted file mode 100644
index 56f3abf18..000000000
--- a/docs/tex/api/inference.tex
+++ /dev/null
@@ -1,186 +0,0 @@
-\title{Inference}
-
-{{navbar}}
-
-\subsubsection{Inference}
-
-We describe how to perform inference in probabilistic models.
-For background, see the
-\href{/tutorials/inference}{Inference tutorial}.
-
-Suppose we have a model $p(\mathbf{x}, \mathbf{z}, \beta)$ of data $\mathbf{x}_{\text{train}}$ with latent variables $(\mathbf{z}, \beta)$.
-Consider the posterior inference problem,
-\begin{equation*}
-q(\mathbf{z}, \beta)\approx p(\mathbf{z}, \beta\mid \mathbf{x}_{\text{train}}),
-\end{equation*}
-in which the task is to approximate the posterior
-$p(\mathbf{z}, \beta\mid \mathbf{x}_{\text{train}})$
-using a family of distributions, $q(\mathbf{z},\beta; \lambda)$,
-indexed by parameters $\lambda$.
-
-In Edward, let \texttt{z} and \texttt{beta} be latent variables in the model,
-where we observe the random variable \texttt{x} with
-data \texttt{x_train}.
-Let \texttt{qz} and \texttt{qbeta} be random variables defined to
-approximate the posterior.
-We write this problem as follows:
-
-\begin{lstlisting}[language=Python]
-inference = ed.Inference({z: qz, beta: qbeta}, {x: x_train})
-\end{lstlisting}
-
-\texttt{Inference} is an abstract class which takes two inputs. The
-first is a collection of latent random variables \texttt{beta} and
-\texttt{z}, along with ``posterior variables'' \texttt{qbeta} and
-\texttt{qz}, which are associated to their respective latent
-variables. The second is a collection of observed random variables
-\texttt{x}, which is associated to the data \texttt{x_train}.
-
-Inference adjusts parameters of the distribution of \texttt{qbeta}
-and \texttt{qz} to be close to the
-posterior $p(\mathbf{z}, \beta\,|\,\mathbf{x}_{\text{train}})$.
-
-Running inference is as simple as running one method.
-
-\begin{lstlisting}[language=Python]
-inference = ed.Inference({z: qz, beta: qbeta}, {x: x_train})
-inference.run()
-\end{lstlisting}
-
-Inference also supports fine control of the training procedure.
-
-\begin{lstlisting}[language=Python]
-inference = ed.Inference({z: qz, beta: qbeta}, {x: x_train})
-inference.initialize()
-
-tf.global_variables_initializer().run()
-
-for _ in range(inference.n_iter):
- info_dict = inference.update()
- inference.print_progress(info_dict)
-
-inference.finalize()
-\end{lstlisting}
-
-\texttt{initialize()} builds the algorithm's update rules
-(computational graph) for $\lambda$;
-\texttt{tf.global_variables_initializer().run()} initializes $\lambda$
-(TensorFlow variables in the graph);
-\texttt{update()} runs the graph once to update
-$\lambda$, which is called in a loop until convergence;
-\texttt{finalize()} runs any computation as the algorithm
-terminates.
-
-The \texttt{run()} method is a simple wrapper for this procedure.
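-
-For example, with the concrete algorithm \texttt{ed.KLqp} (a minimal
-sketch; \texttt{N} is assumed to be the number of components of
-\texttt{z}):
-
-\begin{lstlisting}[language=Python]
-from edward.models import Normal
-
-qbeta = Normal(loc=tf.Variable(0.0),
-               scale=tf.nn.softplus(tf.Variable(0.0)))
-qz = Normal(loc=tf.Variable(tf.zeros(N)),
-            scale=tf.nn.softplus(tf.Variable(tf.zeros(N))))
-
-inference = ed.KLqp({z: qz, beta: qbeta}, {x: x_train})
-inference.run(n_iter=1000)
-\end{lstlisting}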
-
-\subsubsection{Other Settings}
-
-We highlight other settings during inference.
-
-\textbf{Model parameters}.
-Model parameters are parameters in a model for which we always compute
-point estimates rather than maintain uncertainty.
-They are defined with \texttt{tf.Variable}s, where the inference
-problem is
-\begin{equation*}
-\hat{\theta} \leftarrow^{\text{optimize}}
-p(\mathbf{x}_{\text{train}}; \theta)
-\end{equation*}
-
-\begin{lstlisting}[language=Python]
-from edward.models import Normal
-
-theta = tf.Variable(0.0)
-x = Normal(loc=tf.ones(10) * theta, scale=1.0)
-
-inference = ed.Inference({}, {x: x_train})
-\end{lstlisting}
-
-Only a subset of inference algorithms support estimation of model
-parameters.
-(Note also that this inference example does not have any latent
-variables. It is only about estimating \texttt{theta} given that we
-observe $\mathbf{x} = \mathbf{x}_{\text{train}}$. We can add latent
-variables so that inference performs both posterior inference and
-parameter estimation, as in the sketch below.)
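-
-A minimal sketch of combining the two (assuming an approximating
-random variable \texttt{qz} of matching shape for \texttt{z}):
-
-\begin{lstlisting}[language=Python]
-from edward.models import Normal
-
-theta = tf.Variable(0.0)
-z = Normal(loc=tf.zeros(10), scale=tf.ones(10))
-x = Normal(loc=z + theta, scale=1.0)
-
-inference = ed.Inference({z: qz}, {x: x_train})
-\end{lstlisting}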
-
-For example, model parameters are useful when applying neural networks
-from high-level libraries such as Keras and TensorFlow Slim. See
-the \href{/api/model-compositionality}{model compositionality} page
-for more details.
-
-\textbf{Conditional inference}.
-In conditional inference, only a subset of the posterior is inferred
-while the rest are fixed using other inferences. The inference
-problem is
-\begin{equation*}
-q(\mathbf{z}\mid\beta)q(\beta)\approx
-p(\mathbf{z}, \beta\mid\mathbf{x}_{\text{train}})
-\end{equation*}
-where parameters in $q(\mathbf{z}\mid\beta)$ are estimated and
-$q(\beta)$ is fixed.
-%
-In Edward, we enable conditioning by binding random variables to other
-random variables in \texttt{data}.
-\begin{lstlisting}[language=Python]
-inference = ed.Inference({z: qz}, {x: x_train, beta: qbeta})
-\end{lstlisting}
-
-In the \href{/api/inference-compositionality}{compositionality page},
-we describe how to construct inference by composing
-many conditional inference algorithms.
-
-\textbf{Implicit prior samples}.
-Latent variables can be defined in the model without any posterior
-inference over them. They are implicitly marginalized out with a
-single sample. The inference problem is
-\begin{equation*}
-q(\beta)\approx
-p(\beta\mid\mathbf{x}_{\text{train}}, \mathbf{z}^*)
-\end{equation*}
-where $\mathbf{z}^*\sim p(\mathbf{z}\mid\beta)$ is a prior sample.
-
-\begin{lstlisting}[language=Python]
-inference = ed.Inference({beta: qbeta}, {x: x_train})
-\end{lstlisting}
-
-For example, implicit prior samples are useful for generative adversarial
-networks. Their inference problem does not require any inference over
-the latent variables; it uses samples from the prior.
-
-\begin{center}\rule{3in}{0.4pt}\end{center}
-
-\begin{itemize}
- \item @{ed.inferences.Inference}
- \item @{ed.inferences.VariationalInference}
- \begin{itemize}
- \item @{ed.inferences.KLqp}
- \begin{itemize}
- \item @{ed.inferences.ReparameterizationKLqp}
- \item @{ed.inferences.ReparameterizationKLKLqp}
- \item @{ed.inferences.ReparameterizationEntropyKLqp}
- \item @{ed.inferences.ScoreKLqp}
- \item @{ed.inferences.ScoreKLKLqp}
- \item @{ed.inferences.ScoreEntropyKLqp}
- \end{itemize}
- \item @{ed.inferences.KLpq}
- \item @{ed.inferences.GANInference}
- \begin{itemize}
- \item @{ed.inferences.BiGANInference}
- \item @{ed.inferences.ImplicitKLqp}
- \item @{ed.inferences.WGANInference}
- \end{itemize}
- \item @{ed.inferences.MAP}
- \begin{itemize}
- \item @{ed.inferences.Laplace}
- \end{itemize}
- \end{itemize}
- \item @{ed.inferences.MonteCarlo}
- \begin{itemize}
- \item @{ed.inferences.Gibbs}
- \item @{ed.inferences.MetropolisHastings}
- \item @{ed.inferences.HMC}
- \item @{ed.inferences.SGLD}
- \item @{ed.inferences.SGHMC}
- \end{itemize}
- \item @{ed.inferences.complete_conditional}
-\end{itemize}
diff --git a/docs/tex/api/model.tex b/docs/tex/api/model.tex
deleted file mode 100644
index 9c66d5d6e..000000000
--- a/docs/tex/api/model.tex
+++ /dev/null
@@ -1,78 +0,0 @@
-\title{Model}
-
-{{navbar}}
-
-\subsubsection{Model}
-
-A probabilistic model is a joint distribution $p(\mathbf{x},
-\mathbf{z})$ of data $\mathbf{x}$ and latent variables $\mathbf{z}$.
-For background, see the \href{/tutorials/model}{Probabilistic Models tutorial}.
-
-In Edward, we specify models using a simple language of random variables.
-A random variable $\mathbf{x}$ is an object parameterized by
-tensors $\theta^*$, where
-the number of random variables in one object is determined by
-the dimensions of its parameters.
-
-\begin{lstlisting}[language=Python]
-from edward.models import Normal, Exponential
-
-# univariate normal
-Normal(loc=tf.constant(0.0), scale=tf.constant(1.0))
-# vector of 5 univariate normals
-Normal(loc=tf.zeros(5), scale=tf.ones(5))
-# 2 x 3 matrix of Exponentials
-Exponential(rate=tf.ones([2, 3]))
-\end{lstlisting}
-
-For multivariate distributions, the multivariate dimension is the
-innermost (right-most) dimension of the parameters.
-
-\begin{lstlisting}[language=Python]
-from edward.models import Dirichlet, MultivariateNormalTriL
-
-# K-dimensional Dirichlet
-Dirichlet(concentration=tf.constant([0.1] * K))
-# vector of 5 K-dimensional multivariate normals with lower triangular cov
-MultivariateNormalTriL(loc=tf.zeros([5, K]), scale_tril=tf.ones([5, K, K]))
-# 2 x 5 matrix of K-dimensional multivariate normals
-MultivariateNormalTriL(loc=tf.zeros([2, 5, K]), scale_tril=tf.ones([2, 5, K, K]))
-\end{lstlisting}
-
-Random variables are equipped with methods such as
-\texttt{log_prob()}, $\log p(\mathbf{x}\mid\theta^*)$,
-\texttt{mean()}, $\mathbb{E}_{p(\mathbf{x}\mid\theta^*)}[\mathbf{x}]$,
-and \texttt{sample()}, $\mathbf{x}^*\sim p(\mathbf{x}\mid\theta^*)$.
-Further, each random variable is associated to a tensor $\mathbf{x}^*$ in the
-computational graph, which represents a single sample $\mathbf{x}^*\sim
-p(\mathbf{x}\mid\theta^*)$.
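-
-For example (a minimal sketch):
-
-\begin{lstlisting}[language=Python]
-from edward.models import Normal
-
-x = Normal(loc=tf.zeros(10), scale=tf.ones(10))
-x.log_prob(tf.zeros(10))  # log density evaluated pointwise; shape (10,)
-x.mean()  # mean of the distribution; shape (10,)
-x.sample(5)  # 5 draws; shape (5, 10)
-x.value()  # the sample x* associated to x in the graph; shape (10,)
-\end{lstlisting}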
-
-This makes it easy to parameterize random variables with complex
-deterministic structure, such as deep neural networks and a diverse
-set of math operations, and to stay compatible with third-party
-libraries that also build on TensorFlow.
-The design also enables compositions of random variables to capture
-complex stochastic structure; these compositions operate on
-$\mathbf{x}^*$.
-
-\includegraphics[width=375px]{/images/random_variable_ops.png}
-
-\begin{lstlisting}[language=Python]
-from edward.models import Normal
-
-x = Normal(loc=tf.zeros(10), scale=tf.ones(10))
-y = tf.constant(5.0)
-x + y, x - y, x * y, x / y
-tf.tanh(x * y)
-x[2] # 3rd normal rv in the vector
-\end{lstlisting}
-
-In the \href{/api/model-compositionality}{compositionality page}, we
-describe how to build models by composing random variables.
-
-\begin{center}\rule{3in}{0.4pt}\end{center}
-
-\begin{itemize}
- \item @{ed.models.RandomVariable}
- \item {{models}}
-\end{itemize}
diff --git a/docs/tex/api/reference.tex b/docs/tex/api/reference.tex
deleted file mode 100644
index 18db85f23..000000000
--- a/docs/tex/api/reference.tex
+++ /dev/null
@@ -1,84 +0,0 @@
-\title{Reference}
-
-{{navbar}}
-
-There are four modules in Edward:
-\texttt{ed.criticisms},
-\texttt{ed.inferences},
-\texttt{ed.models},
-and
-\texttt{ed.util}.
-
-\subsubsection{Criticism}
-
-\texttt{ed.criticisms} is comprised of functions. They operate on
-random variables in a model or they operate on NumPy arrays
-representing values drawn from the random variables.
-
-\begin{itemize}
- \item {{criticisms}}
-\end{itemize}
-
-\subsubsection{Inference}
-
-\texttt{ed.inferences} is mostly comprised of classes. They are
-organized in a class hierarchy, where methods are shared via parent
-classes and \texttt{Inference} is the top-most base class.
-
-\begin{itemize}
- \item @{ed.inferences.Inference}
- \item @{ed.inferences.VariationalInference}
- \begin{itemize}
- \item @{ed.inferences.KLqp}
- \begin{itemize}
- \item @{ed.inferences.ReparameterizationKLqp}
- \item @{ed.inferences.ReparameterizationKLKLqp}
- \item @{ed.inferences.ReparameterizationEntropyKLqp}
- \item @{ed.inferences.ScoreKLqp}
- \item @{ed.inferences.ScoreKLKLqp}
- \item @{ed.inferences.ScoreEntropyKLqp}
- \end{itemize}
- \item @{ed.inferences.KLpq}
- \item @{ed.inferences.GANInference}
- \begin{itemize}
- \item @{ed.inferences.BiGANInference}
- \item @{ed.inferences.ImplicitKLqp}
- \item @{ed.inferences.WGANInference}
- \end{itemize}
- \item @{ed.inferences.MAP}
- \begin{itemize}
- \item @{ed.inferences.Laplace}
- \end{itemize}
- \end{itemize}
- \item @{ed.inferences.MonteCarlo}
- \begin{itemize}
- \item @{ed.inferences.Gibbs}
- \item @{ed.inferences.MetropolisHastings}
- \item @{ed.inferences.HMC}
- \item @{ed.inferences.SGLD}
- \item @{ed.inferences.SGHMC}
- \end{itemize}
- \item @{ed.inferences.complete_conditional}
-\end{itemize}
-
-\subsubsection{Models}
-
-\texttt{ed.models} is comprised of random variables.
-The list of available random variables depends on the TensorFlow
-version installed. For TensorFlow {{tensorflow_version}}, the
-following are available:
-
-\begin{itemize}
- \item @{ed.models.RandomVariable}
- \item {{models}}
-\end{itemize}
-
-\subsubsection{Utilities}
-
-\texttt{ed.util} is comprised of functions for miscellaneous usage.
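-
-For example (a minimal sketch; \texttt{x}, \texttt{z}, and \texttt{qz}
-are assumed to be random variables from a model and its approximating
-family):
-
-\begin{lstlisting}[language=Python]
-ed.set_seed(42)          # seed NumPy and TensorFlow for reproducibility
-sess = ed.get_session()  # the TensorFlow session Edward operates in
-x_post = ed.copy(x, {z: qz})  # copy of x with z rebound to qz
-\end{lstlisting}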
-
-\begin{itemize}
- \item {{util}}
- \item @{ed.VERSION}
- \item @{ed.__version__}
-\end{itemize}
diff --git a/docs/tex/iclr2017.tex b/docs/tex/iclr2017.tex
index 9483bb356..d04e6a68e 100644
--- a/docs/tex/iclr2017.tex
+++ b/docs/tex/iclr2017.tex
@@ -198,7 +198,7 @@ \subsubsection{Section 4. Compositional Representations for Inference}
inference_m.update()
\end{lstlisting}
For more details, see the
-\href{/api/inference-compositionality}{inference compositionality} webpage.
+\href{/tutorials/inference-compositionality}{inference compositionality} webpage.
See
\href{https://github.com/blei-lab/edward/blob/master/examples/factor_analysis.py}{\texttt{examples/factor_analysis.py}} for
a version performing Monte Carlo EM for logistic factor analysis
@@ -234,7 +234,7 @@ \subsubsection{Section 4. Compositional Representations for Inference}
inference.initialize(scale={x: float(N) / M, z: float(N) / M})
\end{lstlisting}
For more details, see the
-\href{/api/inference-data-subsampling}{data subsampling} webpage.
+\href{/tutorials/data-subsampling}{data subsampling} webpage.
\subsubsection{Section 5. Experiments}
@@ -346,7 +346,7 @@ \subsubsection{Appendix B. Inference Examples}
\textbf{Figure *}. Stochastic variational inference \citep{hoffman2013stochastic}.
For more details, see the
-\href{/api/inference-data-subsampling}{data subsampling} webpage.
+\href{/tutorials/data-subsampling}{data subsampling} webpage.
\subsubsection{Appendix C. Complete Examples}
diff --git a/docs/tex/template-api.pandoc b/docs/tex/template-api.pandoc
index ce9c6d584..d3b25be5c 100644
--- a/docs/tex/template-api.pandoc
+++ b/docs/tex/template-api.pandoc
@@ -114,7 +114,7 @@ $endfor$
API
- Reference
+ Overview
{{toc}}
+
+```python
+from edward.models import Normal
+
+x = Normal(loc=tf.zeros(10), scale=tf.ones(10))
+y = tf.constant(5.0)
+x + y, x - y, x * y, x / y
+tf.tanh(x * y)
+x[2] # 3rd normal rv in the vector
+```
+
+In the [compositionality tutorial](/tutorials/model-compositionality), we
+describe how to build models by composing random variables.
"""
from __future__ import absolute_import
from __future__ import division
diff --git a/edward/util/__init__.py b/edward/util/__init__.py
index dce454aed..e1e322a9a 100644
--- a/edward/util/__init__.py
+++ b/edward/util/__init__.py
@@ -1,4 +1,4 @@
-"""
+"""Miscellaneous utility functions.
"""
from __future__ import absolute_import
from __future__ import division
diff --git a/notebooks/batch_training.ipynb b/notebooks/batch_training.ipynb
index 8de81c27c..6915dfa96 100644
--- a/notebooks/batch_training.ipynb
+++ b/notebooks/batch_training.ipynb
@@ -380,7 +380,7 @@
"illustrated batch training for models with only global latent variables,\n",
"which are variables are shared across all data points.\n",
"For more complex strategies, see the\n",
- "[inference data subsampling API](http://edwardlib.org/api/inference-data-subsampling)."
+    "[data subsampling tutorial](http://edwardlib.org/tutorials/data-subsampling)."
]
}
],
diff --git a/notebooks/iclr2017.ipynb b/notebooks/iclr2017.ipynb
index a93cb8320..0f3958f09 100644
--- a/notebooks/iclr2017.ipynb
+++ b/notebooks/iclr2017.ipynb
@@ -298,7 +298,7 @@
"metadata": {},
"source": [
"For more details, see the\n",
- "[inference compositionality](http://edwardlib.org/api/inference-compositionality) webpage.\n",
+ "[inference compositionality](http://edwardlib.org/tutorials/inference-compositionality) webpage.\n",
"See\n",
"[`examples/factor_analysis.py`](https://github.com/blei-lab/edward/blob/master/examples/factor_analysis.py) for\n",
"a version performing Monte Carlo EM for logistic factor analysis\n",
@@ -346,7 +346,7 @@
"metadata": {},
"source": [
"For more details, see the\n",
- "[data subsampling](http://edwardlib.org/api/inference-data-subsampling) webpage.\n",
+ "[data subsampling](http://edwardlib.org/tutorials/data-subsampling) webpage.\n",
"\n",
"## Section 5. Experiments\n",
"\n",
@@ -520,7 +520,7 @@
"\n",
"__Figure *__. Stochastic variational inference (M. D. Hoffman, Blei, Wang, & Paisley, 2013). \n",
"For more details, see the\n",
- "[data subsampling](http://edwardlib.org/api/inference-data-subsampling) webpage.\n",
+ "[data subsampling](http://edwardlib.org/tutorials/data-subsampling) webpage.\n",
"\n",
"## Appendix C. Complete Examples\n",
"\n",