22 changes: 21 additions & 1 deletion edward/inferences/inference.py
@@ -2,6 +2,7 @@
from __future__ import division
from __future__ import print_function

+import abc
import numpy as np
import six
import tensorflow as tf
@@ -10,8 +11,21 @@
from edward.util import check_data, check_latent_vars, get_session, Progbar


+@six.add_metaclass(abc.ABCMeta)
class Inference(object):
"""Base class for Edward inference methods.
"""Abstract base class for inference. All inference algorithms in
Edward inherit from ``Inference``, sharing common methods and
properties via a class hierarchy.

Specific algorithms typically inherit from other subclasses of
``Inference`` rather than ``Inference`` directly. For example, one
might inherit from the abstract classes ``MonteCarlo`` or
``VariationalInference``.

To build an algorithm inheriting from ``Inference``, one must at the
minimum implement ``initialize`` and ``update``: the former builds
the computational graph for the algorithm; the latter runs the
computational graph for the algorithm.
"""
def __init__(self, latent_vars=None, data=None):
"""Initialization.
@@ -130,12 +144,15 @@ def run(self, variables=None, use_coordinator=True, *args, **kwargs):
self.coord.request_stop()
self.coord.join(self.threads)

+@abc.abstractmethod
def initialize(self, n_iter=1000, n_print=None, scale=None, logdir=None,
debug=False):
"""Initialize inference algorithm. It initializes hyperparameters
and builds ops for the algorithm's computational graph. No ops
should be created outside the call to ``initialize()``.

+Any derived class of ``Inference`` **must** implement this method.
+
Parameters
----------
n_iter : int, optional
Expand Down Expand Up @@ -186,9 +203,12 @@ def initialize(self, n_iter=1000, n_print=None, scale=None, logdir=None,
if self.debug:
self.op_check = tf.add_check_numerics_ops()

+@abc.abstractmethod
def update(self, feed_dict=None):
"""Run one iteration of inference.

+Any derived class of ``Inference`` **must** implement this method.
+
Parameters
----------
feed_dict : dict, optional
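To make the new contract concrete, here is a minimal sketch of a subclass (a hypothetical no-op algorithm, not part of this PR; it assumes the ``get_session`` helper shown in the imports above):

import tensorflow as tf

from edward.inferences.inference import Inference
from edward.util import get_session


class NoOpInference(Inference):
  """Hypothetical no-op algorithm, for illustration only."""
  def initialize(self, *args, **kwargs):
    # Build every op for the algorithm here; none outside this call.
    super(NoOpInference, self).initialize(*args, **kwargs)
    self.train_op = tf.no_op()

  def update(self, feed_dict=None):
    # Run one iteration of the graph built in initialize().
    sess = get_session()
    sess.run(self.train_op, feed_dict)
    return {'t': 1}

With the metaclass in place, ``Inference()`` itself now raises a TypeError ("Can't instantiate abstract class"), while ``NoOpInference()`` constructs fine.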
17 changes: 12 additions & 5 deletions edward/inferences/monte_carlo.py
@@ -2,6 +2,7 @@
from __future__ import division
from __future__ import print_function

+import abc
import numpy as np
import six
import tensorflow as tf
@@ -11,8 +12,14 @@
from edward.util import get_session


+@six.add_metaclass(abc.ABCMeta)
class MonteCarlo(Inference):
"""Base class for Monte Carlo inference methods.
"""Abstract base class for Monte Carlo. Specific Monte Carlo methods
inherit from ``MonteCarlo``, sharing methods in this class.

To build an algorithm inheriting from ``MonteCarlo``, one must at the
minimum implement ``build_update``: it determines how to assign
the samples in the ``Empirical`` approximations.
"""
def __init__(self, latent_vars=None, data=None):
"""Initialization.
@@ -138,12 +145,12 @@ def print_progress(self, info_dict):
if t == 1 or t % self.n_print == 0:
self.progbar.update(t, {'Acceptance Rate': info_dict['accept_rate']})

+@abc.abstractmethod
def build_update(self):
"""Build update, which returns an assign op for parameters in
the Empirical random variables.
"""Build update rules, returning an assign op for parameters in
the ``Empirical`` random variables.

Any derived class of ``MonteCarlo`` **must** implement
this method.
Any derived class of ``MonteCarlo`` **must** implement this method.

Raises
------
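To make the ``build_update`` contract concrete, a sketch of a trivial subclass (hypothetical, for illustration; it assumes the iteration counter ``self.t`` and the ``latent_vars`` dict of ``Empirical`` approximations that the base classes set up):

import six
import tensorflow as tf

from edward.inferences.monte_carlo import MonteCarlo


class StandStillSampler(MonteCarlo):
  """Hypothetical sampler whose transition kernel is the identity."""
  def build_update(self):
    assign_ops = []
    for z, qz in six.iteritems(self.latent_vars):
      # Read the previous sample and write it unchanged into slot
      # self.t of the Empirical parameters; a real sampler proposes
      # a new state here instead.
      old_sample = tf.gather(qz.params, tf.maximum(self.t - 1, 0))
      assign_ops.append(tf.scatter_update(qz.params, self.t, old_sample))
    return tf.group(*assign_ops)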
15 changes: 13 additions & 2 deletions edward/inferences/variational_inference.py
@@ -2,6 +2,7 @@
from __future__ import division
from __future__ import print_function

+import abc
import numpy as np
import six
import tensorflow as tf
@@ -16,8 +17,16 @@
pass


+@six.add_metaclass(abc.ABCMeta)
class VariationalInference(Inference):
"""Base class for variational inference methods.
"""Abstract base class for variational inference. Specific
variational inference methods inherit from ``VariationalInference``,
sharing methods such as a default optimizer.

To build an algorithm inheriting from ``VariaitonalInference``, one
must at the minimum implement ``build_loss_and_gradients``: it
determines the loss function and gradients to apply for a given
optimizer.
"""
def __init__(self, *args, **kwargs):
super(VariationalInference, self).__init__(*args, **kwargs)
@@ -150,8 +159,10 @@ def print_progress(self, info_dict):
if t == 1 or t % self.n_print == 0:
self.progbar.update(t, {'Loss': info_dict['loss']})

+@abc.abstractmethod
def build_loss_and_gradients(self, var_list):
"""Build loss function.
"""Build loss function and its gradients. They will be leveraged
in an optimizer to update the model and variational parameters.

Any derived class of ``VariationalInference`` **must** implement
this method.
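The analogous sketch here (hypothetical toy objective, for illustration): ``build_loss_and_gradients`` returns the scalar loss plus ``(gradient, variable)`` pairs, which the shared ``initialize`` then hands to the optimizer:

import tensorflow as tf

from edward.inferences.variational_inference import VariationalInference


class ToyVI(VariationalInference):
  """Hypothetical method minimizing a toy penalty, for illustration."""
  def build_loss_and_gradients(self, var_list):
    # A real method would build, e.g., a Monte Carlo estimate of the
    # negative ELBO; a quadratic penalty stands in for it here.
    loss = tf.add_n([tf.reduce_sum(tf.square(v)) for v in var_list])
    grads = tf.gradients(loss, var_list)
    grads_and_vars = list(zip(grads, var_list))
    return loss, grads_and_vars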
4 changes: 4 additions & 0 deletions edward/models/random_variable.py
@@ -291,15 +291,19 @@ def get_shape(self):
"""Get shape of random variable."""
return self.shape

+@staticmethod
def _session_run_conversion_fetch_function(tensor):
return ([tensor.value()], lambda val: val[0])

+@staticmethod
def _session_run_conversion_feed_function(feed, feed_val):
return [(feed.value(), feed_val)]

+@staticmethod
def _session_run_conversion_feed_function_for_partial_run(feed):
return [feed.value()]

+@staticmethod
def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
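These four hooks are what TensorFlow's fetch, feed, and tensor-conversion registries call, which is what lets a ``RandomVariable`` be passed straight to ``Session.run`` or combined with tensors; ``@staticmethod`` records that none of them reads instance state beyond its arguments. A usage sketch of the behavior they enable (constructor argument names vary by Edward version; ``loc``/``scale`` assumed here):

import tensorflow as tf

from edward.models import Normal

x = Normal(loc=0.0, scale=1.0)
y = x + 1.0  # _tensor_conversion_function: x stands in for x.value()

sess = tf.Session()
print(sess.run(x))            # fetch hook: returns a sample of x
print(sess.run(y, {x: 0.5}))  # feed hook: feeds 0.5 as x.value(); prints 1.5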