Implement Laplace (quadratic) approximation #345
@@ -0,0 +1,182 @@
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Sequence
from typing import Optional

import arviz as az
import numpy as np
import pymc as pm
import xarray as xr
from arviz import dict_to_dataset
from pymc.backends.arviz import (
    coords_and_dims_for_inferencedata,
    find_constants,
    find_observations,
)
from pymc.model.transform.conditioning import remove_value_transforms
from pymc.util import RandomSeed
from pytensor import Variable


def laplace(
    vars: Sequence[Variable],
    draws: int = 1_000,
    model=None,
    random_seed: Optional[RandomSeed] = None,
    progressbar: bool = True,
):
""" | ||
Create a Laplace (quadratic) approximation for a posterior distribution. | ||
|
||
This function generates a Laplace approximation for a given posterior distribution using a specified | ||
number of draws. This is useful for obtaining a parametric approximation to the posterior distribution | ||
that can be used for further analysis. | ||
|
||
Parameters | ||
---------- | ||
vars : Sequence[Variable] | ||
A sequence of variables for which the Laplace approximation of the posterior distribution | ||
is to be created. | ||
draws : int, optional, default=1_000 | ||
The number of draws to sample from the posterior distribution for creating the approximation. | ||
For draws=0 only the fit of the Laplace approximation is returned | ||
model : object, optional, default=None | ||
The model object that defines the posterior distribution. If None, the default model will be used. | ||
random_seed : Optional[RandomSeed], optional, default=None | ||
An optional random seed to ensure reproducibility of the draws. If None, the draws will be | ||
generated using the current random state. | ||
progressbar: bool, optional defaults to True | ||
Whether to display a progress bar in the command line. | ||
|
||
Returns | ||
------- | ||
arviz.InferenceData | ||
An `InferenceData` object from the `arviz` library containing the Laplace | ||
approximation of the posterior distribution. The inferenceData object also | ||
contains constant and observed data as well as deterministic variables. | ||
InferenceData also contains a group 'fit' with the mean and covariance | ||
for the Laplace approximation. | ||
|
||
Examples | ||
-------- | ||
|
||
>>> import numpy as np | ||
>>> import pymc as pm | ||
>>> import arviz as az | ||
>>> from pymc_experimental.inference.laplace import laplace | ||
>>> y = np.array([2642, 3503, 4358]*10) | ||
>>> with pm.Model() as m: | ||
>>> logsigma = pm.Uniform("logsigma", 1, 100) | ||
>>> mu = pm.Uniform("mu", -10000, 10000) | ||
>>> yobs = pm.Normal("y", mu=mu, sigma=pm.math.exp(logsigma), observed=y) | ||
>>> idata = laplace([mu, logsigma], model=m) | ||
|
||
Notes | ||
----- | ||
This method of approximation may not be suitable for all types of posterior distributions, | ||
especially those with significant skewness or multimodality. | ||
|
||
See Also | ||
-------- | ||
fit : Calling the inference function 'fit' like pmx.fit(method="laplace", vars=[mu, logsigma], model=m) | ||
will forward the call to 'laplace'. | ||
|
||
""" | ||
|
||
    rng = np.random.default_rng(seed=random_seed)

    transformed_m = pm.modelcontext(model)
    map_point = pm.find_MAP(vars=vars, progressbar=progressbar, model=transformed_m)

    # Compute the Hessian on the untransformed (constrained) space so that the
    # covariance refers to the user-facing parametrization. See
    # https://www.pymc.io/projects/docs/en/stable/api/model/generated/pymc.model.transform.conditioning.remove_value_transforms.html
    untransformed_m = remove_value_transforms(transformed_m)
    untransformed_vars = [untransformed_m[v.name] for v in vars]
    hessian = pm.find_hessian(point=map_point, vars=untransformed_vars, model=untransformed_m)

    # Note: this is an exact-zero check; a near-singular Hessian will still pass
    # and produce a poorly conditioned covariance.
    if np.linalg.det(hessian) == 0:
        raise np.linalg.LinAlgError("Hessian is singular.")

    cov = np.linalg.inv(hessian)
    mean = np.concatenate([np.atleast_1d(map_point[v.name]) for v in vars])

    chains = 1

    if draws != 0:
        samples = rng.multivariate_normal(mean, cov, size=(chains, draws))

        # Each variable is assumed to be scalar: column i of the samples
        # corresponds to vars[i].
        data_vars = {}
        for i, var in enumerate(vars):
            data_vars[str(var)] = xr.DataArray(samples[:, :, i], dims=("chain", "draw"))

        coords = {"chain": np.arange(chains), "draw": np.arange(draws)}
        ds = xr.Dataset(data_vars, coords=coords)

        idata = az.convert_to_inference_data(ds)
        # Pass the resolved model so this also works when model=None and the
        # call happens outside of a model context.
        idata = add_data_to_inference_data(transformed_m, idata, progressbar)
    else:
        idata = az.InferenceData()

    idata = add_fit_to_inference_data(vars, idata, mean, cov)

    return idata


def add_fit_to_inference_data(vars, idata, mean, covariance):
    """Attach a 'fit' group holding the Laplace mean vector and covariance matrix."""
    coord_names = [v.name for v in vars]
    # Convert the mean and covariance to xarray DataArrays indexed by variable name
    mean_dataarray = xr.DataArray(mean, dims=["rows"], coords={"rows": coord_names})
    cov_dataarray = xr.DataArray(
        covariance, dims=["rows", "columns"], coords={"rows": coord_names, "columns": coord_names}
    )

    # Bundle them into an xarray Dataset and attach it as the 'fit' group
    dataset = xr.Dataset({"mean_vector": mean_dataarray, "covariance_matrix": cov_dataarray})

    idata.add_groups(fit=dataset)

    return idata


def add_data_to_inference_data(model, trace, progressbar):
    """Add deterministic variables, observed data, and constant data to the trace."""
    # Add deterministic variables to the posterior group
    trace.posterior = pm.compute_deterministics(
        trace.posterior, model=model, merge_dataset=True, progressbar=progressbar
    )

    coords, dims = coords_and_dims_for_inferencedata(model)

    observed_data = dict_to_dataset(
        find_observations(model),
        library=pm,
        coords=coords,
        dims=dims,
        default_dims=[],
    )

    constant_data = dict_to_dataset(
        find_constants(model),
        library=pm,
        coords=coords,
        dims=dims,
        default_dims=[],
    )

    trace.add_groups(
        {"observed_data": observed_data, "constant_data": constant_data},
        coords=coords,
        dims=dims,
    )

    return trace
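
A note on what the code above computes: the Laplace approximation replaces the posterior with a multivariate normal centered at the MAP estimate, with covariance equal to the inverse Hessian of the negative log posterior at that point. As a sketch of the standard derivation (a second-order Taylor expansion; the notation is generic, not tied to the code):

```latex
% Second-order Taylor expansion of the log posterior around the mode
% \hat{\theta}; the first-order term vanishes because \hat{\theta} is a maximum.
\log p(\theta \mid y) \approx \log p(\hat{\theta} \mid y)
  - \tfrac{1}{2} (\theta - \hat{\theta})^{\top} H \, (\theta - \hat{\theta}),
\qquad
H = -\left. \nabla^{2} \log p(\theta \mid y) \right|_{\theta = \hat{\theta}}
```

Exponentiating gives the Gaussian used by `laplace`, $p(\theta \mid y) \approx \mathcal{N}(\hat{\theta}, H^{-1})$, which is why the code inverts `hessian` to obtain `cov` and raises when the Hessian is singular.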
@@ -0,0 +1,63 @@
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import pymc as pm
import pytest

import pymc_experimental as pmx


@pytest.mark.filterwarnings(
    "ignore:Model.model property is deprecated. Just use Model.:FutureWarning",
    "ignore:hessian will stop negating the output in a future version of PyMC.\n"
    + "To suppress this warning set `negate_output=False`:FutureWarning",
)
def test_laplace():
    # Example originates from Bayesian Data Analysis, 3rd Edition,
    # by Andrew Gelman, John Carlin, Hal Stern, David Dunson,
    # Aki Vehtari, and Donald Rubin. See Section 4.1.
    y = np.array([2642, 3503, 4358], dtype=np.float64)
    n = y.size
    draws = 100000

    with pm.Model() as m:
        logsigma = pm.Uniform("logsigma", 1, 100)
        mu = pm.Uniform("mu", -10000, 10000)
        yobs = pm.Normal("y", mu=mu, sigma=pm.math.exp(logsigma), observed=y)
        vars = [mu, logsigma]
Review discussion on this line:

- Question: say you only did `vars=[mu]`?
- I think find_MAP in that case uses the initial_point for the excluded variable(s). I never found that behavior useful tbh. Edit: Maybe it's fine. Either way it's documented here: https://github.com/pymc-devs/pymc/blob/05b557f6460a10c29c3db33690ee535f5b1ecde0/pymc/tuning/starting.py#L73-L75
- Sounds like we may want to pass a similar …
- Worth adding a test on this to confirm the behaviour.
- carsten-j: I am not sure I fully understand this. I will give it a second go with the documentation for find_MAP.
- theorashid: Hi Carsten, is there anything we can do to help get this over the line?
- carsten-j: Hi @theorashid. I am not sure how to handle it if only a subset of the variables is passed in, e.g. vars=[mu] and logsigma is left out. If this should raise a warning, I need some way of figuring out the number of model parameters and comparing that with the number of parameters in vars. I am not sure how to determine the number of model parameters.
- carsten-j: @theorashid and @ricardoV94, I committed an update that will raise a warning in case the number of variables in vars does not equal the number of model variables.
- LGTM
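
The committed warning itself is outside the lines shown here; a minimal sketch of such a check, assuming a hypothetical helper name and the use of `model.free_RVs` to count the model's free variables (the exact committed implementation may differ):

```python
import warnings

import pymc as pm


def check_vars_coverage(vars, model=None):
    # Hypothetical helper: warn when only a subset of the model's free
    # variables is passed, since pm.find_MAP keeps the excluded variables
    # fixed at their initial point instead of optimizing over them.
    model = pm.modelcontext(model)
    if len(vars) != len(model.free_RVs):
        warnings.warn(
            "Number of variables in vars does not equal the number of "
            "variables in the model.",
            UserWarning,
        )
```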

        idata = pmx.fit(
            method="laplace",
            vars=vars,
            model=m,
            draws=draws,
            random_seed=173300,
        )

    assert idata.posterior["mu"].shape == (1, draws)
    assert idata.posterior["logsigma"].shape == (1, draws)
    assert idata.observed_data["y"].shape == (n,)
    assert idata.fit["mean_vector"].shape == (len(vars),)
    assert idata.fit["covariance_matrix"].shape == (len(vars), len(vars))

    bda_map = [y.mean(), np.log(y.std())]
    bda_cov = np.array([[y.var() / n, 0], [0, 1 / (2 * n)]])

    assert np.allclose(idata.fit["mean_vector"].values, bda_map)
    assert np.allclose(idata.fit["covariance_matrix"].values, bda_cov, atol=1e-4)
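
Where the reference values come from: for this model (Section 4.1 of Bayesian Data Analysis, 3rd Edition), with priors that are effectively flat near the mode, the Laplace fit in $(\mu, \log\sigma)$ has a closed form, sketched below; this is exactly what `bda_map` and `bda_cov` encode.

```latex
% Posterior mode under (effectively) flat priors, with s^2 the population
% variance (ddof = 0, matching numpy's default y.var()):
\hat{\mu} = \bar{y}, \qquad
\widehat{\log\sigma} = \log s, \qquad
s^{2} = \frac{1}{n} \sum_{i=1}^{n} \left( y_i - \bar{y} \right)^{2}

% Inverse Hessian of the negative log posterior at the mode
% (the Laplace covariance):
\Sigma = \begin{pmatrix} s^{2}/n & 0 \\ 0 & 1/(2n) \end{pmatrix}
```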
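
Finally, a hypothetical usage sketch building on the names from the test above (`m`, `vars`), illustrating the documented draws=0 path; `groups()` and `.sel` are standard arviz/xarray calls, and the rest mirrors the test:

```python
# Hypothetical follow-up to the test above: fit only, no posterior sampling.
idata_fit_only = pmx.fit(
    method="laplace",
    vars=vars,
    model=m,
    draws=0,  # per the docstring, draws=0 returns only the Laplace fit
    random_seed=173300,
)

# Only the 'fit' group should be present; no 'posterior' group was sampled.
assert "posterior" not in idata_fit_only.groups()
assert "fit" in idata_fit_only.groups()

# The mean vector is indexed by variable name via the 'rows' coordinate.
mu_hat = idata_fit_only.fit["mean_vector"].sel(rows="mu").item()
```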