import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import arviz as az
import hamiltorch
import warnings

%matplotlib inline
# Retina display
%config InlineBackend.figure_format = 'retina'
warnings.filterwarnings('ignore')

hamiltorch.set_random_seed(123)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

from tueplots import bundles
plt.rcParams.update(bundles.beamer_moml())
# Also add despine to the bundle using rcParams
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
# Increase font size to match Beamer template
plt.rcParams['font.size'] = 16
# Make background transparent
plt.rcParams['figure.facecolor'] = 'none'
# Target: a mixture of Gaussians
m = torch.distributions.MixtureSameFamily(
    torch.distributions.Categorical(torch.tensor([0.4, 0.6])),
    torch.distributions.Normal(torch.tensor([-2., 3.]), torch.tensor([0.4, 1.])))

# Plot the density
x = torch.linspace(-10, 10, 1000)
plt.plot(x, torch.exp(m.log_prob(x)))
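Since m is an exact torch.distributions object, we can also draw i.i.d. samples from it directly; a minimal sketch (the sample count 5000 is an arbitrary choice) that gives a ground-truth histogram to compare the MCMC samplers against:

# Direct (i.i.d.) samples from the mixture, for later comparison
direct_samples = m.sample((5000,))
plt.hist(direct_samples.numpy(), bins=100, density=True, alpha=0.5)
plt.plot(x, torch.exp(m.log_prob(x)))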
def get_samples(m, x0, seed=42):
    torch.manual_seed(seed)
    samples = [x0.item()]
    for i in range(1000):
        xi = torch.distributions.Normal(torch.tensor(samples[-1]), 5).sample()
        # Accept only if the log-density is higher at the proposed sample
        if m.log_prob(xi) > m.log_prob(torch.tensor(samples[-1])):
            samples.append(xi.item())
        else:
            samples.append(samples[-1])
    return samples

x0 = torch.tensor([0.1])  # Initial value

samples = get_samples(m, x0, 42)
plt.plot(samples)

samples = get_samples(m, x0, 10)
plt.plot(samples)
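The greedy rule above only ever moves uphill, so each chain climbs to the nearest mode and then rejects almost every proposal; it is an optimizer, not a sampler. A minimal check of that failure (the diagnostics below are illustrative, not from the original notebook):

# The greedy chain collapses onto a mode: compare its mean with the true
# mixture mean, and count how often it actually moved.
greedy = np.array(get_samples(m, x0, 42))
print("greedy mean:", greedy.mean(), "| true mean:", m.mean.item())
print("fraction of accepted moves:", np.mean(greedy[1:] != greedy[:-1]))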
def get_samples_jump(m, x0, seed=42):
    torch.manual_seed(seed)
    samples = [x0.item()]
    for i in range(10000):
        xi = torch.distributions.Normal(torch.tensor(samples[-1]), 1).sample()
        # Acceptance probability: ratio of target densities at the
        # proposed and current points
        a = m.log_prob(xi) - m.log_prob(torch.tensor(samples[-1]))
        a = torch.exp(a)
        if a > 1:
            # Always accept a move to higher density
            samples.append(xi.item())
        else:
            # Otherwise accept with probability a
            u = torch.rand(1)
            if u < a:
                samples.append(xi.item())
            else:
                samples.append(samples[-1])
    return samples
samples = get_samples_jump(m, x0, 42)
plt.plot(samples)

samples = get_samples_jump(m, x0, 2)
plt.plot(samples)

samples = get_samples_jump(m, x0, 10)
plt.plot(samples)
az.plot_kde(np.array(samples))
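Neighbouring Metropolis draws are strongly correlated, so the 10,000 draws are worth far fewer independent samples. A quick effective-sample-size check with arviz (assuming the single-chain array layout below; this diagnostic is not part of the original notebook):

# arviz expects arrays shaped (chain, draw)
chain = np.asarray(samples)[None, :]
print(az.ess(chain))  # effective sample size of ~10,000 correlated draws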
# Use hamiltorch for HMC
def log_prob(x):
    return m.log_prob(x)

N = 10000
step_size = .3
L = 5

hamiltorch.set_random_seed(123)
params_init = torch.zeros(1)
params_hmc = hamiltorch.sample(log_prob_func=log_prob, params_init=params_init, num_samples=N,
                               step_size=step_size, num_steps_per_sample=L)
Sampling (Sampler.HMC; Integrator.IMPLICIT)
Time spent | Time remain.| Progress | Samples | Samples/sec
0d:00:00:19 | 0d:00:00:00 | #################### | 10000/10000 | 501.14
Acceptance Rate 0.98
plt.plot(torch.stack(params_hmc).numpy())
sns.kdeplot(torch.stack(params_hmc).numpy().flatten())
plt.plot(x, torch.exp(m.log_prob(x)))
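As a sanity check, the HMC draws can be compared against the exact moments of the mixture, which MixtureSameFamily exposes directly; a small sketch:

# Compare HMC sample moments with the exact mixture moments
hmc = torch.stack(params_hmc).flatten()
print("HMC mean:", hmc.mean().item(), "| true mean:", m.mean.item())
print("HMC std :", hmc.std().item(), "| true std :", m.stddev.item())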
import torch
import torch.distributions as dist

def random_walk_metropolis_hastings(target_dist, num_samples, initial_sample, proposal_std):
    samples = [initial_sample]
    current_sample = initial_sample

    for _ in range(num_samples):
        # Propose a new sample from a normal distribution (random walk)
        proposal = torch.normal(current_sample, proposal_std)

        # Calculate the log acceptance ratio
        log_acceptance_ratio = target_dist.log_prob(proposal) - target_dist.log_prob(current_sample)

        # Accept or reject the proposal
        if torch.log(torch.rand(1)) < log_acceptance_ratio:
            current_sample = proposal

        samples.append(current_sample)

    return torch.stack(samples[1:])
# Example usage:
# Define your target distribution (e.g., a normal distribution)
# target_distribution = dist.Normal(3.0, 1.0)
target_distribution = m

# Number of samples to generate
num_samples = 1000

# Initial sample
initial_sample = torch.tensor(0.0)

# Standard deviation of the proposal distribution (controls the step size)
proposal_std = 0.5

# Generate samples using RWMH
samples = random_walk_metropolis_hastings(target_distribution, num_samples, initial_sample, proposal_std)

# Now 'samples' contains samples from the target distribution
plt.plot(samples)
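To see how well 1000 RWMH draws cover both modes, the sample KDE can be overlaid on the exact density, mirroring the earlier HMC plot; a short sketch:

# Overlay the RWMH sample KDE on the exact target density
sns.kdeplot(samples.numpy())
plt.plot(x, torch.exp(m.log_prob(x)))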