Skip to content

Commit 43a9fe4

Browse files
authored
Merge pull request #37 from numericalEFT/pchou
Add effective mass example
2 parents cc34581 + ef95625 commit 43a9fe4

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

62 files changed

+21352
-148131
lines changed

MCintegration/maps.py

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -323,6 +323,7 @@ def adapt(self, alpha=0.5):
323323
self.max_ninc, dtype=self.dtype, device=self.device)
324324

325325
# avg_f = torch.ones(self.inc.shape[1], dtype=self.dtype, device=self.device)
326+
# print(self.ninc.shape, self.dim)
326327
for d in range(self.dim):
327328
ninc = self.ninc[d].item()
328329

@@ -378,6 +379,8 @@ def adapt(self, alpha=0.5):
378379
)
379380
- 1
380381
) # Find the intervals in the original grid where the target weights fall
382+
interval_indices.clamp_(0, ninc - 1)
383+
381384
# Extract the necessary values using the interval indices
382385
grid_left = self.grid[d, interval_indices]
383386
inc_relevant = self.inc[d, interval_indices]

examples/example_1.py

Lines changed: 31 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -20,13 +20,16 @@
2020
import sys
2121
import traceback
2222
from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
23+
2324
os.environ["NCCL_DEBUG"] = "OFF"
2425
os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
2526
os.environ["GLOG_minloglevel"] = "2"
2627
os.environ["MASTER_ADDR"] = os.getenv("MASTER_ADDR", "localhost")
2728
os.environ["MASTER_PORT"] = os.getenv("MASTER_PORT", "12355")
2829

2930
backend = "nccl"
31+
# backend = "gloo"
32+
3033

3134
def init_process(rank, world_size, fn, backend=backend):
3235
try:
@@ -38,6 +41,7 @@ def init_process(rank, world_size, fn, backend=backend):
3841
dist.destroy_process_group()
3942
raise e
4043

44+
4145
def run_mcmc(rank, world_size):
4246
try:
4347
if rank != 0:
@@ -49,7 +53,6 @@ def unit_circle_integrand(x, f):
4953
f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
5054
return f[:, 0]
5155

52-
5356
def half_sphere_integrand(x, f):
5457
f[:, 0] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2
5558
return f[:, 0]
@@ -58,39 +61,51 @@ def half_sphere_integrand(x, f):
5861
bounds = [(-1, 1), (-1, 1)]
5962
n_eval = 6400000
6063
batch_size = 40000
64+
alpha = 2.0
65+
ninc = 1000
6166
n_therm = 20
6267

63-
device = torch.device(f"cuda:{rank}")
68+
if backend == "gloo":
69+
device = torch.device("cpu")
70+
elif backend == "nccl":
71+
device = torch.device(f"cuda:{rank}")
72+
else:
73+
raise ValueError(f"Invalid backend: {backend}")
6474

65-
vegas_map = Vegas(dim, device=device, ninc=10)
75+
vegas_map = Vegas(dim, device=device, ninc=ninc)
6676

6777
# Monte Carlo and MCMC for Unit Circle
6878
mc_integrator = MonteCarlo(
69-
f=unit_circle_integrand, bounds=bounds, batch_size=batch_size,
70-
device=device
79+
f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, device=device
7180
)
7281
mcmc_integrator = MarkovChainMonteCarlo(
73-
f=unit_circle_integrand, bounds=bounds, batch_size=batch_size, nburnin=n_therm,
74-
device=device
82+
f=unit_circle_integrand,
83+
bounds=bounds,
84+
batch_size=batch_size,
85+
nburnin=n_therm,
86+
device=device,
7587
)
7688

7789
print("Unit Circle Integration Results:")
7890
print("Plain MC:", mc_integrator(n_eval))
7991
print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))
8092

8193
# Train VEGAS map for Unit Circle
82-
vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=0.5)
94+
vegas_map.adaptive_training(batch_size, unit_circle_integrand, alpha=alpha)
8395
vegas_integrator = MonteCarlo(
84-
bounds, f=unit_circle_integrand, maps=vegas_map, batch_size=batch_size,
85-
device=device
96+
bounds,
97+
f=unit_circle_integrand,
98+
maps=vegas_map,
99+
batch_size=batch_size,
100+
device=device,
86101
)
87102
vegasmcmc_integrator = MarkovChainMonteCarlo(
88103
bounds,
89104
f=unit_circle_integrand,
90105
maps=vegas_map,
91106
batch_size=batch_size,
92107
nburnin=n_therm,
93-
device=device
108+
device=device,
94109
)
95110

96111
print("VEGAS:", vegas_integrator(n_eval))
@@ -105,14 +120,15 @@ def half_sphere_integrand(x, f):
105120
print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))
106121

107122
vegas_map.make_uniform()
108-
vegas_map.adaptive_training(batch_size, half_sphere_integrand, epoch=10, alpha=0.5)
123+
vegas_map.adaptive_training(
124+
batch_size, half_sphere_integrand, epoch=10, alpha=alpha
125+
)
109126
vegas_integrator.f = half_sphere_integrand
110127
vegasmcmc_integrator.f = half_sphere_integrand
111128

112129
print("VEGAS:", vegas_integrator(n_eval))
113130
print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
114131

115-
116132
except Exception as e:
117133
print(f"Error in run_mcmc for rank {rank}: {e}")
118134
traceback.print_exc()
@@ -135,6 +151,7 @@ def test_mcmc(world_size):
135151
except Exception as e:
136152
print(f"Error in test_mcmc: {e}")
137153

154+
138155
if __name__ == "__main__":
139156
mp.set_start_method("spawn", force=True)
140-
test_mcmc(4)
157+
test_mcmc(4)

examples/example_2.py

Lines changed: 36 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -20,13 +20,16 @@
2020
import sys
2121
import traceback
2222
from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
23+
2324
os.environ["NCCL_DEBUG"] = "OFF"
2425
os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
2526
os.environ["GLOG_minloglevel"] = "2"
2627
os.environ["MASTER_ADDR"] = os.getenv("MASTER_ADDR", "localhost")
2728
os.environ["MASTER_PORT"] = os.getenv("MASTER_PORT", "12355")
2829

2930
backend = "nccl"
31+
# backend = "gloo"
32+
3033

3134
def init_process(rank, world_size, fn, backend=backend):
3235
try:
@@ -38,6 +41,7 @@ def init_process(rank, world_size, fn, backend=backend):
3841
dist.destroy_process_group()
3942
raise e
4043

44+
4145
def run_mcmc(rank, world_size):
4246
try:
4347
if rank != 0:
@@ -53,35 +57,57 @@ def sharp_integrands(x, f):
5357
f[:, 2] = f[:, 0] * x[:, 0] ** 2
5458
return f.mean(dim=-1)
5559

56-
5760
dim = 4
5861
bounds = [(0, 1)] * dim
5962
n_eval = 6400000
6063
batch_size = 40000
64+
alpha = 2.0
65+
ninc = 1000
6166
n_therm = 20
6267

63-
device = torch.device(f"cuda:{rank}")
68+
if backend == "gloo":
69+
device = torch.device("cpu")
70+
elif backend == "nccl":
71+
device = torch.device(f"cuda:{rank}")
72+
else:
73+
raise ValueError(f"Invalid backend: {backend}")
6474

6575
print(f"Process {rank} using device: {device}")
6676

67-
vegas_map = Vegas(dim, device=device, ninc=1000)
77+
vegas_map = Vegas(dim, device=device, ninc=ninc)
6878

6979
# Plain MC and MCMC
7080
mc_integrator = MonteCarlo(
71-
f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size,device=device
81+
f=sharp_integrands,
82+
f_dim=3,
83+
bounds=bounds,
84+
batch_size=batch_size,
85+
device=device,
7286
)
7387
mcmc_integrator = MarkovChainMonteCarlo(
74-
f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size, nburnin=n_therm,device=device
88+
f=sharp_integrands,
89+
f_dim=3,
90+
bounds=bounds,
91+
batch_size=batch_size,
92+
nburnin=n_therm,
93+
device=device,
7594
)
7695

7796
print("Sharp Peak Integration Results:")
7897
print("Plain MC:", mc_integrator(n_eval))
7998
print("MCMC:", mcmc_integrator(n_eval, mix_rate=0.5))
8099

81100
# Train VEGAS map
82-
vegas_map.adaptive_training(batch_size, sharp_integrands, f_dim=3, epoch=10, alpha=2.0)
101+
vegas_map.adaptive_training(
102+
batch_size, sharp_integrands, f_dim=3, epoch=10, alpha=alpha
103+
)
83104
vegas_integrator = MonteCarlo(
84-
bounds, f=sharp_integrands, f_dim=3, maps=vegas_map, batch_size=batch_size,device=device
105+
bounds,
106+
f=sharp_integrands,
107+
f_dim=3,
108+
maps=vegas_map,
109+
batch_size=batch_size,
110+
device=device,
85111
)
86112
vegasmcmc_integrator = MarkovChainMonteCarlo(
87113
bounds,
@@ -90,13 +116,12 @@ def sharp_integrands(x, f):
90116
maps=vegas_map,
91117
batch_size=batch_size,
92118
nburnin=n_therm,
93-
device=device
119+
device=device,
94120
)
95121

96122
print("VEGAS:", vegas_integrator(n_eval))
97123
print("VEGAS-MCMC:", vegasmcmc_integrator(n_eval, mix_rate=0.5))
98124

99-
100125
except Exception as e:
101126
print(f"Error in run_mcmc for rank {rank}: {e}")
102127
traceback.print_exc()
@@ -119,6 +144,7 @@ def test_mcmc(world_size):
119144
except Exception as e:
120145
print(f"Error in test_mcmc: {e}")
121146

147+
122148
if __name__ == "__main__":
123149
mp.set_start_method("spawn", force=True)
124-
test_mcmc(4)
150+
test_mcmc(4)

examples/example_3.py

Lines changed: 23 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -1,19 +1,20 @@
1-
# Example 3: Integration of log(x)/sqrt(x) using VEGAS
2-
31
import torch
42
import torch.distributed as dist
53
import torch.multiprocessing as mp
64
import os
75
import sys
86
import traceback
97
from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
8+
109
os.environ["NCCL_DEBUG"] = "OFF"
1110
os.environ["TORCH_DISTRIBUTED_DEBUG"] = "OFF"
1211
os.environ["GLOG_minloglevel"] = "2"
1312
os.environ["MASTER_ADDR"] = os.getenv("MASTER_ADDR", "localhost")
1413
os.environ["MASTER_PORT"] = os.getenv("MASTER_PORT", "12355")
1514

1615
backend = "nccl"
16+
# backend = "gloo"
17+
1718

1819
def init_process(rank, world_size, fn, backend=backend):
1920
try:
@@ -25,6 +26,7 @@ def init_process(rank, world_size, fn, backend=backend):
2526
dist.destroy_process_group()
2627
raise e
2728

29+
2830
def run_mcmc(rank, world_size):
2931
try:
3032
if rank != 0:
@@ -44,7 +46,12 @@ def func(x, f):
4446
ninc = 1000
4547
n_therm = 20
4648

47-
device = torch.device(f"cuda:{rank}")
49+
if backend == "gloo":
50+
device = torch.device("cpu")
51+
elif backend == "nccl":
52+
device = torch.device(f"cuda:{rank}")
53+
else:
54+
raise ValueError(f"Invalid backend: {backend}")
4855

4956
print(f"Process {rank} using device: {device}")
5057

@@ -56,26 +63,32 @@ def func(x, f):
5663

5764
print("Integration Results for log(x)/sqrt(x):")
5865

59-
6066
# Plain MC Integration
61-
mc_integrator = MonteCarlo(bounds, func, batch_size=batch_size,device=device)
67+
mc_integrator = MonteCarlo(bounds, func, batch_size=batch_size, device=device)
6268
print("Plain MC Integral Result:", mc_integrator(n_eval))
6369

6470
# MCMC Integration
6571
mcmc_integrator = MarkovChainMonteCarlo(
66-
bounds, func, batch_size=batch_size, nburnin=n_therm,device=device
72+
bounds, func, batch_size=batch_size, nburnin=n_therm, device=device
6773
)
6874
print("MCMC Integral Result:", mcmc_integrator(n_eval, mix_rate=0.5))
6975

7076
# Perform VEGAS integration
71-
vegas_integrator = MonteCarlo(bounds, func, maps=vegas_map, batch_size=batch_size,device=device)
77+
vegas_integrator = MonteCarlo(
78+
bounds, func, maps=vegas_map, batch_size=batch_size, device=device
79+
)
7280
res = vegas_integrator(n_eval)
7381

7482
print("VEGAS Integral Result:", res)
7583

7684
# VEGAS-MCMC Integration
7785
vegasmcmc_integrator = MarkovChainMonteCarlo(
78-
bounds, func, maps=vegas_map, batch_size=batch_size, nburnin=n_therm,device=device
86+
bounds,
87+
func,
88+
maps=vegas_map,
89+
batch_size=batch_size,
90+
nburnin=n_therm,
91+
device=device,
7992
)
8093
res_vegasmcmc = vegasmcmc_integrator(n_eval, mix_rate=0.5)
8194
print("VEGAS-MCMC Integral Result:", res_vegasmcmc)
@@ -102,6 +115,7 @@ def test_mcmc(world_size):
102115
except Exception as e:
103116
print(f"Error in test_mcmc: {e}")
104117

118+
105119
if __name__ == "__main__":
106120
mp.set_start_method("spawn", force=True)
107-
test_mcmc(4)
121+
test_mcmc(4)

0 commit comments

Comments
 (0)