"""Simulates a lattice scalar field using the Metropolis-Hastings algorithm."""
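# Example invocation (illustrative; the script name is hypothetical, the flags
# are the ones defined in main() below):
#   python scalar_mh.py -l 1.5 -k 0.08 -w 3 -n 1000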
import numpy as np
import argparse
import time
import h5py
rng = np.random.default_rng()
def potential_v(x, lamb):
    '''Compute the potential function V(x) = lamb*(x^2-1)^2 + x^2.'''
    return lamb*(x*x-1)*(x*x-1)+x*x
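# Sanity check (illustrative): the quartic term vanishes at x = +-1, so
# potential_v(1.0, lamb) == 1.0 for any lamb.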
def neighbor_sum(phi, s):
    '''Compute the sum of the state phi on all 8 neighbors of the site s.'''
    w = len(phi)
    return (phi[(s[0]+1)%w,s[1],s[2],s[3]] + phi[(s[0]-1)%w,s[1],s[2],s[3]] +
            phi[s[0],(s[1]+1)%w,s[2],s[3]] + phi[s[0],(s[1]-1)%w,s[2],s[3]] +
            phi[s[0],s[1],(s[2]+1)%w,s[3]] + phi[s[0],s[1],(s[2]-1)%w,s[3]] +
            phi[s[0],s[1],s[2],(s[3]+1)%w] + phi[s[0],s[1],s[2],(s[3]-1)%w])
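# The modulo on each index implements periodic boundary conditions, so every
# site of the w^4 lattice has exactly 8 nearest neighbors (2 per dimension).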
def scalar_action_diff(phi, site, newphi, lamb, kappa):
    '''Compute the change in the action when phi[site] is changed to newphi.'''
    return (2 * kappa * neighbor_sum(phi,site) * (phi[site] - newphi) +
            potential_v(newphi,lamb) - potential_v(phi[site],lamb))
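# This difference is consistent with the lattice action implied by the code,
#   S[phi] = sum_x ( V(phi_x) - 2 * kappa * phi_x * sum_mu phi_{x+mu} ),
# where the inner sum runs over the 4 forward neighbors of x: changing phi_x
# touches each of its 8 neighbor couplings exactly once.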
def scalar_MH_step(phi, lamb, kappa, delta):
    '''Perform a Metropolis-Hastings update on state phi with proposal range delta.'''
    site = tuple(rng.integers(0,len(phi),4))
    newphi = phi[site] + rng.uniform(-delta,delta)
    deltaS = scalar_action_diff(phi,site,newphi,lamb,kappa)
    if deltaS < 0 or rng.uniform() < np.exp(-deltaS):
        phi[site] = newphi
        return True
    return False
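# The uniform proposal is symmetric under phi[site] <-> newphi, so the plain
# Metropolis acceptance probability min(1, exp(-deltaS)) used here is enough
# to satisfy detailed balance.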
def run_scalar_MH(phi, lamb, kappa, delta, n):
    '''Perform n Metropolis-Hastings updates on state phi and return the number of accepted transitions.'''
    total_accept = 0
    for _ in range(n):
        total_accept += scalar_MH_step(phi,lamb,kappa,delta)
    return total_accept
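# Illustrative tuning sketch (hypothetical parameter values): delta is chosen
# so that roughly half of the proposals are accepted, e.g.
#   phi = np.zeros((4,4,4,4))
#   acc = run_scalar_MH(phi, 1.5, 0.08, 1.5, 10 * phi.size)
#   print("acceptance rate:", acc / (10 * phi.size))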
def batch_estimate(data, observable, k):
    '''Divide data into k batches and apply the function observable to each.
    Returns the mean and standard error.'''
    batches = np.reshape(data,(k,-1))
    values = np.apply_along_axis(observable, 1, batches)
    return np.mean(values), np.std(values)/np.sqrt(k-1)
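# Illustrative use on synthetic data (names are arbitrary):
#   samples = rng.normal(size=1000)
#   mean, err = batch_estimate(samples, np.mean, 10)
# Batching the time series reduces the bias that autocorrelated samples induce
# in the naive standard error.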
def main():
    # use the argparse package to parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-l', type=float, default=1.5, help='Coupling lambda of the quartic term in the potential')
    parser.add_argument('-k', type=float, default=0.08, help='Hopping parameter kappa of the nearest-neighbor coupling')
    parser.add_argument('-w', type=int, default=3, help='Width w of the hypercubic lattice (w^4 sites)')
    # delta = 1.5 chosen to have ~ 50% acceptance
    parser.add_argument('-d', type=float, default=1.5, help='Half-width delta of the uniform proposal distribution')
    parser.add_argument('-n', type=int, help='Number N of measurements (indefinite by default)')
    parser.add_argument('-e', type=int, default=100, help='Number E of equilibration sweeps')
    parser.add_argument('-m', type=int, default=2, help='Number M of sweeps per measurement')
    parser.add_argument('-o', type=int, default=30, help='Time in seconds between file outputs')
    parser.add_argument('-f', help='Output filename')
    parser.add_argument('-p', type=str, default='data_', help='Data output filename prefix')
    args = parser.parse_args()
    # perform sanity checks on the arguments
    if args.w < 1:
        parser.error("Please specify a positive lattice size!")
    if args.k <= 0.0:
        parser.error("Please specify a positive kappa!")
    if args.e < 10:
        parser.error("Need at least 10 equilibration sweeps!")
    if args.d <= 0.0:
        parser.error("Please specify a positive delta!")
    # fix parameters
    lamb = args.l
    kappa = args.k
    width = args.w
    num_sites = width**4
    delta = args.d
    equil_sweeps = args.e
    measure_sweeps = args.m
    measurements = args.n or 0
    if args.f is None:
        # construct a filename from the parameters plus a timestamp (to avoid overwriting)
        output_filename = "{}l{}_k{}_w{}_d{}_{}.hdf5".format(args.p,lamb,kappa,width,delta,time.strftime("%Y%m%d%H%M%S"))
    else:
        output_filename = args.f
    # Equilibration
    phi_state = np.zeros((width,width,width,width))
    run_scalar_MH(phi_state,lamb,kappa,delta,equil_sweeps * num_sites)
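    # One sweep corresponds to num_sites = w^4 single-site updates, so the cold
    # start (phi = 0 everywhere) gets equil_sweeps full sweeps to thermalize.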
    # Last things before we go-go
    measurements_done = 0
    starttime = time.time()
    last_output_time = time.time()
    # Prepare file
    with h5py.File(output_filename,'a') as f:
        if "magnetizations" not in f:
            dataset = f.create_dataset("magnetizations", (0,), maxshape=(None,), dtype='f', chunks=True, compression="gzip")
            # store some information as metadata for the data set
            dataset.attrs["lamb"] = lamb
            dataset.attrs["kappa"] = kappa
            dataset.attrs["width"] = width
            dataset.attrs["num_sites"] = num_sites
            dataset.attrs["delta"] = delta
            dataset.attrs["equil_sweeps"] = equil_sweeps
            dataset.attrs["measure_sweeps"] = measure_sweeps
            dataset.attrs["measurements"] = measurements
            dataset.attrs["start time"] = starttime
        else:
            # continue taking measurements (e.g. when previous run was interrupted)
            measurements_done = len(f["magnetizations"])
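    # The file is reopened only briefly for each flush below; keeping it closed
    # during the long MH runs means the data already on disk survives an
    # interruption of the simulation.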
    # Measure
    # buffer the measurements in a list, flushed to file every args.o seconds
    magnetizations = []
    while True:
        run_scalar_MH(phi_state,lamb,kappa,delta,measure_sweeps * num_sites)
        magnetizations.append(np.mean(phi_state))
        measurements_done += 1
        if measurements_done == measurements or time.time() - last_output_time > args.o:
            # time to output data again
            with h5py.File(output_filename,'a') as f:
                # enlarge the data set
                dataset = f["magnetizations"]
                dataset.resize(len(dataset) + len(magnetizations), axis=0)
                # copy the data to the new space at the end of the dataset
                dataset[-len(magnetizations):] = magnetizations
                dataset.attrs["current_time"] = time.time()
            magnetizations.clear()
            if measurements_done == measurements:
                break
            else:
                last_output_time = time.time()
    # TODO: Save if n is unset, m is set.
if __name__ == "__main__":
    main()