# Lattice scalar field simulation (see module docstring below).
# The module docstring is assigned explicitly; main() reuses it as the
# argparse description.
__doc__ = "Simulates a lattice scalar field using the Metropolis-Hastings algorithm."
|
|
|
|
import numpy as np
|
|
import argparse
|
|
import time
|
|
import h5py
|
|
|
|
# Wall-clock start time of the run; stored as output-file metadata in main().
starttime = time.asctime()

# Single module-level random generator shared by all Metropolis-Hastings updates.
rng = np.random.default_rng()
|
|
|
|
def potential_v(x, lamb):
    '''Compute the potential function V(x) = lamb * (x^2 - 1)^2 + x^2.'''
    x_sq = x * x
    shifted = x_sq - 1
    return lamb * shifted * shifted + x_sq
|
|
|
|
def neighbor_sum(phi, s):
    '''Compute the sum of the state phi on all 8 neighbors of the site s.

    The lattice is 4-dimensional with periodic boundary conditions, so each
    site has two neighbors (one step forward and one back) per direction.
    '''
    w = len(phi)
    total = 0
    for axis in range(4):
        for step in (1, -1):
            neighbor = list(s)
            neighbor[axis] = (neighbor[axis] + step) % w
            total += phi[tuple(neighbor)]
    return total
|
|
|
|
def scalar_action_diff(phi, site, newphi, lamb, kappa):
    '''Compute the change in the action when phi[site] is changed to newphi.'''
    # kinetic part: nearest-neighbor coupling scaled by the hopping parameter
    coupling = 2 * kappa * neighbor_sum(phi, site) * (phi[site] - newphi)
    # plus the local change in the potential
    return coupling + potential_v(newphi, lamb) - potential_v(phi[site], lamb)
|
|
|
|
def scalar_MH_step(phi, lamb, kappa, delta):
    '''Perform Metropolis-Hastings update on state phi with range delta.

    Returns True if the proposed move was accepted, False otherwise.
    '''
    # pick a uniformly random lattice site and propose a uniform shift
    site = tuple(rng.integers(0, len(phi), 4))
    proposal = phi[site] + rng.uniform(-delta, delta)
    action_change = scalar_action_diff(phi, site, proposal, lamb, kappa)
    # accept downhill moves outright; uphill moves with probability exp(-dS)
    # (short-circuit keeps the number of rng.uniform() draws unchanged)
    accepted = bool(action_change < 0 or rng.uniform() < np.exp(-action_change))
    if accepted:
        phi[site] = proposal
    return accepted
|
|
|
|
def run_scalar_MH(phi, lamb, kappa, delta, n):
    '''Perform n Metropolis-Hastings updates on state phi and return the
    number of accepted transitions.'''
    # scalar_MH_step returns a bool, so summing counts the accepted moves
    return sum(scalar_MH_step(phi, lamb, kappa, delta) for _ in range(n))
|
|
|
|
def batch_estimate(data, observable, k):
    '''Divide data into k batches and apply the function observable to each.

    Parameters
    ----------
    data : array_like
        One-dimensional sequence of samples. If its length is not a multiple
        of k, the trailing remainder is discarded.
    observable : callable
        Function mapping a 1-d array (one batch) to a scalar.
    k : int
        Number of batches (k >= 2 for a finite standard error).

    Returns the mean of the k batch values and their standard error.
    '''
    data = np.asarray(data)
    # BUG FIX: np.reshape(data, (k, -1)) raises ValueError when len(data) is
    # not divisible by k; truncate to the largest usable multiple of k instead.
    usable = k * (len(data) // k)
    batches = np.reshape(data[:usable], (k, -1))
    values = np.apply_along_axis(observable, 1, batches)
    # population std (ddof=0) divided by sqrt(k-1) equals the standard error
    # of the mean of the k batch values
    return np.mean(values), np.std(values) / np.sqrt(k - 1)
|
|
|
|
def _write_output(filename, mean_magn, metadata):
    '''Write the running mean-magnetization estimates plus metadata to HDF5.

    Replaces any previously written "mean-magn" dataset so that periodic
    outputs of an ongoing run always reflect the latest estimates.
    '''
    with h5py.File(filename, 'a') as f:
        if "mean-magn" in f:
            # remove the stale dataset from an earlier periodic output
            del f["mean-magn"]
        dataset = f.create_dataset("mean-magn", chunks=True, data=mean_magn)
        # store some information as metadata for the data set
        for key, value in metadata.items():
            dataset.attrs[key] = value
        dataset.attrs["stop time"] = time.asctime()


def main():
    '''Parse command-line options, run the simulation, and periodically write
    the measured mean magnetization to an HDF5 file.'''
    # use the argparse package to parse command line arguments
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-l', type=float, default=1.5,
                        help='Coupling lambda of the quartic term in the potential')
    parser.add_argument('-k', type=float, default=0.08,
                        help='Hopping parameter kappa of the nearest-neighbor coupling')
    parser.add_argument('-w', type=int, default=3, help='Width w of the square lattice.')
    # delta = 1.5 chosen to have ~ 50% acceptance
    # BUG FIX: '-d' was declared type=int, contradicting its fractional default
    parser.add_argument('-d', type=float, default=1.5,
                        help='Half-width delta of the uniform proposal distribution')
    parser.add_argument('-n', type=int, help='Number N of measurements (indefinite by default)')
    parser.add_argument('-e', type=int, default=100, help='Number E of equilibration sweeps')
    parser.add_argument('-m', type=int, default=2, help='Number M of sweeps per measurement')
    parser.add_argument('-o', type=int, default=30, help='Time in seconds between file outputs')
    parser.add_argument('-f', help='Output filename')
    args = parser.parse_args()

    # perform sanity checks on the arguments
    if args.w is None or args.w < 1:
        parser.error("Please specify a positive lattice size!")
    if args.k is None or args.k <= 0.0:
        parser.error("Please specify a positive kappa!")
    if args.e < 10:
        parser.error("Need at least 10 equilibration sweeps to determine the average cluster size")
    # BUG FIX: any positive proposal width is legitimate (was rejected if < 1)
    if args.d <= 0:
        parser.error("Delta should be positive.")

    # fix parameters
    lamb = args.l
    kappa = args.k
    width = args.w
    num_sites = width**4
    delta = args.d
    equil_sweeps = args.e
    measure_sweeps = args.m
    measurements = args.n

    if args.f is None:
        # construct a filename from the parameters plus a timestamp (to avoid overwriting)
        output_filename = "data_l{}_k{}_w{}_d{}_{}.hdf5".format(
            lamb, kappa, width, delta, time.strftime("%Y%m%d%H%M%S"))
    else:
        output_filename = args.f

    # metadata stored alongside the dataset (h5py attrs cannot hold None, so
    # an indefinite run is recorded as measurements = -1)
    metadata = {
        "lamb": lamb,
        "kappa": kappa,
        "width": width,
        "num_sites": num_sites,
        "delta": delta,
        "equil_sweeps": equil_sweeps,
        "measure_sweeps": measure_sweeps,
        "measurements": measurements if measurements is not None else -1,
        "start time": starttime,
    }

    # equilibrate from a cold start
    phi_state = np.zeros((width, width, width, width))
    run_scalar_MH(phi_state, lamb, kappa, delta, equil_sweeps * num_sites)

    # Measure.
    # BUG FIX: the original wrote the file before `mean_magn` was defined
    # (NameError) and crashed on the advertised default of indefinitely many
    # measurements (args.n is None). Measurements now come first, and results
    # are written out every args.o seconds and when the run completes.
    magnetizations = []
    mean_magn = []
    last_output_time = time.time()
    while measurements is None or len(magnetizations) < measurements:
        run_scalar_MH(phi_state, lamb, kappa, delta, measure_sweeps * num_sites)
        magnetizations.append(np.mean(phi_state))
        finished = measurements is not None and len(magnetizations) == measurements
        if finished or time.time() - last_output_time > args.o:
            # batch_estimate reshapes into 10 equal batches, so only feed it
            # a multiple of 10 samples (and at least 10 of them)
            usable = 10 * (len(magnetizations) // 10)
            if usable >= 10:
                mean, err = batch_estimate(np.abs(np.asarray(magnetizations[:usable])),
                                           lambda x: np.mean(x), 10)
                mean_magn.append([mean, err])
                _write_output(output_filename, mean_magn, metadata)
            last_output_time = time.time()
|
|
|
|
# Entry point: run the simulation only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|