🔳 l2hmc-qcd Example: 4D SU(3)

December 6, 2023

Imports

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
from pathlib import Path
from typing import Optional
import logging

import matplotlib_inline
import matplotlib.pyplot as plt

import torch
import yaml

import ambivalent

import numpy as np

# lovely_tensors: compact, human-readable reprs for torch tensors.
import lovely_tensors as lt
lt.monkey_patch()

# Render inline notebook figures as SVG.
matplotlib_inline.backend_inline.set_matplotlib_formats('svg')

os.environ['CUDA_VISIBLE_DEVICES'] = '6'  # pin this process to one GPU
os.environ['COLORTERM'] = 'truecolor;'  # NOTE(review): trailing ';' looks accidental -- confirm
# Randomize the DDP rendezvous port to avoid collisions between concurrent runs.
port = np.random.randint(5000, 6000)
os.environ['MASTER_PORT'] = f"{port}"

# NOTE(review): get_logging_config is not imported anywhere in this file --
# presumably provided by the notebook environment (e.g. from l2hmc); verify.
log_config = get_logging_config()
log = logging.getLogger('name')  # NOTE(review): literal 'name', not __name__ -- confirm intended
log.setLevel("INFO")

# Import placed here (not at top) so distributed setup runs after
# MASTER_PORT is set above.
from l2hmc.utils.dist import setup_torch
RANK = setup_torch(precision='float64', backend='DDP', seed=4351)

plt.style.use(ambivalent.STYLES["ambivalent"])
plt.rcParams['figure.figsize'] = plt.rcParamsDefault['figure.figsize']

[Sams-MacBook-Pro.local:56401] shmem: mmap: an error occurred while determining whether or not /var/folders/53/5t2nv83136j76rld14vgfh2h0000gq/T//ompi.Sams-MacBook-Pro.503/jf.0/226820096/sm_segment.Sams-MacBook-Pro.503.d850000.0 could be created.

[2024-07-04 11:44:34][INFO][init:136] - Setting logging level to ‘INFO’ on ‘RANK == 0’

[2024-07-04 11:44:34][INFO][init:137] - Setting logging level to ‘CRITICAL’ on ‘RANK != 0’
[2024-07-04 11:44:34][INFO][init:138] - To disable this behavior, and log from ALL ranks (not recommended), set: ‘export LOG_FROM_ALL_RANKS=1’ in your environment, and re-run.
Using device: cpu [2024-07-04 11:44:38,445] [INFO] [real_accelerator.py:203:get_accelerator] Setting ds_accelerator to mps (auto detect) W0704 11:44:39.287000 8461388800 torch/distributed/elastic/multiprocessing/redirects.py:27] NOTE: Redirects are currently not supported in Windows or MacOs. [2024-07-04 11:44:39][WARNING][dist:332] - Setting default dtype: float64

def savefig(fig: plt.Figure, fname: str, outdir: os.PathLike):
    """Save *fig* under *outdir* as both SVG and high-DPI PNG.

    Writes ``outdir/svgs/{fname}.svg`` and ``outdir/pngs/{fname}.png``,
    creating the subdirectories if needed.
    """
    base = Path(outdir)
    targets = {
        'png': base.joinpath('pngs', f"{fname}.png"),
        'svg': base.joinpath('svgs', f"{fname}.svg"),
    }
    for path in targets.values():
        path.parent.mkdir(exist_ok=True, parents=True)
    # SVG (vector) first, then a 300-dpi raster copy.
    fig.savefig(targets['svg'], transparent=True, bbox_inches='tight')
    fig.savefig(targets['png'], transparent=True, bbox_inches='tight', dpi=300)
def plot_metrics(metrics: dict, title: Optional[str] = None, **kwargs):
    """Render and save every metric series in *metrics*.

    Each entry is plotted via ``plot_metric`` and written (SVG + PNG) under
    ``{QUARTO_OUTPUTS_DIR}/plots-4dSU3/{title}``. Extra kwargs are forwarded
    to ``plot_metric``.
    """
    from l2hmc.utils.rich import is_interactive
    from l2hmc.configs import QUARTO_OUTPUTS_DIR
    outdir = Path(f"{QUARTO_OUTPUTS_DIR}/plots-4dSU3/{title}")
    outdir.mkdir(exist_ok=True, parents=True)
    for key, series in metrics.items():
        fig, ax = plot_metric(series, name=key, **kwargs)
        if title is not None:
            ax.set_title(title)
        log.info(f"Saving {key} to {outdir}")
        savefig(fig, f"{key}", outdir=outdir)
        # When running non-interactively (e.g. rendered docs), force display.
        if not is_interactive():
            plt.show()
def plot_metric(
        metric: torch.Tensor,
        name: Optional[str] = None,
        **kwargs,
):
    """Plot a single metric series, dispatching on the rank of its elements.

    Args:
        metric: Non-empty sequence of per-step values (scalars or tensors).
        name: Label used for the y-axis.
        **kwargs: Forwarded to the underlying plotting helper.

    Returns:
        The (fig, ax)-style result of the selected plotting helper.

    Raises:
        ValueError: If the element shape has rank > 2.
    """
    assert len(metric) > 0
    if isinstance(metric[0], (int, float, bool, np.floating)):
        y = np.stack(metric)
        return plot_scalar(y, ylabel=name, **kwargs)

    def _stack(entries) -> np.ndarray:
        # BUGFIX: the original tested `isinstance(metric, torch.Tensor)` --
        # i.e. the container, not its elements -- so lists of tensors always
        # fell through to np.stack, which fails for CUDA / grad tensors.
        # Check the first element instead and route tensors through torch.
        if isinstance(entries[0], torch.Tensor):
            return grab_tensor(torch.stack(list(entries)))
        return np.stack(entries)

    element_shape = metric[0].shape
    if len(element_shape) == 2:
        # (leapfrog, chain) entries -> per-leapfrog plot.
        return plot_leapfrogs(_stack(metric), ylabel=name)
    if len(element_shape) == 1:
        # (chain,) entries -> per-chain plot.
        return plot_chains(_stack(metric), ylabel=name, **kwargs)
    if len(element_shape) == 0:
        # 0-d tensors -> scalar plot.
        return plot_scalar(_stack(metric), ylabel=name, **kwargs)
    raise ValueError(
        f"Unsupported element shape {tuple(element_shape)} for metric {name!r}"
    )
def main():
    """Run the 4D SU(3) example end-to-end: HMC baseline, eval run, training.

    Loads the minimal CPU config, builds an experiment, evaluates plain HMC
    and the (untrained) networks, then takes 20 training steps while
    accumulating per-step metrics.

    Returns:
        tuple: ``(x, history)`` -- the final flattened configuration
        (unflattened before return) and a dict mapping metric names to
        per-step lists.
    """
    # Dropped the unused `train_step` import the original carried here.
    from l2hmc.configs import CONF_DIR
    su3conf = Path(CONF_DIR).joinpath('su3-min-cpu.yaml')
    assert su3conf.is_file()
    # su3conf = Path('su3-min-cpu.yaml')
    with su3conf.open('r') as stream:
        conf = dict(yaml.safe_load(stream))

    log.info(conf)
    overrides = dict_to_list_of_overrides(conf)
    ptExpSU3 = get_experiment(overrides=[*overrides], build_networks=True)
    state = ptExpSU3.trainer.dynamics.random_state(6.0)
    assert isinstance(state.x, torch.Tensor)
    assert isinstance(state.beta, torch.Tensor)
    assert isinstance(ptExpSU3, Experiment)
    # Baseline: generic HMC with fixed step size and a single leapfrog step.
    xhmc, history_hmc = evaluate(
        nsteps=100,
        exp=ptExpSU3,
        beta=state.beta,
        x=state.x,
        eps=0.1,
        nleapfrog=1,
        job_type='hmc',
        nlog=1,
        nprint=2,
        grab=True
    )
    xhmc = ptExpSU3.trainer.dynamics.unflatten(xhmc)
    # checkSU reports deviation from unitarity of the links.
    log.info(f"checkSU(x_hmc): {g.checkSU(xhmc)}")
    plot_metrics(history_hmc.history, title='HMC', marker='.')
    # ptExpSU3.trainer.dynamics.init_weights(
    #     method='uniform',
    #     min=-1e-16,
    #     max=1e-16,
    #     bias=True,
    #     # xeps=0.001,
    #     # veps=0.001,
    # )
    # Evaluate the (untrained) L2HMC sampler for comparison.
    xeval, history_eval = evaluate(
        nsteps=10,
        exp=ptExpSU3,
        beta=6.0,
        x=state.x,
        job_type='eval',
        nlog=1,
        nprint=2,
        grab=True,
    )
    xeval = ptExpSU3.trainer.dynamics.unflatten(xeval)
    log.info(f"checkSU(x_eval): {g.checkSU(xeval)}")
    plot_metrics(history_eval.history, title='Evaluate', marker='.')

    history = {}
    x = state.x
    for step in range(20):
        log.info(f'TRAIN STEP: {step}')
        x, metrics = ptExpSU3.trainer.train_step((x, state.beta))
        if (step > 0 and step % 2 == 0):
            print_dict(metrics, grab=True)
        # Record metrics for every step after the first (the original's
        # `step % 1 == 0` was vacuously true).
        if step > 0:
            for key, val in metrics.items():
                history.setdefault(key, []).append(val)

    x = ptExpSU3.trainer.dynamics.unflatten(x)
    log.info(f"checkSU(x_train): {g.checkSU(x)}")
    plot_metrics(history, title='train', marker='.')
    #
    # for step in range(20):
    #     log.info(f"train step: {step}")
    #     x, metrics = ptExpSU3.trainer.train_step((x, state.beta))
    #     if step % 5 == 0:
    #         print_dict(metrics, grab=True)

    return x, history
# main()
# NOTE(review): `train_step` is imported but never used in this cell.
from l2hmc.experiment.pytorch.experiment import train_step

from l2hmc.configs import CONF_DIR
# Minimal CPU configuration for the 4D SU(3) example.
su3conf = Path(CONF_DIR).joinpath('su3-min-cpu.yaml')
assert su3conf.is_file()
# su3conf = Path('./conf/su3-min-cpu.yaml')
with su3conf.open('r') as stream:
    conf = dict(yaml.safe_load(stream))

log.info(conf)
# Convert the nested config dict into override strings and build the
# experiment (dict_to_list_of_overrides / get_experiment are presumably
# provided by the notebook environment -- not imported in this file; verify).
overrides = dict_to_list_of_overrides(conf)
ptExpSU3 = get_experiment(overrides=[*overrides], build_networks=True)

[2024-07-04 11:44:39][INFO][1705502108:11] - {‘annealing_schedule’: {‘beta_final’: 6.0, ‘beta_init’: 6.0}, ‘backend’: ‘DDP’, ‘conv’: ‘none’, ‘dynamics’: {‘eps’: 0.1, ‘eps_fixed’: True, ‘group’: ‘SU3’, ‘latvolume’: [1, 1, 1, 1], ‘nchains’: 4, ‘nleapfrog’: 1, ‘use_separate_networks’: False, ‘use_split_xnets’: False, ‘verbose’: True}, ‘framework’: ‘pytorch’, ‘init_aim’: False, ‘init_wandb’: False, ‘learning_rate’: {‘clip_norm’: 1.0, ‘lr_init’: 1e-05}, ‘loss’: {‘charge_weight’: 0.0, ‘plaq_weight’: 0.0, ‘rmse_weight’: 1.0, ‘use_mixed_loss’: False}, ‘net_weights’: {‘v’: {‘q’: 1.0, ‘s’: 1.0, ‘t’: 1.0}, ‘x’: {‘q’: 0.0, ‘s’: 0.0, ‘t’: 0.0}}, ‘network’: {‘activation_fn’: ‘tanh’, ‘dropout_prob’: 0.0, ‘units’: [1], ‘use_batch_norm’: False}, ‘restore’: False, ‘save’: False, ‘steps’: {‘log’: 1, ‘nepoch’: 10, ‘nera’: 1, ‘print’: 1, ‘test’: 50}, ‘use_tb’: False, ‘use_wandb’: False}
[2024-07-04 11:44:39][WARNING][trainer:467] - Using torch.float32 on cpu!
[2024-07-04 11:44:39][WARNING][trainer:467] - Using torch.optim.Adam optimizer
[2024-07-04 11:44:39][WARNING][trainer:271] - logging with freq 1 for wandb.watch

# Draw a random gauge configuration at beta = 6.0.
state = ptExpSU3.trainer.dynamics.random_state(6.0)
assert isinstance(state.x, torch.Tensor)
assert isinstance(state.beta, torch.Tensor)
assert isinstance(ptExpSU3, Experiment)
# checkSU reports the deviation of the links from SU(3); values ~1e-8 in the
# logged output below indicate a valid starting configuration.
# (`g` and `Experiment` are presumably notebook-provided -- verify.)
log.info(f"checkSU(x): {g.checkSU(state.x)}")

[2024-07-04 11:44:39][INFO][2027559912:1] - checkSU(x): (tensor[4] f64 x∈[3.013e-08, 2.845e-07] μ=9.867e-08 σ=1.240e-07 [3.862e-08, 4.138e-08, 2.845e-07, 3.013e-08], tensor[4] f64 x∈[5.401e-08, 5.683e-07] μ=1.875e-07 σ=2.539e-07 [6.819e-08, 5.958e-08, 5.683e-07, 5.401e-08])

# Baseline: 10 steps of generic HMC (fixed eps=0.1, one leapfrog step).
xhmc, history_hmc = evaluate(
    nsteps=10,
    exp=ptExpSU3,
    beta=state.beta,
    x=state.x,
    eps=0.1,
    nleapfrog=1,
    job_type='hmc',
    nlog=1,
    nprint=2,
    grab=True
)
# Restore the lattice shape before checking unitarity and plotting.
xhmc = ptExpSU3.trainer.dynamics.unflatten(xhmc)
log.info(f"checkSU(x_hmc): {g.checkSU(xhmc)}")
plot_metrics(history_hmc.history, title='HMC', marker='.')

[2024-07-04 11:44:39][INFO][2419635544:14] - checkSU(x_hmc): (tensor[4] f64 x∈[2.341e-16, 3.604e-16] μ=2.931e-16 σ=5.613e-17 [2.341e-16, 3.604e-16, 2.624e-16, 3.153e-16], tensor[4] f64 x∈[3.575e-16, 5.398e-16] μ=4.280e-16 σ=8.496e-17 [3.575e-16, 4.483e-16, 3.665e-16, 5.398e-16])
[2024-07-04 11:44:39][INFO][1984252221:10] - Saving energy to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:40][INFO][1984252221:10] - Saving logprob to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:40][INFO][1984252221:10] - Saving logdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:40][INFO][1984252221:10] - Saving acc to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:40][INFO][1984252221:10] - Saving sumlogdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:41][INFO][1984252221:10] - Saving acc_mask to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:41][INFO][1984252221:10] - Saving plaqs to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:41][INFO][1984252221:10] - Saving sinQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:41][INFO][1984252221:10] - Saving intQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:42][INFO][1984252221:10] - Saving dQint to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:42][INFO][1984252221:10] - Saving dQsin to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[2024-07-04 11:44:42][INFO][1984252221:10] - Saving loss to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC

# ptExpSU3.trainer.dynamics.init_weights(
#     method='uniform',
#     min=-1e-16,
#     max=1e-16,
#     bias=True,
#     # xeps=0.001,
#     # veps=0.001,
# )
# Evaluate the L2HMC sampler (networks as currently initialized) for 500
# steps from the same starting configuration.
xeval, history_eval = evaluate(
    nsteps=500,
    exp=ptExpSU3,
    beta=6.0,
    x=state.x,
    job_type='eval',
    nlog=2,
    nprint=5,
    grab=True,
)
# Restore the lattice shape before checking unitarity and plotting.
xeval = ptExpSU3.trainer.dynamics.unflatten(xeval)
log.info(f"checkSU(x_eval): {g.checkSU(xeval)}")
plot_metrics(history_eval.history, title='Evaluate', marker='.')

[2024-07-04 11:44:43][INFO][1629827420:20] - checkSU(x_eval): (tensor[4] f64 x∈[9.602e-14, 0.024] μ=0.009 σ=0.012 [0.024, 2.063e-13, 0.014, 9.602e-14], tensor[4] f64 x∈[1.796e-13, 0.034] μ=0.013 σ=0.016 [0.034, 3.935e-13, 0.016, 1.796e-13])
[2024-07-04 11:44:43][INFO][1984252221:10] - Saving energy to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:43][INFO][1984252221:10] - Saving logprob to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:44][INFO][1984252221:10] - Saving logdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:44][INFO][1984252221:10] - Saving sldf to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:44][INFO][1984252221:10] - Saving sldb to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:44][INFO][1984252221:10] - Saving sld to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:45][INFO][1984252221:10] - Saving xeps to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:45][INFO][1984252221:10] - Saving veps to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:45][INFO][1984252221:10] - Saving acc to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:45][INFO][1984252221:10] - Saving sumlogdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:45][INFO][1984252221:10] - Saving beta to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:46][INFO][1984252221:10] - Saving acc_mask to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:46][INFO][1984252221:10] - Saving plaqs to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:46][INFO][1984252221:10] - Saving sinQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:46][INFO][1984252221:10] - Saving intQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:47][INFO][1984252221:10] - Saving dQint to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:47][INFO][1984252221:10] - Saving dQsin to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[2024-07-04 11:44:47][INFO][1984252221:10] - Saving loss to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate

# Re-check the original starting configuration for comparison with the
# post-evaluation results above.
x = ptExpSU3.trainer.dynamics.unflatten(state.x)
log.info(f"checkSU(x_train): {g.checkSU(x)}")
# plot_metrics(history, title='train', marker='.')

[2024-07-04 11:44:49][INFO][3331171632:2] - checkSU(x_train): (tensor[4] f64 x∈[3.013e-08, 2.845e-07] μ=9.867e-08 σ=1.240e-07 [3.862e-08, 4.138e-08, 2.845e-07, 3.013e-08], tensor[4] f64 x∈[5.401e-08, 5.683e-07] μ=1.875e-07 σ=2.539e-07 [6.819e-08, 5.958e-08, 5.683e-07, 5.401e-08])

No matching items
Back to top

Citation

BibTeX citation:
@online{foreman2023,
  author = {Foreman, Sam},
  title = {Personal {Website}},
  date = {2023-12-06},
  url = {https://samforeman.me},
  langid = {en}
}
For attribution, please cite this work as:
Foreman, Sam. 2023. “Personal Website.” December 6, 2023. https://samforeman.me.