Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-03-15 06:40:24 +01:00
[Fix] Fix further mutable default arguments and smaller issues.
parent 668fa62793
commit 4089238ddd
9 changed files with 44 additions and 28 deletions
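
For readers skimming the diff below: the central change is the standard `None`-sentinel replacement for mutable default arguments. A minimal sketch of the pitfall and of the fix pattern follows; the helper names are invented for illustration and are not part of pyerrors.

```python
# Illustrative sketch only -- `collect_bad` / `collect_good` are made-up names,
# not pyerrors functions.

def collect_bad(entry, bucket=[]):
    # The default list is created once, when the function is defined,
    # so every call without an explicit `bucket` mutates the same object.
    bucket.append(entry)
    return bucket


def collect_good(entry, bucket=None):
    # Sentinel pattern applied throughout this commit: default to None
    # and create a fresh list inside the function body.
    if bucket is None:
        bucket = []
    bucket.append(entry)
    return bucket


print(collect_bad("a"))   # ['a']
print(collect_bad("b"))   # ['a', 'b']  <- state leaks between calls
print(collect_good("a"))  # ['a']
print(collect_good("b"))  # ['b']       <- calls stay independent
```

The remaining hunks are the "smaller issues": `set([x])` is replaced by the equivalent set literal `{x}`, docstring quoting switches from `r'''` to `r"""`, two docstring typos (`separatior_insertion`) are corrected, and a redundant `otype` parameter (immediately overwritten in the function body) is dropped from `_assert_equal_properties`.
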
@@ -1,4 +1,4 @@
-r'''
+r"""
 # What is pyerrors?
 `pyerrors` is a python package for error computation and propagation of Markov chain Monte Carlo data.
 It is based on the gamma method [arXiv:hep-lat/0306017](https://arxiv.org/abs/hep-lat/0306017). Some of its features are:
@@ -476,7 +476,7 @@ The array `cdata` contains information about the contribution of auxiliary obser
 A JSON schema that may be used to verify the correctness of a file with respect to the format definition is stored in ./examples/json_schema.json. The schema is a self-descriptive format definition and contains an exemplary file.
 
 Julia I/O routines for the json.gz format, compatible with [ADerrors.jl](https://gitlab.ift.uam-csic.es/alberto/aderrors.jl), can be found [here](https://github.com/fjosw/ADjson.jl).
-'''
+"""
 from .obs import *
 from .correlators import *
 from .fits import *

@@ -71,7 +71,7 @@ class Fit_result(Sequence):
 
 
 def least_squares(x, y, func, priors=None, silent=False, **kwargs):
-    r'''Performs a non-linear fit to y = func(x).
+    r"""Performs a non-linear fit to y = func(x).
         ```
 
     Parameters
@@ -224,7 +224,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
     chisquare/d.o.f.: 0.5388013574561786 # random
     fit parameters [1.11897846 0.96361162 0.92325319] # random
 
-    '''
+    """
     output = Fit_result()
 
     if (isinstance(x, dict) and isinstance(y, dict) and isinstance(func, dict)):
@@ -504,7 +504,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
 
 
 def total_least_squares(x, y, func, silent=False, **kwargs):
-    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+    r"""Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
 
     Parameters
     ----------
@@ -553,7 +553,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
     -------
     output : Fit_result
         Parameters and information on the fitted result.
-    '''
+    """
 
     output = Fit_result()
 

@@ -1,10 +1,10 @@
-r'''
+r"""
 `pyerrors` includes an `input` submodule in which input routines and parsers for the output of various numerical programs are contained.
 
 # Jackknife samples
 For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
 See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
-'''
+"""
 from . import bdio as bdio
 from . import dobs as dobs
 from . import hadrons as hadrons

@@ -85,7 +85,7 @@ def _dict_to_xmlstring_spaces(d, space=' '):
     return o
 
 
-def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
+def create_pobs_string(obsl, name, spec='', origin='', symbol=None, enstag=None):
     """Export a list of Obs or structures containing Obs to an xml string
     according to the Zeuthen pobs format.
 
@@ -113,6 +113,8 @@ def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
         XML formatted string of the input data
     """
 
+    if symbol is None:
+        symbol = []
     od = {}
     ename = obsl[0].e_names[0]
     names = list(obsl[0].deltas.keys())
@@ -176,7 +178,7 @@ def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
     return rs
 
 
-def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz=True):
+def write_pobs(obsl, fname, name, spec='', origin='', symbol=None, enstag=None, gz=True):
     """Export a list of Obs or structures containing Obs to a .xml.gz file
     according to the Zeuthen pobs format.
 
@@ -206,6 +208,8 @@ def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz
     -------
     None
     """
+    if symbol is None:
+        symbol = []
     pobsstring = create_pobs_string(obsl, name, spec, origin, symbol, enstag)
 
     if not fname.endswith('.xml') and not fname.endswith('.gz'):
@@ -309,7 +313,7 @@ def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
     full_output : bool
         If True, a dict containing auxiliary information and the data is returned.
         If False, only the data is returned as list.
-    separatior_insertion: str or int
+    separator_insertion: str or int
        str: replace all occurences of "separator_insertion" within the replica names
        by "|%s" % (separator_insertion) when constructing the names of the replica.
        int: Insert the separator "|" at the position given by separator_insertion.
@@ -409,7 +413,7 @@ def import_dobs_string(content, full_output=False, separator_insertion=True):
     full_output : bool
         If True, a dict containing auxiliary information and the data is returned.
         If False, only the data is returned as list.
-    separatior_insertion: str, int or bool
+    separator_insertion: str, int or bool
        str: replace all occurences of "separator_insertion" within the replica names
        by "|%s" % (separator_insertion) when constructing the names of the replica.
        int: Insert the separator "|" at the position given by separator_insertion.
@@ -677,7 +681,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
     return o
 
 
-def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
+def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=None, who=None, enstags=None):
     """Generate the string for the export of a list of Obs or structures containing Obs
     to a .xml.gz file according to the Zeuthen dobs format.
 
@@ -708,6 +712,8 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=N
     xml_str : str
         XML string generated from the data
     """
+    if symbol is None:
+        symbol = []
     if enstags is None:
         enstags = {}
     od = {}
@@ -866,7 +872,7 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=N
     return rs
 
 
-def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
+def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=None, who=None, enstags=None, gz=True):
     """Export a list of Obs or structures containing Obs to a .xml.gz file
     according to the Zeuthen dobs format.
 
@@ -900,6 +906,8 @@ def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=No
     -------
     None
     """
+    if symbol is None:
+        symbol = []
     if enstags is None:
         enstags = {}
 

@@ -245,7 +245,7 @@ def extract_t0_hd5(path, filestem, ens_id, obs='Clover energy density', fit_rang
     return fit_t0(t2E_dict, fit_range, plot_fit=kwargs.get('plot_fit'))
 
 
-def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
+def read_DistillationContraction_hd5(path, ens_id, diagrams=None, idl=None):
     """Read hadrons DistillationContraction hdf5 files in given directory structure
 
     Parameters
@@ -265,6 +265,8 @@ def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None
         extracted DistillationContration data
     """
 
+    if diagrams is None:
+        diagrams = ["direct"]
     res_dict = {}
 
     directories, idx = _get_files(path, "data", idl)
@@ -486,7 +488,7 @@ def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
     return result_dict
 
 
-def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
+def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=None):
     """Read hadrons FourquarkFullyConnected hdf5 file and output an array of CObs
 
     Parameters
@@ -508,6 +510,8 @@ def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
         extracted fourquark matrizes
     """
 
+    if vertices is None:
+        vertices = ["VA", "AV"]
     files, idx = _get_files(path, filestem, idl)
 
     mom_in = None
@@ -596,7 +600,7 @@ def _get_lorentz_names(name):
     assert len(name) == 2
 
     if 'S' in name or 'P' in name:
-        if not set(name) <= set(['S', 'P']):
+        if not set(name) <= {'S', 'P'}:
             raise Exception("'" + name + "' is not a Lorentz scalar")
 
         g_names = {'S': 'Identity',
@@ -605,7 +609,7 @@ def _get_lorentz_names(name):
         res.append((g_names[name[0]], g_names[name[1]]))
 
     else:
-        if not set(name) <= set(['V', 'A']):
+        if not set(name) <= {'V', 'A'}:
             raise Exception("'" + name + "' is not a Lorentz scalar")
 
         for ind in lorentz_index:

@@ -596,7 +596,9 @@ def _parse_array_openQCD2(d, n, size, wa, quadrupel=False):
     return arr
 
 
-def _find_files(path, prefix, postfix, ext, known_files=[]):
+def _find_files(path, prefix, postfix, ext, known_files=None):
+    if known_files is None:
+        known_files = []
     found = []
     files = []
 
@@ -1268,7 +1270,7 @@ def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
                 idl_wanted = True
                 if 'idl' in kwargs:
                     idl_wanted = (cnfg in expected_idl[repnum])
-                    left_idl = left_idl - set([cnfg])
+                    left_idl = left_idl - {cnfg}
                 if idl_wanted:
                     cnfgs[repnum].append(cnfg)
 

@@ -176,7 +176,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
     # Exclude folders with different names
     for exc in ls:
         if not fnmatch.fnmatch(exc, prefix + '*'):
-            ls = list(set(ls) - set([exc]))
+            ls = list(set(ls) - {exc})
 
     if not appended:
         ls = sort_names(ls)
@@ -343,7 +343,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
             name_ls = ls
             for exc in name_ls:
                 if not fnmatch.fnmatch(exc, prefix + '*.' + name):
-                    name_ls = list(set(name_ls) - set([exc]))
+                    name_ls = list(set(name_ls) - {exc})
             name_ls = sort_names(name_ls)
             pattern = intern[name]['spec'][quarks][off][w][w2]['pattern']
             deltas = []
@@ -460,7 +460,9 @@ def _extract_corr_type(corr_type):
     return b2b, single
 
 
-def _find_files(rep_path, prefix, compact, files=[]):
+def _find_files(rep_path, prefix, compact, files=None):
+    if files is None:
+        files = []
     sub_ls = []
     if not files == []:
         files.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
@@ -474,12 +476,12 @@ def _find_files(rep_path, prefix, compact, files=[]):
     if compact:
         for exc in sub_ls:
             if not fnmatch.fnmatch(exc, prefix + '*'):
-                sub_ls = list(set(sub_ls) - set([exc]))
+                sub_ls = list(set(sub_ls) - {exc})
         sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
     else:
         for exc in sub_ls:
             if not fnmatch.fnmatch(exc, 'cfg*'):
-                sub_ls = list(set(sub_ls) - set([exc]))
+                sub_ls = list(set(sub_ls) - {exc})
         sub_ls.sort(key=lambda x: int(x[3:]))
     files = sub_ls
     if len(files) == 0:
@@ -665,7 +667,7 @@ def _get_appended_rep_names(ls, prefix, name, ens_name=None):
     new_names = []
     for exc in ls:
         if not fnmatch.fnmatch(exc, prefix + '*.' + name):
-            ls = list(set(ls) - set([exc]))
+            ls = list(set(ls) - {exc})
     ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
     for entry in ls:
         myentry = entry[:-len(name) - 1]

@@ -112,7 +112,7 @@ def check_params(path, param_hash, prefix, param_prefix="parameters_"):
     # Exclude folders with different names
     for exc in ls:
         if not fnmatch.fnmatch(exc, prefix + '*'):
-            ls = list(set(ls) - set([exc]))
+            ls = list(set(ls) - {exc})
 
     ls = sort_names(ls)
     nums = {}

@@ -174,7 +174,7 @@ def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
     return [Obs([dat], [name]) for dat in corr_data.T]
 
 
-def _assert_equal_properties(ol, otype=Obs):
+def _assert_equal_properties(ol):
     otype = type(ol[0])
     for o in ol[1:]:
         if not isinstance(o, otype):