better docstrings (#144)

* first example of returns statement in docstring

* added some return statements for pandas API

* last return statements in pandas input

* added returns to bdio docstrings

* few returns statements added to docstring

* finished docstrings for hadrons submodule

* also finished docstrings for json submodule

* finished misc submodule

* added returns in docstrings in openQCD

* made some cosmetic changes to docstrings

* added return and return statement in docstring

* linting

* Improved docstrings of mpm, fits, roots, misc to have return statements

returns added for misc.py

returns added for mpm.py

returns added for fits.py

* linting...

* Some polishing of docstrings
Justus Kuhlmann 2023-01-16 15:57:22 +01:00 committed by GitHub
parent b9cdebd442
commit 26447d658c
14 changed files with 276 additions and 6 deletions


@@ -26,6 +26,11 @@ def epsilon_tensor(i, j, k):
"""Rank-3 epsilon tensor
Based on https://codegolf.stackexchange.com/a/160375
Returns
-------
elem : int
Element (i,j,k) of the epsilon tensor of rank 3
"""
test_set = set((i, j, k))
if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
@@ -38,6 +43,12 @@ def epsilon_tensor_rank4(i, j, k, o):
"""Rank-4 epsilon tensor
Extension of https://codegolf.stackexchange.com/a/160375
Returns
-------
elem : int
Element (i,j,k,o) of the epsilon tensor of rank 4
"""
test_set = set((i, j, k, o))
if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
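
For context, a minimal usage sketch of the two functions documented above (assuming they are imported from pyerrors.dirac; not part of this diff):

    # The epsilon tensors return an int element (+1, -1 or 0) for a given index combination.
    from pyerrors.dirac import epsilon_tensor, epsilon_tensor_rank4

    print(epsilon_tensor(1, 2, 3))           # 1, even permutation
    print(epsilon_tensor(2, 1, 3))           # -1, odd permutation
    print(epsilon_tensor(1, 1, 3))           # 0, repeated index
    print(epsilon_tensor_rank4(1, 2, 3, 4))  # 1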


@@ -129,6 +129,11 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
If True, a quantile-quantile plot of the fit result is generated (default False).
num_grad : bool
Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
Returns
-------
output : Fit_result
Parameters and information on the fitted result.
'''
if priors is not None:
return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
@@ -180,7 +185,12 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
Notes
-----
Based on the orthogonal distance regression module of scipy.
Returns
-------
output : Fit_result
Parameters and information on the fitted result.
'''
output = Fit_result()
@@ -668,6 +678,11 @@ def fit_lin(x, y, **kwargs):
a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
y : list
List of Obs, the dvalues of the Obs are used as yerror for the fit.
Returns
-------
fit_parameters : list[Obs]
List of fitted observables.
"""
def f(a, x):
@@ -687,6 +702,10 @@ def fit_lin(x, y, **kwargs):
def qqplot(x, o_y, func, p):
"""Generates a quantile-quantile plot of the fit result which can be used to
check if the residuals of the fit are gaussian distributed.
Returns
-------
None
"""
residuals = []
@@ -711,7 +730,12 @@ def qqplot(x, o_y, func, p):
def residual_plot(x, y, func, fit_res):
"""Generates a plot which compares the fit to the data and displays the corresponding residuals
Returns
-------
None
"""
sorted_x = sorted(x)
xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
@@ -741,7 +765,13 @@ def residual_plot(x, y, func, fit_res):
def error_band(x, func, beta):
"""Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
Returns
-------
err : np.array(Obs)
Error band for an array of sample values x
"""
cov = covariance(beta)
if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
@@ -765,6 +795,10 @@ def ks_test(objects=None):
----------
objects : list
List of fit results to include in the analysis (optional).
Returns
-------
None
"""
if objects is None:
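
As a rough illustration of the documented Fit_result return value, a hedged sketch of a least_squares call on synthetic data (assumes the usual pyerrors import as pe; data and ensemble name are made up):

    import numpy as np
    import pyerrors as pe

    # Synthetic linear data with artificial errors, for illustration only.
    x = np.arange(1, 6)
    y = [pe.pseudo_Obs(2.0 * xi + 1.0, 0.1, 'ensemble1') for xi in x]
    [o.gamma_method() for o in y]

    def func(a, x):
        return a[0] * x + a[1]

    fit_res = pe.fits.least_squares(x, y, func)  # Fit_result with parameters and fit information
    print(fit_res.fit_parameters)                # list of Obs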


@@ -18,6 +18,11 @@ def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
----------
file_path -- path to the bdio file
bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
Returns
-------
data : List[Obs]
Extracted data
"""
bdio = ctypes.cdll.LoadLibrary(bdio_path)
@@ -169,6 +174,11 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
----------
file_path -- path to the bdio file
bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
Returns
-------
success : int
returns 0 if successful
"""
for obs in obs_list:
@@ -314,6 +324,11 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
Fixed step size between two measurements (default 1)
alternative_ensemble_name : str
Manually overwrite ensemble name
Returns
-------
data : dict
Extracted meson data
"""
start = kwargs.get('start', 1)


@@ -106,6 +106,11 @@ def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
A list of symbols that describe the observables to be written. May be empty.
enstag : str
Enstag that is written to pobs. If None, the ensemble name is used.
Returns
-------
xml_str : str
XML formatted string of the input data
"""
od = {}
@@ -196,6 +201,10 @@ def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz
Enstag that is written to pobs. If None, the ensemble name is used.
gz : bool
If True, the output is a gzipped xml. If False, the output is an xml file.
Returns
-------
None
"""
pobsstring = create_pobs_string(obsl, name, spec, origin, symbol, enstag)
@@ -305,6 +314,14 @@ def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
by "|%s" % (separator_insertion) when constructing the names of the replica.
int: Insert the separator "|" at the position given by separator_insertion.
None (default): Replica names remain unchanged.
Returns
-------
res : list[Obs]
Imported data
or
res : dict
Imported data and meta-data
"""
if not fname.endswith('.xml') and not fname.endswith('.gz'):
@@ -402,6 +419,14 @@ def import_dobs_string(content, noempty=False, full_output=False, separator_inse
True (default): separator "|" is inserted after len(ensname), assuming that the
ensemble name is a prefix to the replica name.
None or False: No separator is inserted.
Returns
-------
res : list[Obs]
Imported data
or
res : dict
Imported data and meta-data
"""
root = et.fromstring(content)
@@ -567,6 +592,14 @@ def read_dobs(fname, noempty=False, full_output=False, gz=True, separator_insert
True (default): separator "|" is inserted after len(ensname), assuming that the
ensemble name is a prefix to the replica name.
None or False: No separator is inserted.
Returns
-------
res : list[Obs]
Imported data
or
res : dict
Imported data and meta-data
"""
if not fname.endswith('.xml') and not fname.endswith('.gz'):
@@ -669,6 +702,11 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=N
enstags : dict
Provide alternative enstag for ensembles in the form enstags = {ename: enstag}
Otherwise, the ensemble name is used.
Returns
-------
xml_str : str
XML string generated from the data
"""
if enstags is None:
enstags = {}
@@ -857,6 +895,10 @@ def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=No
Otherwise, the ensemble name is used.
gz : bool
If True, the output is a gzipped XML. If False, the output is a XML file.
Returns
-------
None
"""
if enstags is None:
enstags = {}
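
A hedged round-trip sketch for the pobs XML routines documented above; the file name and ensemble name are placeholders:

    import pyerrors as pe
    from pyerrors.input.dobs import write_pobs, read_pobs

    obsl = [pe.pseudo_Obs(0.5, 0.01, 'ensemble1')]
    write_pobs(obsl, 'my_pobs', 'example observables')  # returns None, typically writes my_pobs.xml.gz
    reread = read_pobs('my_pobs.xml.gz')                # list[Obs], or a dict with full_output=True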


@@ -74,6 +74,11 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=Non
two-point function. The gammas argument dominates over meson.
idl : range
If specified only configurations in the given range are read in.
Returns
-------
corr : Corr
Correlator of the source sink combination in question.
'''
files, idx = _get_files(path, filestem, idl)
@@ -129,6 +134,11 @@ def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None
List of strings of the diagrams to extract, e.g. ["direct", "box", "cross"].
idl : range
If specified only configurations in the given range are read in.
Returns
-------
result : dict
extracted DistillationContraction data
"""
res_dict = {}
@@ -258,6 +268,11 @@ def read_ExternalLeg_hd5(path, filestem, ens_id, idl=None):
name of the ensemble, required for internal bookkeeping
idl : range
If specified only configurations in the given range are read in.
Returns
-------
result : Npr_matrix
read Cobs-matrix
"""
files, idx = _get_files(path, filestem, idl)
@@ -298,6 +313,11 @@ def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
name of the ensemble, required for internal bookkeeping
idl : range
If specified only configurations in the given range are read in.
Returns
-------
result_dict: dict[Npr_matrix]
extracted Bilinears
"""
files, idx = _get_files(path, filestem, idl)
@@ -354,6 +374,11 @@ def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
If specified only configurations in the given range are read in.
vertices : list
Vertex functions to be extracted.
Returns
-------
result_dict : dict
extracted fourquark matrices
"""
files, idx = _get_files(path, filestem, idl)
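
A hedged sketch of the Hadrons HDF5 reader documented above; path, filestem and ensemble id are placeholders and real HDF5 files are required on disk:

    from pyerrors.input.hadrons import read_meson_hd5

    corr = read_meson_hd5('/path/to/hdf5_files', 'meson_filestem', 'A654', meson='meson_0')
    corr.gamma_method()  # corr is a Corr with one Obs per timeslice
    print(corr)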


@@ -29,6 +29,11 @@ def create_json_string(ol, description='', indent=1):
indent : int
Specify the indentation level of the json file. None or 0 is permissible and
saves disk space.
Returns
-------
json_string : str
String for export to .json(.gz) file
"""
def _gen_data_d_from_list(ol):
@@ -212,6 +217,10 @@ def dump_to_json(ol, fname, description='', indent=1, gz=True):
saves disk space.
gz : bool
If True, the output is a gzipped json. If False, the output is a json file.
Returns
-------
None
"""
jsonstring = create_json_string(ol, description, indent)
@@ -247,6 +256,17 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
full_output : bool
If True, a dict containing auxiliary information and the data is returned.
If False, only the data is returned.
Returns
-------
result : list[Obs]
reconstructed list of observables from the json string
or
result : Obs
only one observable if the list only has one entry
or
result : dict
if full_output=True
"""
def _gen_obsd_from_datad(d):
@@ -447,6 +467,17 @@ def import_json_string(json_string, verbose=True, full_output=False):
full_output : bool
If True, a dict containing auxiliary information and the data is returned.
If False, only the data is returned.
Returns
-------
result : list[Obs]
reconstructed list of observables from the json string
or
result : Obs
only one observable if the list only has one entry
or
result : dict
if full_output=True
"""
return _parse_json_dict(json.loads(json_string), verbose, full_output)
@@ -469,6 +500,17 @@ def load_json(fname, verbose=True, gz=True, full_output=False):
full_output : bool
If True, a dict containing auxiliary information and the data is returned.
If False, only the data is returned.
Returns
-------
result : list[Obs]
reconstructed list of observables from the json string
or
result : Obs
only one observable if the list only has one entry
or
result : dict
if full_output=True
"""
if not fname.endswith('.json') and not fname.endswith('.gz'):
fname += '.json'
@@ -586,6 +628,10 @@ def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=Tr
Specify the structure of the placeholder in exported dict to be reps[0-9]+.
gz : bool
If True, the output is a gzipped json. If False, the output is a json file.
Returns
-------
None
"""
if not isinstance(od, dict):
@@ -683,6 +729,14 @@ def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOB
If False, only the data is returned.
reps : str
Specify the structure of the placeholder in imported dict to be reps[0-9]+.
Returns
-------
data : Obs / list / Corr
Read data
or
data : dict
Read data and meta-data
"""
indata = load_json(fname, verbose=verbose, gz=gz, full_output=True)
description = indata['description']['description']
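
A hedged round-trip sketch for the json export/import documented above; the file name is a placeholder:

    import pyerrors as pe
    from pyerrors.input.json import dump_to_json, load_json

    ol = [pe.pseudo_Obs(0.3, 0.005, 'ensemble1')]
    dump_to_json(ol, 'my_obs', description='example export')  # returns None, writes my_obs.json.gz
    reread = load_json('my_obs')  # a single Obs here, since the exported list has one entry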


@@ -7,7 +7,7 @@ from ..obs import Obs
def read_pbp(path, prefix, **kwargs):
"""Read pbp format from given folder structure.
Parameters
----------
@@ -15,6 +15,11 @@ def read_pbp(path, prefix, **kwargs):
list which contains the first config to be read for each replicum
r_stop : list
list which contains the last config to be read for each replicum
Returns
-------
result : list[Obs]
list of observables read
"""
ls = []


@@ -42,6 +42,11 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
files performed if given.
print_err : bool
Print additional information that is useful for debugging.
Returns
-------
rwms : Obs
Reweighting factors read
"""
known_oqcd_versions = ['1.4', '1.6', '2.0']
if not (version in known_oqcd_versions):
@@ -285,6 +290,11 @@ def extract_t0(path, prefix, dtr_read, xmin, spatial_extent, fit_range=5, **kwar
1, it is assumed that this is due to thermalization and the first measurement belongs
to the first config (default).
If False: The config numbers are assumed to be traj_number // difference
Returns
-------
t0 : Obs
Extracted t0
"""
ls = []
@@ -565,6 +575,11 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="openQCD", **kwargs):
for version=='sfqcd' If False, the Wilson flow is used.
integer_charge : bool
If True, the charge is rounded towards the nearest integer on each config.
Returns
-------
result : Obs
Read topological charge
"""
return _read_flow_obs(path, prefix, c, dtr_cnfg=dtr_cnfg, version=version, obspos=0, **kwargs)
@@ -689,6 +704,11 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
for version=='sfqcd' If False, the Wilson flow is used.
integer_charge : bool
If True, the charge is rounded towards the nearest integer on each config.
Returns
-------
result : Obs
flow observable specified
"""
known_versions = ["openQCD", "sfqcd"]
@@ -923,6 +943,11 @@ def qtop_projection(qtop, target=0):
Topological charge.
target : int
Specifies the topological sector to be reweighted to (default 0)
Returns
-------
reto : Obs
projection to the topological charge sector defined by target
"""
if qtop.reweighted:
raise Exception('You can not use a reweighted observable for reweighting!')
@@ -977,6 +1002,11 @@ def read_qtop_sector(path, prefix, c, target=0, **kwargs):
Zeuthen_flow : bool
(optional) If True, the Zeuthen flow is used for Qtop. Only possible
for version=='sfqcd' If False, the Wilson flow is used.
Returns
-------
reto : Obs
projection to the topological charge sector defined by target
"""
if not isinstance(target, int):
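
A hedged sketch for the openQCD readers documented above; the directories and run prefix are placeholders and real measurement files are needed on disk:

    from pyerrors.input.openQCD import read_rwms, read_qtop, qtop_projection

    rwms = read_rwms('/path/to/ms1_files', 'runname', version='2.0')  # reweighting factors
    qtop = read_qtop('/path/to/ms5_files', 'runname', c=0.35)         # topological charge Obs
    q0 = qtop_projection(qtop, target=0)                              # projection onto the Q=0 sector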


@@ -22,6 +22,10 @@ def to_sql(df, table_name, db, if_exists='fail', gz=True, **kwargs):
How to behave if table already exists. Options 'fail', 'replace', 'append'.
gz : bool
If True the json strings are gzipped.
Returns
-------
None
"""
se_df = _serialize_df(df, gz=gz)
con = sqlite3.connect(db)
@@ -41,6 +45,11 @@ def read_sql(sql, db, auto_gamma=False, **kwargs):
auto_gamma : bool
If True applies the gamma_method to all imported Obs objects with the default parameters for
the error analysis. Default False.
Returns
-------
data : pandas.DataFrame
Dataframe with the content of the sqlite database.
"""
con = sqlite3.connect(db)
extract_df = pd.read_sql(sql, con, **kwargs)
@@ -62,6 +71,10 @@ def dump_df(df, fname, gz=True):
Filename of the output file.
gz : bool
If True, the output is a gzipped csv file. If False, the output is a csv file.
Returns
-------
None
"""
out = _serialize_df(df, gz=False)
@@ -88,6 +101,11 @@ def load_df(fname, auto_gamma=False, gz=True):
the error analysis. Default False.
gz : bool
If True, assumes that data is gzipped. If False, assumes JSON file.
Returns
-------
data : pandas.DataFrame
Dataframe with the content of the file.
"""
if not fname.endswith('.csv') and not fname.endswith('.gz'):
fname += '.csv'
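
A hedged round-trip sketch for the DataFrame helpers documented above; the file name is a placeholder:

    import pandas as pd
    import pyerrors as pe
    from pyerrors.input.pandas import dump_df, load_df

    df = pd.DataFrame({'label': ['example'], 'obs': [pe.pseudo_Obs(0.7, 0.01, 'ensemble1')]})
    dump_df(df, 'my_df')                        # returns None, writes a gzipped csv
    reread = load_df('my_df', auto_gamma=True)  # DataFrame with the Obs column deserialized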


@@ -55,9 +55,15 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
files: list
list of files to be read per replica, default is all.
for non-compact output format, hand the folders to be read here.
check_configs: list[list[int]]
list of list of supposed configs, eg. [range(1,1000)]
for one replicum with 1000 configs
Returns
-------
result: list[Obs]
list of Observables with length T, observable per timeslice.
bb-type correlators have length 1.
"""
if kwargs.get('im'):
im = 1


@@ -10,6 +10,11 @@ def check_idl(idl, che):
idl of the current replicum
che : list
list of configurations to be checked against
Returns
-------
miss_str : str
string with integers of which idls are missing
"""
missing = []
for c in che:
@@ -22,3 +27,4 @@ def check_idl(idl, che):
for i in missing[1:]:
miss_str += "," + str(i)
print(miss_str)
return miss_str
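
A small sketch of check_idl with its new return value; the configuration lists are made up:

    from pyerrors.input.utils import check_idl

    idl = [1, 2, 3, 5, 6]
    expected = range(1, 8)
    missing = check_idl(idl, expected)  # prints the missing configs and returns them, here "4,7"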


@@ -14,6 +14,10 @@ def dump_object(obj, name, **kwargs):
name of the file
path : str
specifies a custom path for the file (default '.')
Returns
-------
None
"""
if 'path' in kwargs:
file_name = kwargs.get('path') + '/' + name + '.p'
@@ -30,6 +34,11 @@ def load_object(path):
----------
path : str
path to the file
Returns
-------
object : Obs
Loaded Object
"""
with open(path, 'rb') as file:
return pickle.load(file)
@@ -48,6 +57,11 @@ def pseudo_Obs(value, dvalue, name, samples=1000):
name of the ensemble for which the Obs is to be generated.
samples: int
number of samples for the Obs (default 1000).
Returns
-------
res : Obs
Generated Observable
"""
if dvalue <= 0.0:
return Obs([np.zeros(samples) + value], [name])
@@ -83,6 +97,11 @@ def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
every dataset.
samples : int
number of samples to be generated for each observable.
Returns
-------
corr_obs : list[Obs]
Generated observable list
"""
assert len(means) == cov.shape[-1]
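
A short sketch of the two generators documented above (values are arbitrary):

    import numpy as np
    from pyerrors.misc import pseudo_Obs, gen_correlated_data

    obs = pseudo_Obs(1.2, 0.05, 'ensemble1')  # single Obs with value 1.2 and error ~0.05
    obs.gamma_method()
    print(obs)

    cov = np.array([[0.01, 0.002], [0.002, 0.02]])
    corr_obs = gen_correlated_data([1.0, 2.0], cov, 'ensemble2')  # list[Obs] with correlated fluctuations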


@@ -21,6 +21,11 @@ def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
matrix pencil parameter which filters noise. The optimal value is expected between
len(data)/3 and 2*len(data)/3. The computation is more expensive the closer p is
to len(data)/2 but could possibly suppress more noise (default len(data)//2).
Returns
-------
energy_levels : list[Obs]
Extracted energy levels
"""
if isinstance(corrs[0], Obs):
data = [corrs]
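
A hedged sketch of matrix_pencil_method on a synthetic single-exponential correlator (data generated with pseudo_Obs purely for illustration):

    import numpy as np
    from pyerrors.misc import pseudo_Obs
    from pyerrors.mpm import matrix_pencil_method

    E = 0.7
    corr = [pseudo_Obs(np.exp(-E * t), 0.001 * np.exp(-E * t), 'ensemble1') for t in range(12)]
    energy_levels = matrix_pencil_method(corr, k=1)  # list[Obs] with the extracted energies
    energy_levels[0].gamma_method()
    print(energy_levels[0])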


@@ -24,7 +24,7 @@ def find_root(d, func, guess=1.0, **kwargs):
Returns
-------
res : Obs
`Obs` valued root of the function.
'''
d_val = np.vectorize(lambda x: x.value)(np.array(d))
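
A small sketch of find_root matching the corrected Returns block; func takes the parameter x first and the Obs-valued input d second (values are arbitrary):

    from pyerrors.misc import pseudo_Obs
    from pyerrors.roots import find_root

    d = pseudo_Obs(2.0, 0.05, 'ensemble1')
    root = find_root(d, lambda x, d: x ** 2 - d, guess=1.0)  # Obs-valued root, here sqrt(d)
    root.gamma_method()
    print(root)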