From 4b0451613c2922a50ccb81c005006a15349222d6 Mon Sep 17 00:00:00 2001
From: fjosw
Date: Sat, 7 Jan 2023 10:24:16 +0000
Subject: [PATCH] Documentation updated

---
 docs/pyerrors/input/json.html    | 1885 ++++----
 docs/pyerrors/input/openQCD.html |  778 ++--
 docs/pyerrors/misc.html          |    2 +-
 docs/pyerrors/obs.html           | 7043 +++++++++++++++---------------
 4 files changed, 4839 insertions(+), 4869 deletions(-)

diff --git a/docs/pyerrors/input/json.html b/docs/pyerrors/input/json.html
index 09d6ce28..469291f5 100644
--- a/docs/pyerrors/input/json.html
+++ b/docs/pyerrors/input/json.html
@@ -134,669 +134,661 @@
  41            for r_name in ol[0].e_content[name]:
  42                rd = {}
  43                rd['name'] = r_name
- 44                if ol[0].is_merged.get(r_name, False):
- 45                    rd['is_merged'] = True
- 46                rd['deltas'] = []
- 47                offsets = [o.r_values[r_name] - o.value for o in ol]
- 48                deltas = np.column_stack([ol[oi].deltas[r_name] + offsets[oi] for oi in range(No)])
- 49                for i in range(len(ol[0].idl[r_name])):
- 50                    rd['deltas'].append([ol[0].idl[r_name][i]])
- 51                    rd['deltas'][-1] += deltas[i].tolist()
- 52                ed['replica'].append(rd)
- 53            dl.append(ed)
- 54        return dl
- 55
- 56    def _gen_cdata_d_from_list(ol):
- 57        dl = []
- 58        for name in ol[0].cov_names:
- 59            ed = {}
- 60            ed['id'] = name
- 61            ed['layout'] = str(ol[0].covobs[name].cov.shape).lstrip('(').rstrip(')').rstrip(',')
- 62            ed['cov'] = list(np.ravel(ol[0].covobs[name].cov))
- 63            ncov = ol[0].covobs[name].cov.shape[0]
- 64            ed['grad'] = []
- 65            for i in range(ncov):
- 66                ed['grad'].append([])
- 67                for o in ol:
- 68                    ed['grad'][-1].append(o.covobs[name].grad[i][0])
- 69            dl.append(ed)
- 70        return dl
- 71
- 72    def write_Obs_to_dict(o):
- 73        d = {}
- 74        d['type'] = 'Obs'
- 75        d['layout'] = '1'
- 76        if o.tag:
- 77            d['tag'] = [o.tag]
- 78        if o.reweighted:
- 79            d['reweighted'] = o.reweighted
- 80        d['value'] = [o.value]
- 81        data = _gen_data_d_from_list([o])
- 82        if len(data) > 0:
- 83            d['data'] = data
- 84        cdata = _gen_cdata_d_from_list([o])
- 85        if len(cdata) > 0:
- 86            d['cdata'] = cdata
- 87        return d
- 88
- 89    def write_List_to_dict(ol):
- 90        _assert_equal_properties(ol)
- 91        d = {}
- 92        d['type'] = 'List'
- 93        d['layout'] = '%d' % len(ol)
- 94        taglist = [o.tag for o in ol]
- 95        if np.any([tag is not None for tag in taglist]):
- 96            d['tag'] = taglist
- 97        if ol[0].reweighted:
- 98            d['reweighted'] = ol[0].reweighted
- 99        d['value'] = [o.value for o in ol]
-100        data = _gen_data_d_from_list(ol)
-101        if len(data) > 0:
-102            d['data'] = data
-103        cdata = _gen_cdata_d_from_list(ol)
-104        if len(cdata) > 0:
-105            d['cdata'] = cdata
-106        return d
-107
-108    def write_Array_to_dict(oa):
-109        ol = np.ravel(oa)
-110        _assert_equal_properties(ol)
-111        d = {}
-112        d['type'] = 'Array'
-113        d['layout'] = str(oa.shape).lstrip('(').rstrip(')').rstrip(',')
-114        taglist = [o.tag for o in ol]
-115        if np.any([tag is not None for tag in taglist]):
-116            d['tag'] = taglist
-117        if ol[0].reweighted:
-118            d['reweighted'] = ol[0].reweighted
-119        d['value'] = [o.value for o in ol]
-120        data = _gen_data_d_from_list(ol)
-121        if len(data) > 0:
-122            d['data'] = data
-123        cdata = _gen_cdata_d_from_list(ol)
-124        if len(cdata) > 0:
-125            d['cdata'] = cdata
-126        return d
-127
-128    def _nan_Obs_like(obs):
-129        samples = []
-130        names = []
-131        idl = []
-132        for key, value in obs.idl.items():
-133            samples.append([np.nan] * len(value))
-134            names.append(key)
-135            idl.append(value)
-136        my_obs = Obs(samples, names, idl)
-137        my_obs._covobs = obs._covobs
-138        for name in obs._covobs:
-139            my_obs.names.append(name)
-140        my_obs.reweighted = obs.reweighted
-141        my_obs.is_merged = obs.is_merged
-142        return my_obs
-143
-144    def write_Corr_to_dict(my_corr):
-145        first_not_none = next(i for i, j in enumerate(my_corr.content) if np.all(j))
-146        dummy_array = np.empty((my_corr.N, my_corr.N), dtype=object)
-147        dummy_array[:] = _nan_Obs_like(my_corr.content[first_not_none].ravel()[0])
-148        content = [o if o is not None else dummy_array for o in my_corr.content]
-149        dat = write_Array_to_dict(np.array(content, dtype=object))
-150        dat['type'] = 'Corr'
-151        corr_meta_data = str(my_corr.tag)
-152        if 'tag' in dat.keys():
-153            dat['tag'].append(corr_meta_data)
-154        else:
-155            dat['tag'] = [corr_meta_data]
-156        taglist = dat['tag']
-157        dat['tag'] = {}  # tag is now a dictionary, that contains the previous taglist in the key "tag"
-158        dat['tag']['tag'] = taglist
-159        if my_corr.prange is not None:
-160            dat['tag']['prange'] = my_corr.prange
-161        return dat
+ 44                rd['deltas'] = []
+ 45                offsets = [o.r_values[r_name] - o.value for o in ol]
+ 46                deltas = np.column_stack([ol[oi].deltas[r_name] + offsets[oi] for oi in range(No)])
+ 47                for i in range(len(ol[0].idl[r_name])):
+ 48                    rd['deltas'].append([ol[0].idl[r_name][i]])
+ 49                    rd['deltas'][-1] += deltas[i].tolist()
+ 50                ed['replica'].append(rd)
+ 51            dl.append(ed)
+ 52        return dl
+ 53
+ 54    def _gen_cdata_d_from_list(ol):
+ 55        dl = []
+ 56        for name in ol[0].cov_names:
+ 57            ed = {}
+ 58            ed['id'] = name
+ 59            ed['layout'] = str(ol[0].covobs[name].cov.shape).lstrip('(').rstrip(')').rstrip(',')
+ 60            ed['cov'] = list(np.ravel(ol[0].covobs[name].cov))
+ 61            ncov = ol[0].covobs[name].cov.shape[0]
+ 62            ed['grad'] = []
+ 63            for i in range(ncov):
+ 64                ed['grad'].append([])
+ 65                for o in ol:
+ 66                    ed['grad'][-1].append(o.covobs[name].grad[i][0])
+ 67            dl.append(ed)
+ 68        return dl
+ 69
+ 70    def write_Obs_to_dict(o):
+ 71        d = {}
+ 72        d['type'] = 'Obs'
+ 73        d['layout'] = '1'
+ 74        if o.tag:
+ 75            d['tag'] = [o.tag]
+ 76        if o.reweighted:
+ 77            d['reweighted'] = o.reweighted
+ 78        d['value'] = [o.value]
+ 79        data = _gen_data_d_from_list([o])
+ 80        if len(data) > 0:
+ 81            d['data'] = data
+ 82        cdata = _gen_cdata_d_from_list([o])
+ 83        if len(cdata) > 0:
+ 84            d['cdata'] = cdata
+ 85        return d
+ 86
+ 87    def write_List_to_dict(ol):
+ 88        _assert_equal_properties(ol)
+ 89        d = {}
+ 90        d['type'] = 'List'
+ 91        d['layout'] = '%d' % len(ol)
+ 92        taglist = [o.tag for o in ol]
+ 93        if np.any([tag is not None for tag in taglist]):
+ 94            d['tag'] = taglist
+ 95        if ol[0].reweighted:
+ 96            d['reweighted'] = ol[0].reweighted
+ 97        d['value'] = [o.value for o in ol]
+ 98        data = _gen_data_d_from_list(ol)
+ 99        if len(data) > 0:
+100            d['data'] = data
+101        cdata = _gen_cdata_d_from_list(ol)
+102        if len(cdata) > 0:
+103            d['cdata'] = cdata
+104        return d
+105
+106    def write_Array_to_dict(oa):
+107        ol = np.ravel(oa)
+108        _assert_equal_properties(ol)
+109        d = {}
+110        d['type'] = 'Array'
+111        d['layout'] = str(oa.shape).lstrip('(').rstrip(')').rstrip(',')
+112        taglist = [o.tag for o in ol]
+113        if np.any([tag is not None for tag in taglist]):
+114            d['tag'] = taglist
+115        if ol[0].reweighted:
+116            d['reweighted'] = ol[0].reweighted
+117        d['value'] = [o.value for o in ol]
+118        data = _gen_data_d_from_list(ol)
+119        if len(data) > 0:
+120            d['data'] = data
+121        cdata = _gen_cdata_d_from_list(ol)
+122        if len(cdata) > 0:
+123            d['cdata'] = cdata
+124        return d
+125
+126    def _nan_Obs_like(obs):
+127        samples = []
+128        names = []
+129        idl = []
+130        for key, value in obs.idl.items():
+131            samples.append([np.nan] * len(value))
+132            names.append(key)
+133            idl.append(value)
+134        my_obs = Obs(samples, names, idl)
+135        my_obs._covobs = obs._covobs
+136        for name in obs._covobs:
+137            my_obs.names.append(name)
+138        my_obs.reweighted = obs.reweighted
+139        return my_obs
+140
+141    def write_Corr_to_dict(my_corr):
+142        first_not_none = next(i for i, j in enumerate(my_corr.content) if np.all(j))
+143        dummy_array = np.empty((my_corr.N, my_corr.N), dtype=object)
+144        dummy_array[:] = _nan_Obs_like(my_corr.content[first_not_none].ravel()[0])
+145        content = [o if o is not None else dummy_array for o in my_corr.content]
+146        dat = write_Array_to_dict(np.array(content, dtype=object))
+147        dat['type'] = 'Corr'
+148        corr_meta_data = str(my_corr.tag)
+149        if 'tag' in dat.keys():
+150            dat['tag'].append(corr_meta_data)
+151        else:
+152            dat['tag'] = [corr_meta_data]
+153        taglist = dat['tag']
+154        dat['tag'] = {}  # tag is now a dictionary, that contains the previous taglist in the key "tag"
+155        dat['tag']['tag'] = taglist
+156        if my_corr.prange is not None:
+157            dat['tag']['prange'] = my_corr.prange
+158        return dat
+159
+160    if not isinstance(ol, list):
+161        ol = [ol]
 162
-163    if not isinstance(ol, list):
-164        ol = [ol]
-165
-166    d = {}
-167    d['program'] = 'pyerrors %s' % (pyerrorsversion.__version__)
-168    d['version'] = '1.1'
-169    d['who'] = getpass.getuser()
-170    d['date'] = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S %z')
-171    d['host'] = socket.gethostname() + ', ' + platform.platform()
+163    d = {}
+164    d['program'] = 'pyerrors %s' % (pyerrorsversion.__version__)
+165    d['version'] = '1.1'
+166    d['who'] = getpass.getuser()
+167    d['date'] = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S %z')
+168    d['host'] = socket.gethostname() + ', ' + platform.platform()
+169
+170    if description:
+171        d['description'] = description
 172
-173    if description:
-174        d['description'] = description
-175
-176    d['obsdata'] = []
-177    for io in ol:
-178        if isinstance(io, Obs):
-179            d['obsdata'].append(write_Obs_to_dict(io))
-180        elif isinstance(io, list):
-181            d['obsdata'].append(write_List_to_dict(io))
-182        elif isinstance(io, np.ndarray):
-183            d['obsdata'].append(write_Array_to_dict(io))
-184        elif isinstance(io, Corr):
-185            d['obsdata'].append(write_Corr_to_dict(io))
-186        else:
-187            raise Exception("Unkown datatype.")
-188
-189    def _jsonifier(o):
-190        if isinstance(o, np.int64):
-191            return int(o)
-192        raise TypeError('%r is not JSON serializable' % o)
-193
-194    if indent:
-195        return json.dumps(d, indent=indent, ensure_ascii=False, default=_jsonifier, write_mode=json.WM_SINGLE_LINE_ARRAY)
-196    else:
-197        return json.dumps(d, indent=indent, ensure_ascii=False, default=_jsonifier, write_mode=json.WM_COMPACT)
-198
+173    d['obsdata'] = []
+174    for io in ol:
+175        if isinstance(io, Obs):
+176            d['obsdata'].append(write_Obs_to_dict(io))
+177        elif isinstance(io, list):
+178            d['obsdata'].append(write_List_to_dict(io))
+179        elif isinstance(io, np.ndarray):
+180            d['obsdata'].append(write_Array_to_dict(io))
+181        elif isinstance(io, Corr):
+182            d['obsdata'].append(write_Corr_to_dict(io))
+183        else:
+184            raise Exception("Unkown datatype.")
+185
+186    def _jsonifier(o):
+187        if isinstance(o, np.int64):
+188            return int(o)
+189        raise TypeError('%r is not JSON serializable' % o)
+190
+191    if indent:
+192        return json.dumps(d, indent=indent, ensure_ascii=False, default=_jsonifier, write_mode=json.WM_SINGLE_LINE_ARRAY)
+193    else:
+194        return json.dumps(d, indent=indent, ensure_ascii=False, default=_jsonifier, write_mode=json.WM_COMPACT)
+195
+196
+197def dump_to_json(ol, fname, description='', indent=1, gz=True):
+198    """Export a list of Obs or structures containing Obs to a .json(.gz) file
 199
-200def dump_to_json(ol, fname, description='', indent=1, gz=True):
-201    """Export a list of Obs or structures containing Obs to a .json(.gz) file
-202
-203    Parameters
-204    ----------
-205    ol : list
-206        List of objects that will be exported. At the moment, these objects can be
-207        either of: Obs, list, numpy.ndarray, Corr.
-208        All Obs inside a structure have to be defined on the same set of configurations.
-209    fname : str
-210        Filename of the output file.
-211    description : str
-212        Optional string that describes the contents of the json file.
-213    indent : int
-214        Specify the indentation level of the json file. None or 0 is permissible and
-215        saves disk space.
-216    gz : bool
-217        If True, the output is a gzipped json. If False, the output is a json file.
-218    """
-219
-220    jsonstring = create_json_string(ol, description, indent)
+200    Parameters
+201    ----------
+202    ol : list
+203        List of objects that will be exported. At the moment, these objects can be
+204        either of: Obs, list, numpy.ndarray, Corr.
+205        All Obs inside a structure have to be defined on the same set of configurations.
+206    fname : str
+207        Filename of the output file.
+208    description : str
+209        Optional string that describes the contents of the json file.
+210    indent : int
+211        Specify the indentation level of the json file. None or 0 is permissible and
+212        saves disk space.
+213    gz : bool
+214        If True, the output is a gzipped json. If False, the output is a json file.
+215    """
+216
+217    jsonstring = create_json_string(ol, description, indent)
+218
+219    if not fname.endswith('.json') and not fname.endswith('.gz'):
+220        fname += '.json'
 221
-222    if not fname.endswith('.json') and not fname.endswith('.gz'):
-223        fname += '.json'
-224
-225    if gz:
-226        if not fname.endswith('.gz'):
-227            fname += '.gz'
-228
-229        fp = gzip.open(fname, 'wb')
-230        fp.write(jsonstring.encode('utf-8'))
-231    else:
-232        fp = open(fname, 'w', encoding='utf-8')
-233        fp.write(jsonstring)
-234    fp.close()
-235
-236
-237def _parse_json_dict(json_dict, verbose=True, full_output=False):
-238    """Reconstruct a list of Obs or structures containing Obs from a dict that
-239    was built out of a json string.
+222    if gz:
+223        if not fname.endswith('.gz'):
+224            fname += '.gz'
+225
+226        fp = gzip.open(fname, 'wb')
+227        fp.write(jsonstring.encode('utf-8'))
+228    else:
+229        fp = open(fname, 'w', encoding='utf-8')
+230        fp.write(jsonstring)
+231    fp.close()
+232
+233
+234def _parse_json_dict(json_dict, verbose=True, full_output=False):
+235    """Reconstruct a list of Obs or structures containing Obs from a dict that
+236    was built out of a json string.
+237
+238    The following structures are supported: Obs, list, numpy.ndarray, Corr
+239    If the list contains only one element, it is unpacked from the list.
 240
-241    The following structures are supported: Obs, list, numpy.ndarray, Corr
-242    If the list contains only one element, it is unpacked from the list.
-243
-244    Parameters
-245    ----------
-246    json_string : str
-247        json string containing the data.
-248    verbose : bool
-249        Print additional information that was written to the file.
-250    full_output : bool
-251        If True, a dict containing auxiliary information and the data is returned.
-252        If False, only the data is returned.
-253    """
-254
-255    def _gen_obsd_from_datad(d):
-256        retd = {}
-257        if d:
-258            retd['names'] = []
-259            retd['idl'] = []
-260            retd['deltas'] = []
-261            retd['is_merged'] = {}
-262            for ens in d:
-263                for rep in ens['replica']:
-264                    rep_name = rep['name']
-265                    if len(rep_name) > len(ens["id"]):
-266                        if rep_name[len(ens["id"])] != "|":
-267                            tmp_list = list(rep_name)
-268                            tmp_list = tmp_list[:len(ens["id"])] + ["|"] + tmp_list[len(ens["id"]):]
-269                            rep_name = ''.join(tmp_list)
-270                    retd['names'].append(rep_name)
-271                    retd['idl'].append([di[0] for di in rep['deltas']])
-272                    retd['deltas'].append(np.array([di[1:] for di in rep['deltas']]))
-273                    retd['is_merged'][rep_name] = rep.get('is_merged', False)
-274        return retd
-275
-276    def _gen_covobsd_from_cdatad(d):
-277        retd = {}
-278        for ens in d:
-279            retl = []
-280            name = ens['id']
-281            layouts = ens.get('layout', '1').strip()
-282            layout = [int(ls.strip()) for ls in layouts.split(',') if len(ls) > 0]
-283            cov = np.reshape(ens['cov'], layout)
-284            grad = ens['grad']
-285            nobs = len(grad[0])
-286            for i in range(nobs):
-287                retl.append({'name': name, 'cov': cov, 'grad': [g[i] for g in grad]})
-288            retd[name] = retl
-289        return retd
+241    Parameters
+242    ----------
+243    json_string : str
+244        json string containing the data.
+245    verbose : bool
+246        Print additional information that was written to the file.
+247    full_output : bool
+248        If True, a dict containing auxiliary information and the data is returned.
+249        If False, only the data is returned.
+250    """
+251
+252    def _gen_obsd_from_datad(d):
+253        retd = {}
+254        if d:
+255            retd['names'] = []
+256            retd['idl'] = []
+257            retd['deltas'] = []
+258            for ens in d:
+259                for rep in ens['replica']:
+260                    rep_name = rep['name']
+261                    if len(rep_name) > len(ens["id"]):
+262                        if rep_name[len(ens["id"])] != "|":
+263                            tmp_list = list(rep_name)
+264                            tmp_list = tmp_list[:len(ens["id"])] + ["|"] + tmp_list[len(ens["id"]):]
+265                            rep_name = ''.join(tmp_list)
+266                    retd['names'].append(rep_name)
+267                    retd['idl'].append([di[0] for di in rep['deltas']])
+268                    retd['deltas'].append(np.array([di[1:] for di in rep['deltas']]))
+269        return retd
+270
+271    def _gen_covobsd_from_cdatad(d):
+272        retd = {}
+273        for ens in d:
+274            retl = []
+275            name = ens['id']
+276            layouts = ens.get('layout', '1').strip()
+277            layout = [int(ls.strip()) for ls in layouts.split(',') if len(ls) > 0]
+278            cov = np.reshape(ens['cov'], layout)
+279            grad = ens['grad']
+280            nobs = len(grad[0])
+281            for i in range(nobs):
+282                retl.append({'name': name, 'cov': cov, 'grad': [g[i] for g in grad]})
+283            retd[name] = retl
+284        return retd
+285
+286    def get_Obs_from_dict(o):
+287        layouts = o.get('layout', '1').strip()
+288        if layouts != '1':
+289            raise Exception("layout is %s has to be 1 for type Obs." % (layouts), RuntimeWarning)
 290
-291    def get_Obs_from_dict(o):
-292        layouts = o.get('layout', '1').strip()
-293        if layouts != '1':
-294            raise Exception("layout is %s has to be 1 for type Obs." % (layouts), RuntimeWarning)
-295
-296        values = o['value']
-297        od = _gen_obsd_from_datad(o.get('data', {}))
-298        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
-299
-300        if od:
-301            ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
-302            ret._value = values[0]
-303            ret.is_merged = od['is_merged']
-304        else:
-305            ret = Obs([], [], means=[])
-306            ret._value = values[0]
-307        for name in cd:
-308            co = cd[name][0]
-309            ret._covobs[name] = Covobs(None, co['cov'], co['name'], grad=co['grad'])
-310            ret.names.append(co['name'])
-311
-312        ret.reweighted = o.get('reweighted', False)
-313        ret.tag = o.get('tag', [None])[0]
-314        return ret
-315
-316    def get_List_from_dict(o):
-317        layouts = o.get('layout', '1').strip()
-318        layout = int(layouts)
-319        values = o['value']
-320        od = _gen_obsd_from_datad(o.get('data', {}))
-321        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
-322
-323        ret = []
-324        taglist = o.get('tag', layout * [None])
-325        for i in range(layout):
-326            if od:
-327                ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
-328                ret[-1]._value = values[i]
-329                ret[-1].is_merged = od['is_merged']
-330            else:
-331                ret.append(Obs([], [], means=[]))
-332                ret[-1]._value = values[i]
-333                print('Created Obs with means= ', values[i])
-334            for name in cd:
-335                co = cd[name][i]
-336                ret[-1]._covobs[name] = Covobs(None, co['cov'], co['name'], grad=co['grad'])
-337                ret[-1].names.append(co['name'])
-338
-339            ret[-1].reweighted = o.get('reweighted', False)
-340            ret[-1].tag = taglist[i]
-341        return ret
-342
-343    def get_Array_from_dict(o):
-344        layouts = o.get('layout', '1').strip()
-345        layout = [int(ls.strip()) for ls in layouts.split(',') if len(ls) > 0]
-346        N = np.prod(layout)
-347        values = o['value']
-348        od = _gen_obsd_from_datad(o.get('data', {}))
-349        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
-350
-351        ret = []
-352        taglist = o.get('tag', N * [None])
-353        for i in range(N):
-354            if od:
-355                ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
-356                ret[-1]._value = values[i]
-357                ret[-1].is_merged = od['is_merged']
-358            else:
-359                ret.append(Obs([], [], means=[]))
-360                ret[-1]._value = values[i]
-361            for name in cd:
-362                co = cd[name][i]
-363                ret[-1]._covobs[name] = Covobs(None, co['cov'], co['name'], grad=co['grad'])
-364                ret[-1].names.append(co['name'])
-365            ret[-1].reweighted = o.get('reweighted', False)
-366            ret[-1].tag = taglist[i]
-367        return np.reshape(ret, layout)
-368
-369    def get_Corr_from_dict(o):
-370        if isinstance(o.get('tag'), list):  # supports the old way
-371            taglist = o.get('tag')  # This had to be modified to get the taglist from the dictionary
-372            temp_prange = None
-373        elif isinstance(o.get('tag'), dict):
-374            tagdic = o.get('tag')
-375            taglist = tagdic['tag']
-376            if 'prange' in tagdic:
-377                temp_prange = tagdic['prange']
-378            else:
-379                temp_prange = None
-380        else:
-381            raise Exception("The tag is not a list or dict")
-382
-383        corr_tag = taglist[-1]
-384        tmp_o = o
-385        tmp_o['tag'] = taglist[:-1]
-386        if len(tmp_o['tag']) == 0:
-387            del tmp_o['tag']
-388        dat = get_Array_from_dict(tmp_o)
-389        my_corr = Corr([None if np.isnan(o.ravel()[0].value) else o for o in list(dat)])
-390        if corr_tag != 'None':
-391            my_corr.tag = corr_tag
-392
-393        my_corr.prange = temp_prange
-394        return my_corr
-395
-396    prog = json_dict.get('program', '')
-397    version = json_dict.get('version', '')
-398    who = json_dict.get('who', '')
-399    date = json_dict.get('date', '')
-400    host = json_dict.get('host', '')
-401    if prog and verbose:
-402        print('Data has been written using %s.' % (prog))
-403    if version and verbose:
-404        print('Format version %s' % (version))
-405    if np.any([who, date, host] and verbose):
-406        print('Written by %s on %s on host %s' % (who, date, host))
-407    description = json_dict.get('description', '')
-408    if description and verbose:
-409        print()
-410        print('Description: ', description)
-411    obsdata = json_dict['obsdata']
-412    ol = []
-413    for io in obsdata:
-414        if io['type'] == 'Obs':
-415            ol.append(get_Obs_from_dict(io))
-416        elif io['type'] == 'List':
-417            ol.append(get_List_from_dict(io))
-418        elif io['type'] == 'Array':
-419            ol.append(get_Array_from_dict(io))
-420        elif io['type'] == 'Corr':
-421            ol.append(get_Corr_from_dict(io))
-422        else:
-423            raise Exception("Unknown datatype.")
-424
-425    if full_output:
-426        retd = {}
-427        retd['program'] = prog
-428        retd['version'] = version
-429        retd['who'] = who
-430        retd['date'] = date
-431        retd['host'] = host
-432        retd['description'] = description
-433        retd['obsdata'] = ol
+291        values = o['value']
+292        od = _gen_obsd_from_datad(o.get('data', {}))
+293        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
+294
+295        if od:
+296            ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
+297            ret._value = values[0]
+298        else:
+299            ret = Obs([], [], means=[])
+300            ret._value = values[0]
+301        for name in cd:
+302            co = cd[name][0]
+303            ret._covobs[name] = Covobs(None, co['cov'], co['name'], grad=co['grad'])
+304            ret.names.append(co['name'])
+305
+306        ret.reweighted = o.get('reweighted', False)
+307        ret.tag = o.get('tag', [None])[0]
+308        return ret
+309
+310    def get_List_from_dict(o):
+311        layouts = o.get('layout', '1').strip()
+312        layout = int(layouts)
+313        values = o['value']
+314        od = _gen_obsd_from_datad(o.get('data', {}))
+315        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
+316
+317        ret = []
+318        taglist = o.get('tag', layout * [None])
+319        for i in range(layout):
+320            if od:
+321                ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
+322                ret[-1]._value = values[i]
+323            else:
+324                ret.append(Obs([], [], means=[]))
+325                ret[-1]._value = values[i]
+326                print('Created Obs with means= ', values[i])
+327            for name in cd:
+328                co = cd[name][i]
+329                ret[-1]._covobs[name] = Covobs(None, co['cov'], co['name'], grad=co['grad'])
+330                ret[-1].names.append(co['name'])
+331
+332            ret[-1].reweighted = o.get('reweighted', False)
+333            ret[-1].tag = taglist[i]
+334        return ret
+335
+336    def get_Array_from_dict(o):
+337        layouts = o.get('layout', '1').strip()
+338        layout = [int(ls.strip()) for ls in layouts.split(',') if len(ls) > 0]
+339        N = np.prod(layout)
+340        values = o['value']
+341        od = _gen_obsd_from_datad(o.get('data', {}))
+342        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
+343
+344        ret = []
+345        taglist = o.get('tag', N * [None])
+346        for i in range(N):
+347            if od:
+348                ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
+349                ret[-1]._value = values[i]
+350            else:
+351                ret.append(Obs([], [], means=[]))
+352                ret[-1]._value = values[i]
+353            for name in cd:
+354                co = cd[name][i]
+355                ret[-1]._covobs[name] = Covobs(None, co['cov'], co['name'], grad=co['grad'])
+356                ret[-1].names.append(co['name'])
+357            ret[-1].reweighted = o.get('reweighted', False)
+358            ret[-1].tag = taglist[i]
+359        return np.reshape(ret, layout)
+360
+361    def get_Corr_from_dict(o):
+362        if isinstance(o.get('tag'), list):  # supports the old way
+363            taglist = o.get('tag')  # This had to be modified to get the taglist from the dictionary
+364            temp_prange = None
+365        elif isinstance(o.get('tag'), dict):
+366            tagdic = o.get('tag')
+367            taglist = tagdic['tag']
+368            if 'prange' in tagdic:
+369                temp_prange = tagdic['prange']
+370            else:
+371                temp_prange = None
+372        else:
+373            raise Exception("The tag is not a list or dict")
+374
+375        corr_tag = taglist[-1]
+376        tmp_o = o
+377        tmp_o['tag'] = taglist[:-1]
+378        if len(tmp_o['tag']) == 0:
+379            del tmp_o['tag']
+380        dat = get_Array_from_dict(tmp_o)
+381        my_corr = Corr([None if np.isnan(o.ravel()[0].value) else o for o in list(dat)])
+382        if corr_tag != 'None':
+383            my_corr.tag = corr_tag
+384
+385        my_corr.prange = temp_prange
+386        return my_corr
+387
+388    prog = json_dict.get('program', '')
+389    version = json_dict.get('version', '')
+390    who = json_dict.get('who', '')
+391    date = json_dict.get('date', '')
+392    host = json_dict.get('host', '')
+393    if prog and verbose:
+394        print('Data has been written using %s.' % (prog))
+395    if version and verbose:
+396        print('Format version %s' % (version))
+397    if np.any([who, date, host] and verbose):
+398        print('Written by %s on %s on host %s' % (who, date, host))
+399    description = json_dict.get('description', '')
+400    if description and verbose:
+401        print()
+402        print('Description: ', description)
+403    obsdata = json_dict['obsdata']
+404    ol = []
+405    for io in obsdata:
+406        if io['type'] == 'Obs':
+407            ol.append(get_Obs_from_dict(io))
+408        elif io['type'] == 'List':
+409            ol.append(get_List_from_dict(io))
+410        elif io['type'] == 'Array':
+411            ol.append(get_Array_from_dict(io))
+412        elif io['type'] == 'Corr':
+413            ol.append(get_Corr_from_dict(io))
+414        else:
+415            raise Exception("Unknown datatype.")
+416
+417    if full_output:
+418        retd = {}
+419        retd['program'] = prog
+420        retd['version'] = version
+421        retd['who'] = who
+422        retd['date'] = date
+423        retd['host'] = host
+424        retd['description'] = description
+425        retd['obsdata'] = ol
+426
+427        return retd
+428    else:
+429        if len(obsdata) == 1:
+430            ol = ol[0]
+431
+432        return ol
+433
 434
-435        return retd
-436    else:
-437        if len(obsdata) == 1:
-438            ol = ol[0]
-439
-440        return ol
-441
-442
-443def import_json_string(json_string, verbose=True, full_output=False):
-444    """Reconstruct a list of Obs or structures containing Obs from a json string.
-445
-446    The following structures are supported: Obs, list, numpy.ndarray, Corr
-447    If the list contains only one element, it is unpacked from the list.
-448
-449    Parameters
-450    ----------
-451    json_string : str
-452        json string containing the data.
-453    verbose : bool
-454        Print additional information that was written to the file.
-455    full_output : bool
-456        If True, a dict containing auxiliary information and the data is returned.
-457        If False, only the data is returned.
-458    """
-459
-460    return _parse_json_dict(json.loads(json_string), verbose, full_output)
-461
-462
-463def load_json(fname, verbose=True, gz=True, full_output=False):
-464    """Import a list of Obs or structures containing Obs from a .json(.gz) file.
-465
-466    The following structures are supported: Obs, list, numpy.ndarray, Corr
-467    If the list contains only one element, it is unpacked from the list.
-468
-469    Parameters
-470    ----------
-471    fname : str
-472        Filename of the input file.
-473    verbose : bool
-474        Print additional information that was written to the file.
-475    gz : bool
-476        If True, assumes that data is gzipped. If False, assumes JSON file.
-477    full_output : bool
-478        If True, a dict containing auxiliary information and the data is returned.
-479        If False, only the data is returned.
-480    """
-481    if not fname.endswith('.json') and not fname.endswith('.gz'):
-482        fname += '.json'
-483    if gz:
-484        if not fname.endswith('.gz'):
-485            fname += '.gz'
-486        with gzip.open(fname, 'r') as fin:
-487            d = json.load(fin)
-488    else:
-489        if fname.endswith('.gz'):
-490            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
-491        with open(fname, 'r', encoding='utf-8') as fin:
-492            d = json.loads(fin.read())
-493
-494    return _parse_json_dict(d, verbose, full_output)
-495
-496
-497def _ol_from_dict(ind, reps='DICTOBS'):
-498    """Convert a dictionary of Obs objects to a list and a dictionary that contains
-499    placeholders instead of the Obs objects.
-500
-501    Parameters
-502    ----------
-503    ind : dict
-504        Dict of JSON valid structures and objects that will be exported.
-505        At the moment, these object can be either of: Obs, list, numpy.ndarray, Corr.
-506        All Obs inside a structure have to be defined on the same set of configurations.
-507    reps : str
-508        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
-509    """
-510
-511    obstypes = (Obs, Corr, np.ndarray)
-512
-513    if not reps.isalnum():
-514        raise Exception('Placeholder string has to be alphanumeric!')
-515    ol = []
-516    counter = 0
-517
-518    def dict_replace_obs(d):
-519        nonlocal ol
-520        nonlocal counter
-521        x = {}
-522        for k, v in d.items():
-523            if isinstance(v, dict):
-524                v = dict_replace_obs(v)
-525            elif isinstance(v, list) and all([isinstance(o, Obs) for o in v]):
-526                v = obslist_replace_obs(v)
-527            elif isinstance(v, list):
-528                v = list_replace_obs(v)
-529            elif isinstance(v, obstypes):
-530                ol.append(v)
-531                v = reps + '%d' % (counter)
-532                counter += 1
-533            elif isinstance(v, str):
-534                if bool(re.match(r'%s[0-9]+' % (reps), v)):
-535                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be savely exported.' % (v, reps))
-536            x[k] = v
-537        return x
-538
-539    def list_replace_obs(li):
-540        nonlocal ol
-541        nonlocal counter
-542        x = []
-543        for e in li:
-544            if isinstance(e, list):
-545                e = list_replace_obs(e)
-546            elif isinstance(e, list) and all([isinstance(o, Obs) for o in e]):
-547                e = obslist_replace_obs(e)
-548            elif isinstance(e, dict):
-549                e = dict_replace_obs(e)
-550            elif isinstance(e, obstypes):
-551                ol.append(e)
-552                e = reps + '%d' % (counter)
-553                counter += 1
-554            elif isinstance(e, str):
-555                if bool(re.match(r'%s[0-9]+' % (reps), e)):
-556                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be savely exported.' % (e, reps))
-557            x.append(e)
-558        return x
-559
-560    def obslist_replace_obs(li):
-561        nonlocal ol
-562        nonlocal counter
-563        il = []
-564        for e in li:
-565            il.append(e)
-566
-567        ol.append(il)
-568        x = reps + '%d' % (counter)
-569        counter += 1
-570        return x
-571
-572    nd = dict_replace_obs(ind)
-573
-574    return ol, nd
-575
-576
-577def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
-578    """Export a dict of Obs or structures containing Obs to a .json(.gz) file
-579
-580    Parameters
-581    ----------
-582    od : dict
-583        Dict of JSON valid structures and objects that will be exported.
-584        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
-585        All Obs inside a structure have to be defined on the same set of configurations.
-586    fname : str
-587        Filename of the output file.
-588    description : str
-589        Optional string that describes the contents of the json file.
-590    indent : int
-591        Specify the indentation level of the json file. None or 0 is permissible and
-592        saves disk space.
-593    reps : str
-594        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
-595    gz : bool
-596        If True, the output is a gzipped json. If False, the output is a json file.
-597    """
-598
-599    if not isinstance(od, dict):
-600        raise Exception('od has to be a dictionary. Did you want to use dump_to_json?')
-601
-602    infostring = ('This JSON file contains a python dictionary that has been parsed to a list of structures. '
-603                  'OBSDICT contains the dictionary, where Obs or other structures have been replaced by '
-604                  '' + reps + '[0-9]+. The field description contains the additional description of this JSON file. '
-605                  'This file may be parsed to a dict with the pyerrors routine load_json_dict.')
-606
-607    desc_dict = {'INFO': infostring, 'OBSDICT': {}, 'description': description}
-608    ol, desc_dict['OBSDICT'] = _ol_from_dict(od, reps=reps)
-609
-610    dump_to_json(ol, fname, description=desc_dict, indent=indent, gz=gz)
-611
-612
-613def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
-614    """Parse a list of Obs or structures containing Obs and an accompanying
-615    dict, where the structures have been replaced by placeholders to a
-616    dict that contains the structures.
-617
-618    The following structures are supported: Obs, list, numpy.ndarray, Corr
-619
-620    Parameters
-621    ----------
-622    ol : list
-623        List of objects -
-624        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
-625        All Obs inside a structure have to be defined on the same set of configurations.
-626    ind : dict
-627        Dict that defines the structure of the resulting dict and contains placeholders
-628    reps : str
-629        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
-630    """
-631    if not reps.isalnum():
-632        raise Exception('Placeholder string has to be alphanumeric!')
-633
-634    counter = 0
-635
-636    def dict_replace_string(d):
-637        nonlocal counter
-638        nonlocal ol
-639        x = {}
-640        for k, v in d.items():
-641            if isinstance(v, dict):
-642                v = dict_replace_string(v)
-643            elif isinstance(v, list):
-644                v = list_replace_string(v)
-645            elif isinstance(v, str) and bool(re.match(r'%s[0-9]+' % (reps), v)):
-646                index = int(v[len(reps):])
-647                v = ol[index]
-648                counter += 1
-649            x[k] = v
-650        return x
-651
-652    def list_replace_string(li):
-653        nonlocal counter
-654        nonlocal ol
-655        x = []
-656        for e in li:
-657            if isinstance(e, list):
-658                e = list_replace_string(e)
-659            elif isinstance(e, dict):
-660                e = dict_replace_string(e)
-661            elif isinstance(e, str) and bool(re.match(r'%s[0-9]+' % (reps), e)):
-662                index = int(e[len(reps):])
-663                e = ol[index]
-664                counter += 1
-665            x.append(e)
-666        return x
-667
-668    nd = dict_replace_string(ind)
-669
-670    if counter == 0:
-671        raise Exception('No placeholder has been replaced! Check if reps is set correctly.')
-672
-673    return nd
-674
-675
-676def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
-677    """Import a dict of Obs or structures containing Obs from a .json(.gz) file.
-678
-679    The following structures are supported: Obs, list, numpy.ndarray, Corr
-680
-681    Parameters
-682    ----------
-683    fname : str
-684        Filename of the input file.
-685    verbose : bool
-686        Print additional information that was written to the file.
-687    gz : bool
-688        If True, assumes that data is gzipped. If False, assumes JSON file.
-689    full_output : bool
-690        If True, a dict containing auxiliary information and the data is returned.
-691        If False, only the data is returned.
-692    reps : str
-693        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
-694    """
-695    indata = load_json(fname, verbose=verbose, gz=gz, full_output=True)
-696    description = indata['description']['description']
-697    indict = indata['description']['OBSDICT']
-698    ol = indata['obsdata']
-699    od = _od_from_list_and_dict(ol, indict, reps=reps)
-700
-701    if full_output:
-702        indata['description'] = description
-703        indata['obsdata'] = od
-704        return indata
-705    else:
-706        return od
+435def import_json_string(json_string, verbose=True, full_output=False):
+436    """Reconstruct a list of Obs or structures containing Obs from a json string.
+437
+438    The following structures are supported: Obs, list, numpy.ndarray, Corr
+439    If the list contains only one element, it is unpacked from the list.
+440
+441    Parameters
+442    ----------
+443    json_string : str
+444        json string containing the data.
+445    verbose : bool
+446        Print additional information that was written to the file.
+447    full_output : bool
+448        If True, a dict containing auxiliary information and the data is returned.
+449        If False, only the data is returned.
+450    """
+451
+452    return _parse_json_dict(json.loads(json_string), verbose, full_output)
+453
+454
+455def load_json(fname, verbose=True, gz=True, full_output=False):
+456    """Import a list of Obs or structures containing Obs from a .json(.gz) file.
+457
+458    The following structures are supported: Obs, list, numpy.ndarray, Corr
+459    If the list contains only one element, it is unpacked from the list.
+460
+461    Parameters
+462    ----------
+463    fname : str
+464        Filename of the input file.
+465    verbose : bool
+466        Print additional information that was written to the file.
+467    gz : bool
+468        If True, assumes that data is gzipped. If False, assumes JSON file.
+469    full_output : bool
+470        If True, a dict containing auxiliary information and the data is returned.
+471        If False, only the data is returned.
+472    """
+473    if not fname.endswith('.json') and not fname.endswith('.gz'):
+474        fname += '.json'
+475    if gz:
+476        if not fname.endswith('.gz'):
+477            fname += '.gz'
+478        with gzip.open(fname, 'r') as fin:
+479            d = json.load(fin)
+480    else:
+481        if fname.endswith('.gz'):
+482            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
+483        with open(fname, 'r', encoding='utf-8') as fin:
+484            d = json.loads(fin.read())
+485
+486    return _parse_json_dict(d, verbose, full_output)
+487
+488
+489def _ol_from_dict(ind, reps='DICTOBS'):
+490    """Convert a dictionary of Obs objects to a list and a dictionary that contains
+491    placeholders instead of the Obs objects.
+492
+493    Parameters
+494    ----------
+495    ind : dict
+496        Dict of JSON valid structures and objects that will be exported.
+497        At the moment, these object can be either of: Obs, list, numpy.ndarray, Corr.
+498        All Obs inside a structure have to be defined on the same set of configurations.
+499    reps : str
+500        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
+501    """
+502
+503    obstypes = (Obs, Corr, np.ndarray)
+504
+505    if not reps.isalnum():
+506        raise Exception('Placeholder string has to be alphanumeric!')
+507    ol = []
+508    counter = 0
+509
+510    def dict_replace_obs(d):
+511        nonlocal ol
+512        nonlocal counter
+513        x = {}
+514        for k, v in d.items():
+515            if isinstance(v, dict):
+516                v = dict_replace_obs(v)
+517            elif isinstance(v, list) and all([isinstance(o, Obs) for o in v]):
+518                v = obslist_replace_obs(v)
+519            elif isinstance(v, list):
+520                v = list_replace_obs(v)
+521            elif isinstance(v, obstypes):
+522                ol.append(v)
+523                v = reps + '%d' % (counter)
+524                counter += 1
+525            elif isinstance(v, str):
+526                if bool(re.match(r'%s[0-9]+' % (reps), v)):
+527                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be savely exported.' % (v, reps))
+528            x[k] = v
+529        return x
+530
+531    def list_replace_obs(li):
+532        nonlocal ol
+533        nonlocal counter
+534        x = []
+535        for e in li:
+536            if isinstance(e, list):
+537                e = list_replace_obs(e)
+538            elif isinstance(e, list) and all([isinstance(o, Obs) for o in e]):
+539                e = obslist_replace_obs(e)
+540            elif isinstance(e, dict):
+541                e = dict_replace_obs(e)
+542            elif isinstance(e, obstypes):
+543                ol.append(e)
+544                e = reps + '%d' % (counter)
+545                counter += 1
+546            elif isinstance(e, str):
+547                if bool(re.match(r'%s[0-9]+' % (reps), e)):
+548                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be savely exported.' % (e, reps))
+549            x.append(e)
+550        return x
+551
+552    def obslist_replace_obs(li):
+553        nonlocal ol
+554        nonlocal counter
+555        il = []
+556        for e in li:
+557            il.append(e)
+558
+559        ol.append(il)
+560        x = reps + '%d' % (counter)
+561        counter += 1
+562        return x
+563
+564    nd = dict_replace_obs(ind)
+565
+566    return ol, nd
+567
+568
+569def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
+570    """Export a dict of Obs or structures containing Obs to a .json(.gz) file
+571
+572    Parameters
+573    ----------
+574    od : dict
+575        Dict of JSON valid structures and objects that will be exported.
+576        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
+577        All Obs inside a structure have to be defined on the same set of configurations.
+578    fname : str
+579        Filename of the output file.
+580    description : str
+581        Optional string that describes the contents of the json file.
+582    indent : int
+583        Specify the indentation level of the json file. None or 0 is permissible and
+584        saves disk space.
+585    reps : str
+586        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
+587    gz : bool
+588        If True, the output is a gzipped json. If False, the output is a json file.
+589    """
+590
+591    if not isinstance(od, dict):
+592        raise Exception('od has to be a dictionary. Did you want to use dump_to_json?')
+593
+594    infostring = ('This JSON file contains a python dictionary that has been parsed to a list of structures. '
+595                  'OBSDICT contains the dictionary, where Obs or other structures have been replaced by '
+596                  '' + reps + '[0-9]+. The field description contains the additional description of this JSON file. '
+597                  'This file may be parsed to a dict with the pyerrors routine load_json_dict.')
+598
+599    desc_dict = {'INFO': infostring, 'OBSDICT': {}, 'description': description}
+600    ol, desc_dict['OBSDICT'] = _ol_from_dict(od, reps=reps)
+601
+602    dump_to_json(ol, fname, description=desc_dict, indent=indent, gz=gz)
+603
+604
+605def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
+606    """Parse a list of Obs or structures containing Obs and an accompanying
+607    dict, where the structures have been replaced by placeholders to a
+608    dict that contains the structures.
+609
+610    The following structures are supported: Obs, list, numpy.ndarray, Corr
+611
+612    Parameters
+613    ----------
+614    ol : list
+615        List of objects -
+616        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
+617        All Obs inside a structure have to be defined on the same set of configurations.
+618    ind : dict
+619        Dict that defines the structure of the resulting dict and contains placeholders
+620    reps : str
+621        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
+622    """
+623    if not reps.isalnum():
+624        raise Exception('Placeholder string has to be alphanumeric!')
+625
+626    counter = 0
+627
+628    def dict_replace_string(d):
+629        nonlocal counter
+630        nonlocal ol
+631        x = {}
+632        for k, v in d.items():
+633            if isinstance(v, dict):
+634                v = dict_replace_string(v)
+635            elif isinstance(v, list):
+636                v = list_replace_string(v)
+637            elif isinstance(v, str) and bool(re.match(r'%s[0-9]+' % (reps), v)):
+638                index = int(v[len(reps):])
+639                v = ol[index]
+640                counter += 1
+641            x[k] = v
+642        return x
+643
+644    def list_replace_string(li):
+645        nonlocal counter
+646        nonlocal ol
+647        x = []
+648        for e in li:
+649            if isinstance(e, list):
+650                e = list_replace_string(e)
+651            elif isinstance(e, dict):
+652                e = dict_replace_string(e)
+653            elif isinstance(e, str) and bool(re.match(r'%s[0-9]+' % (reps), e)):
+654                index = int(e[len(reps):])
+655                e = ol[index]
+656                counter += 1
+657            x.append(e)
+658        return x
+659
+660    nd = dict_replace_string(ind)
+661
+662    if counter == 0:
+663        raise Exception('No placeholder has been replaced! Check if reps is set correctly.')
+664
+665    return nd
+666
+667
+668def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
+669    """Import a dict of Obs or structures containing Obs from a .json(.gz) file.
+670
+671    The following structures are supported: Obs, list, numpy.ndarray, Corr
+672
+673    Parameters
+674    ----------
+675    fname : str
+676        Filename of the input file.
+677    verbose : bool
+678        Print additional information that was written to the file.
+679    gz : bool
+680        If True, assumes that data is gzipped. If False, assumes JSON file.
+681    full_output : bool
+682        If True, a dict containing auxiliary information and the data is returned.
+683        If False, only the data is returned.
+684    reps : str
+685        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
+686    """
+687    indata = load_json(fname, verbose=verbose, gz=gz, full_output=True)
+688    description = indata['description']['description']
+689    indict = indata['description']['OBSDICT']
+690    ol = indata['obsdata']
+691    od = _od_from_list_and_dict(ol, indict, reps=reps)
+692
+693    if full_output:
+694        indata['description'] = description
+695        indata['obsdata'] = od
+696        return indata
+697    else:
+698        return od
@@ -1027,41 +1016,41 @@ saves disk space.
-            201def dump_to_json(ol, fname, description='', indent=1, gz=True):
-202    """Export a list of Obs or structures containing Obs to a .json(.gz) file
-203
-204    Parameters
-205    ----------
-206    ol : list
-207        List of objects that will be exported. At the moment, these objects can be
-208        either of: Obs, list, numpy.ndarray, Corr.
-209        All Obs inside a structure have to be defined on the same set of configurations.
-210    fname : str
-211        Filename of the output file.
-212    description : str
-213        Optional string that describes the contents of the json file.
-214    indent : int
-215        Specify the indentation level of the json file. None or 0 is permissible and
-216        saves disk space.
-217    gz : bool
-218        If True, the output is a gzipped json. If False, the output is a json file.
-219    """
-220
-221    jsonstring = create_json_string(ol, description, indent)
+            198def dump_to_json(ol, fname, description='', indent=1, gz=True):
+199    """Export a list of Obs or structures containing Obs to a .json(.gz) file
+200
+201    Parameters
+202    ----------
+203    ol : list
+204        List of objects that will be exported. At the moment, these objects can be
+205        either of: Obs, list, numpy.ndarray, Corr.
+206        All Obs inside a structure have to be defined on the same set of configurations.
+207    fname : str
+208        Filename of the output file.
+209    description : str
+210        Optional string that describes the contents of the json file.
+211    indent : int
+212        Specify the indentation level of the json file. None or 0 is permissible and
+213        saves disk space.
+214    gz : bool
+215        If True, the output is a gzipped json. If False, the output is a json file.
+216    """
+217
+218    jsonstring = create_json_string(ol, description, indent)
+219
+220    if not fname.endswith('.json') and not fname.endswith('.gz'):
+221        fname += '.json'
 222
-223    if not fname.endswith('.json') and not fname.endswith('.gz'):
-224        fname += '.json'
-225
-226    if gz:
-227        if not fname.endswith('.gz'):
-228            fname += '.gz'
-229
-230        fp = gzip.open(fname, 'wb')
-231        fp.write(jsonstring.encode('utf-8'))
-232    else:
-233        fp = open(fname, 'w', encoding='utf-8')
-234        fp.write(jsonstring)
-235    fp.close()
+223    if gz:
+224        if not fname.endswith('.gz'):
+225            fname += '.gz'
+226
+227        fp = gzip.open(fname, 'wb')
+228        fp.write(jsonstring.encode('utf-8'))
+229    else:
+230        fp = open(fname, 'w', encoding='utf-8')
+231        fp.write(jsonstring)
+232    fp.close()
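
A minimal usage sketch for dump_to_json, assuming only a pyerrors installation; the ensemble name, sample data and file name are made up for illustration:

import numpy as np
import pyerrors as pe

# Toy observable from pseudo-random samples on a single ensemble.
obs = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ensemble1'])

# With gz=True (the default) this writes 'toy_obs.json.gz'.
pe.input.json.dump_to_json([obs], 'toy_obs', description='toy data')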
 
@@ -1099,24 +1088,24 @@ If True, the output is a gzipped json. If False, the output is a json file.
-
444def import_json_string(json_string, verbose=True, full_output=False):
-445    """Reconstruct a list of Obs or structures containing Obs from a json string.
-446
-447    The following structures are supported: Obs, list, numpy.ndarray, Corr
-448    If the list contains only one element, it is unpacked from the list.
-449
-450    Parameters
-451    ----------
-452    json_string : str
-453        json string containing the data.
-454    verbose : bool
-455        Print additional information that was written to the file.
-456    full_output : bool
-457        If True, a dict containing auxiliary information and the data is returned.
-458        If False, only the data is returned.
-459    """
-460
-461    return _parse_json_dict(json.loads(json_string), verbose, full_output)
+            
436def import_json_string(json_string, verbose=True, full_output=False):
+437    """Reconstruct a list of Obs or structures containing Obs from a json string.
+438
+439    The following structures are supported: Obs, list, numpy.ndarray, Corr.
+440    If the list contains only one element, it is unpacked from the list.
+441
+442    Parameters
+443    ----------
+444    json_string : str
+445        json string containing the data.
+446    verbose : bool
+447        Print additional information that was written to the file.
+448    full_output : bool
+449        If True, a dict containing auxiliary information and the data is returned.
+450        If False, only the data is returned.
+451    """
+452
+453    return _parse_json_dict(json.loads(json_string), verbose, full_output)
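
As a sketch of the in-memory round trip (names are hypothetical): an observable serialized with create_json_string can be reconstructed with import_json_string, and a single-element list is unpacked on import:

import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(0.5, 0.05, 200)], ['ensembleA'])

json_string = pe.input.json.create_json_string([obs], description='round trip')
recovered = pe.input.json.import_json_string(json_string, verbose=False)  # returns a single Obs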
 
@@ -1151,38 +1140,38 @@ If False, only the data is returned.
-
464def load_json(fname, verbose=True, gz=True, full_output=False):
-465    """Import a list of Obs or structures containing Obs from a .json(.gz) file.
-466
-467    The following structures are supported: Obs, list, numpy.ndarray, Corr
-468    If the list contains only one element, it is unpacked from the list.
-469
-470    Parameters
-471    ----------
-472    fname : str
-473        Filename of the input file.
-474    verbose : bool
-475        Print additional information that was written to the file.
-476    gz : bool
-477        If True, assumes that data is gzipped. If False, assumes JSON file.
-478    full_output : bool
-479        If True, a dict containing auxiliary information and the data is returned.
-480        If False, only the data is returned.
-481    """
-482    if not fname.endswith('.json') and not fname.endswith('.gz'):
-483        fname += '.json'
-484    if gz:
-485        if not fname.endswith('.gz'):
-486            fname += '.gz'
-487        with gzip.open(fname, 'r') as fin:
-488            d = json.load(fin)
-489    else:
-490        if fname.endswith('.gz'):
-491            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
-492        with open(fname, 'r', encoding='utf-8') as fin:
-493            d = json.loads(fin.read())
-494
-495    return _parse_json_dict(d, verbose, full_output)
+            
456def load_json(fname, verbose=True, gz=True, full_output=False):
+457    """Import a list of Obs or structures containing Obs from a .json(.gz) file.
+458
+459    The following structures are supported: Obs, list, numpy.ndarray, Corr.
+460    If the list contains only one element, it is unpacked from the list.
+461
+462    Parameters
+463    ----------
+464    fname : str
+465        Filename of the input file.
+466    verbose : bool
+467        Print additional information that was written to the file.
+468    gz : bool
+469        If True, assumes that data is gzipped. If False, assumes JSON file.
+470    full_output : bool
+471        If True, a dict containing auxiliary information and the data is returned.
+472        If False, only the data is returned.
+473    """
+474    if not fname.endswith('.json') and not fname.endswith('.gz'):
+475        fname += '.json'
+476    if gz:
+477        if not fname.endswith('.gz'):
+478            fname += '.gz'
+479        with gzip.open(fname, 'r') as fin:
+480            d = json.load(fin)
+481    else:
+482        if fname.endswith('.gz'):
+483            warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning)
+484        with open(fname, 'r', encoding='utf-8') as fin:
+485            d = json.loads(fin.read())
+486
+487    return _parse_json_dict(d, verbose, full_output)
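
A short sketch of reading a file written by dump_to_json (the file name refers to the hypothetical example above):

import pyerrors as pe

# gz=True (the default) expects a gzipped file such as 'toy_obs.json.gz'.
data = pe.input.json.load_json('toy_obs', verbose=False)

# full_output=True returns a dict that also carries the auxiliary entries
# written on export, e.g. 'program', 'version', 'who', 'date' and 'host'.
full = pe.input.json.load_json('toy_obs', full_output=True)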
 
@@ -1219,40 +1208,40 @@ If False, only the data is returned.
-
578def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
-579    """Export a dict of Obs or structures containing Obs to a .json(.gz) file
-580
-581    Parameters
-582    ----------
-583    od : dict
-584        Dict of JSON valid structures and objects that will be exported.
-585        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
-586        All Obs inside a structure have to be defined on the same set of configurations.
-587    fname : str
-588        Filename of the output file.
-589    description : str
-590        Optional string that describes the contents of the json file.
-591    indent : int
-592        Specify the indentation level of the json file. None or 0 is permissible and
-593        saves disk space.
-594    reps : str
-595        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
-596    gz : bool
-597        If True, the output is a gzipped json. If False, the output is a json file.
-598    """
+            
570def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
+571    """Export a dict of Obs or structures containing Obs to a .json(.gz) file
+572
+573    Parameters
+574    ----------
+575    od : dict
+576        Dict of JSON valid structures and objects that will be exported.
+577        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
+578        All Obs inside a structure have to be defined on the same set of configurations.
+579    fname : str
+580        Filename of the output file.
+581    description : str
+582        Optional string that describes the contents of the json file.
+583    indent : int
+584        Specify the indentation level of the json file. None or 0 is permissible and
+585        saves disk space.
+586    reps : str
+587        Specify the structure of the placeholder in the exported dict to be reps[0-9]+.
+588    gz : bool
+589        If True, the output is a gzipped json. If False, the output is a json file.
+590    """
+591
+592    if not isinstance(od, dict):
+593        raise Exception('od has to be a dictionary. Did you want to use dump_to_json?')
+594
+595    infostring = ('This JSON file contains a python dictionary that has been parsed to a list of structures. '
+596                  'OBSDICT contains the dictionary, where Obs or other structures have been replaced by '
+597                  '' + reps + '[0-9]+. The field description contains the additional description of this JSON file. '
+598                  'This file may be parsed to a dict with the pyerrors routine load_json_dict.')
 599
-600    if not isinstance(od, dict):
-601        raise Exception('od has to be a dictionary. Did you want to use dump_to_json?')
+600    desc_dict = {'INFO': infostring, 'OBSDICT': {}, 'description': description}
+601    ol, desc_dict['OBSDICT'] = _ol_from_dict(od, reps=reps)
 602
-603    infostring = ('This JSON file contains a python dictionary that has been parsed to a list of structures. '
-604                  'OBSDICT contains the dictionary, where Obs or other structures have been replaced by '
-605                  '' + reps + '[0-9]+. The field description contains the additional description of this JSON file. '
-606                  'This file may be parsed to a dict with the pyerrors routine load_json_dict.')
-607
-608    desc_dict = {'INFO': infostring, 'OBSDICT': {}, 'description': description}
-609    ol, desc_dict['OBSDICT'] = _ol_from_dict(od, reps=reps)
-610
-611    dump_to_json(ol, fname, description=desc_dict, indent=indent, gz=gz)
+603    dump_to_json(ol, fname, description=desc_dict, indent=indent, gz=gz)
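
A sketch of exporting a nested dict; keys and values are invented for illustration, and JSON-valid entries can be mixed freely with Obs:

import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.7, 0.2, 150)], ['run1'])

od = {'plateau': obs, 'meta': {'beta': 3.4, 'comment': 'toy example'}}
pe.input.json.dump_dict_to_json(od, 'toy_dict', description='dict export')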
 
@@ -1292,37 +1281,37 @@ If True, the output is a gzipped json. If False, the output is a json file.
-
677def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
-678    """Import a dict of Obs or structures containing Obs from a .json(.gz) file.
-679
-680    The following structures are supported: Obs, list, numpy.ndarray, Corr
-681
-682    Parameters
-683    ----------
-684    fname : str
-685        Filename of the input file.
-686    verbose : bool
-687        Print additional information that was written to the file.
-688    gz : bool
-689        If True, assumes that data is gzipped. If False, assumes JSON file.
-690    full_output : bool
-691        If True, a dict containing auxiliary information and the data is returned.
-692        If False, only the data is returned.
-693    reps : str
-694        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
-695    """
-696    indata = load_json(fname, verbose=verbose, gz=gz, full_output=True)
-697    description = indata['description']['description']
-698    indict = indata['description']['OBSDICT']
-699    ol = indata['obsdata']
-700    od = _od_from_list_and_dict(ol, indict, reps=reps)
-701
-702    if full_output:
-703        indata['description'] = description
-704        indata['obsdata'] = od
-705        return indata
-706    else:
-707        return od
+            
669def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
+670    """Import a dict of Obs or structures containing Obs from a .json(.gz) file.
+671
+672    The following structures are supported: Obs, list, numpy.ndarray, Corr.
+673
+674    Parameters
+675    ----------
+676    fname : str
+677        Filename of the input file.
+678    verbose : bool
+679        Print additional information that was written to the file.
+680    gz : bool
+681        If True, assumes that data is gzipped. If False, assumes JSON file.
+682    full_output : bool
+683        If True, a dict containing auxiliary information and the data is returned.
+684        If False, only the data is returned.
+685    reps : str
+686        Specify the structure of the placeholder in the imported dict to be reps[0-9]+.
+687    """
+688    indata = load_json(fname, verbose=verbose, gz=gz, full_output=True)
+689    description = indata['description']['description']
+690    indict = indata['description']['OBSDICT']
+691    ol = indata['obsdata']
+692    od = _od_from_list_and_dict(ol, indict, reps=reps)
+693
+694    if full_output:
+695        indata['description'] = description
+696        indata['obsdata'] = od
+697        return indata
+698    else:
+699        return od
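
Reading such a file back is the inverse operation; a sketch assuming the hypothetical 'toy_dict' file from above:

import pyerrors as pe

od = pe.input.json.load_json_dict('toy_dict')
od['plateau'].gamma_method()     # Obs entries come back as Obs
print(od['meta']['beta'])        # plain JSON entries survive unchanged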
 
diff --git a/docs/pyerrors/input/openQCD.html b/docs/pyerrors/input/openQCD.html index 4c034dc4..e61d699f 100644 --- a/docs/pyerrors/input/openQCD.html +++ b/docs/pyerrors/input/openQCD.html @@ -1028,205 +1028,204 @@
932 proj_qtop.append(np.array([1 if round(qtop.r_values[n] + q) == target else 0 for q in qtop.deltas[n]])) 933 934 reto = Obs(proj_qtop, qtop.names, idl=[qtop.idl[name] for name in qtop.names]) - 935 reto.is_merged = qtop.is_merged - 936 return reto + 935 return reto + 936 937 - 938 - 939def read_qtop_sector(path, prefix, c, target=0, **kwargs): - 940 """Constructs reweighting factors to a specified topological sector. - 941 - 942 Parameters - 943 ---------- - 944 path : str - 945 path of the measurement files - 946 prefix : str - 947 prefix of the measurement files, e.g. <prefix>_id0_r0.ms.dat - 948 c : double - 949 Smearing radius in units of the lattice extent, c = sqrt(8 t0) / L - 950 target : int - 951 Specifies the topological sector to be reweighted to (default 0) - 952 dtr_cnfg : int - 953 (optional) parameter that specifies the number of trajectories - 954 between two configs. - 955 if it is not set, the distance between two measurements - 956 in the file is assumed to be the distance between two configurations. - 957 steps : int - 958 (optional) Distance between two configurations in units of trajectories / - 959 cycles. Assumed to be the distance between two measurements * dtr_cnfg if not given - 960 version : str - 961 version string of the openQCD (sfqcd) version used to create - 962 the ensemble. Default is 2.0. May also be set to sfqcd. - 963 L : int - 964 spatial length of the lattice in L/a. - 965 HAS to be set if version != sfqcd, since openQCD does not provide - 966 this in the header - 967 r_start : list - 968 offset of the first ensemble, making it easier to match - 969 later on with other Obs - 970 r_stop : list - 971 last configurations that need to be read (per replicum) - 972 files : list - 973 specify the exact files that need to be read - 974 from path, practical if e.g. only one replicum is needed - 975 names : list - 976 Alternative labeling for replicas/ensembles. - 977 Has to have the appropriate length - 978 Zeuthen_flow : bool - 979 (optional) If True, the Zeuthen flow is used for Qtop. Only possible - 980 for version=='sfqcd' If False, the Wilson flow is used. - 981 """ - 982 - 983 if not isinstance(target, int): - 984 raise Exception("'target' has to be an integer.") - 985 - 986 kwargs['integer_charge'] = True - 987 qtop = read_qtop(path, prefix, c, **kwargs) - 988 - 989 return qtop_projection(qtop, target=target) + 938def read_qtop_sector(path, prefix, c, target=0, **kwargs): + 939 """Constructs reweighting factors to a specified topological sector. + 940 + 941 Parameters + 942 ---------- + 943 path : str + 944 path of the measurement files + 945 prefix : str + 946 prefix of the measurement files, e.g. <prefix>_id0_r0.ms.dat + 947 c : double + 948 Smearing radius in units of the lattice extent, c = sqrt(8 t0) / L + 949 target : int + 950 Specifies the topological sector to be reweighted to (default 0) + 951 dtr_cnfg : int + 952 (optional) parameter that specifies the number of trajectories + 953 between two configs. + 954 if it is not set, the distance between two measurements + 955 in the file is assumed to be the distance between two configurations. + 956 steps : int + 957 (optional) Distance between two configurations in units of trajectories / + 958 cycles. Assumed to be the distance between two measurements * dtr_cnfg if not given + 959 version : str + 960 version string of the openQCD (sfqcd) version used to create + 961 the ensemble. Default is 2.0. May also be set to sfqcd. + 962 L : int + 963 spatial length of the lattice in L/a. 
+ 964 HAS to be set if version != sfqcd, since openQCD does not provide + 965 this in the header + 966 r_start : list + 967 offset of the first ensemble, making it easier to match + 968 later on with other Obs + 969 r_stop : list + 970 last configurations that need to be read (per replicum) + 971 files : list + 972 specify the exact files that need to be read + 973 from path, practical if e.g. only one replicum is needed + 974 names : list + 975 Alternative labeling for replicas/ensembles. + 976 Has to have the appropriate length + 977 Zeuthen_flow : bool + 978 (optional) If True, the Zeuthen flow is used for Qtop. Only possible + 979 for version=='sfqcd' If False, the Wilson flow is used. + 980 """ + 981 + 982 if not isinstance(target, int): + 983 raise Exception("'target' has to be an integer.") + 984 + 985 kwargs['integer_charge'] = True + 986 qtop = read_qtop(path, prefix, c, **kwargs) + 987 + 988 return qtop_projection(qtop, target=target) + 989 990 - 991 - 992def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs): - 993 """ - 994 Read data from files in the specified directory with the specified prefix and quark combination extension, and return a `Corr` object containing the data. - 995 - 996 Parameters - 997 ---------- - 998 path : str - 999 The directory to search for the files in. -1000 prefix : str -1001 The prefix to match the files against. -1002 qc : str -1003 The quark combination extension to match the files against. -1004 corr : str -1005 The correlator to extract data for. -1006 sep : str, optional -1007 The separator to use when parsing the replika names. -1008 **kwargs -1009 Additional keyword arguments. The following keyword arguments are recognized: -1010 -1011 - names (List[str]): A list of names to use for the replicas. -1012 -1013 Returns -1014 ------- -1015 Corr -1016 A complex valued `Corr` object containing the data read from the files. In case of boudary to bulk correlators. -1017 or -1018 CObs -1019 A complex valued `CObs` object containing the data read from the files. In case of boudary to boundary correlators. + 991def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs): + 992 """ + 993 Read data from files in the specified directory with the specified prefix and quark combination extension, and return a `Corr` object containing the data. + 994 + 995 Parameters + 996 ---------- + 997 path : str + 998 The directory to search for the files in. + 999 prefix : str +1000 The prefix to match the files against. +1001 qc : str +1002 The quark combination extension to match the files against. +1003 corr : str +1004 The correlator to extract data for. +1005 sep : str, optional +1006 The separator to use when parsing the replika names. +1007 **kwargs +1008 Additional keyword arguments. The following keyword arguments are recognized: +1009 +1010 - names (List[str]): A list of names to use for the replicas. +1011 +1012 Returns +1013 ------- +1014 Corr +1015 A complex valued `Corr` object containing the data read from the files. In case of boudary to bulk correlators. +1016 or +1017 CObs +1018 A complex valued `CObs` object containing the data read from the files. In case of boudary to boundary correlators. +1019 1020 -1021 -1022 Raises -1023 ------ -1024 FileNotFoundError -1025 If no files matching the specified prefix and quark combination extension are found in the specified directory. -1026 IOError -1027 If there is an error reading a file. -1028 struct.error -1029 If there is an error unpacking binary data. 
-1030 """ -1031 -1032 found = [] -1033 files = [] -1034 names = [] -1035 for (dirpath, dirnames, filenames) in os.walk(path + "/"): -1036 found.extend(filenames) -1037 break -1038 -1039 for f in found: -1040 if fnmatch.fnmatch(f, prefix + "*.ms5_xsf_" + qc + ".dat"): -1041 files.append(f) -1042 if not sep == "": -1043 names.append(prefix + "|r" + f.split(".")[0].split(sep)[1]) -1044 else: -1045 names.append(prefix) -1046 files = sorted(files) -1047 -1048 if "names" in kwargs: -1049 names = kwargs.get("names") -1050 else: -1051 names = sorted(names) -1052 -1053 cnfgs = [] -1054 realsamples = [] -1055 imagsamples = [] -1056 repnum = 0 -1057 for file in files: -1058 with open(path + "/" + file, "rb") as fp: -1059 -1060 t = fp.read(8) -1061 kappa = struct.unpack('d', t)[0] -1062 t = fp.read(8) -1063 csw = struct.unpack('d', t)[0] -1064 t = fp.read(8) -1065 dF = struct.unpack('d', t)[0] -1066 t = fp.read(8) -1067 zF = struct.unpack('d', t)[0] -1068 -1069 t = fp.read(4) -1070 tmax = struct.unpack('i', t)[0] -1071 t = fp.read(4) -1072 bnd = struct.unpack('i', t)[0] -1073 -1074 placesBI = ["gS", "gP", -1075 "gA", "gV", -1076 "gVt", "lA", -1077 "lV", "lVt", -1078 "lT", "lTt"] -1079 placesBB = ["g1", "l1"] -1080 -1081 # the chunks have the following structure: -1082 # confignumber, 10x timedependent complex correlators as doubles, 2x timeindependent complex correlators as doubles -1083 -1084 chunksize = 4 + (8 * 2 * tmax * 10) + (8 * 2 * 2) -1085 packstr = '=i' + ('d' * 2 * tmax * 10) + ('d' * 2 * 2) -1086 cnfgs.append([]) -1087 realsamples.append([]) -1088 imagsamples.append([]) -1089 for t in range(tmax): -1090 realsamples[repnum].append([]) -1091 imagsamples[repnum].append([]) -1092 -1093 while True: -1094 cnfgt = fp.read(chunksize) -1095 if not cnfgt: -1096 break -1097 asascii = struct.unpack(packstr, cnfgt) -1098 cnfg = asascii[0] -1099 cnfgs[repnum].append(cnfg) -1100 -1101 if corr not in placesBB: -1102 tmpcorr = asascii[1 + 2 * tmax * placesBI.index(corr):1 + 2 * tmax * placesBI.index(corr) + 2 * tmax] -1103 else: -1104 tmpcorr = asascii[1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr):1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr) + 2] -1105 -1106 corrres = [[], []] -1107 for i in range(len(tmpcorr)): -1108 corrres[i % 2].append(tmpcorr[i]) -1109 for t in range(int(len(tmpcorr) / 2)): -1110 realsamples[repnum][t].append(corrres[0][t]) -1111 for t in range(int(len(tmpcorr) / 2)): -1112 imagsamples[repnum][t].append(corrres[1][t]) -1113 repnum += 1 -1114 -1115 s = "Read correlator " + corr + " from " + str(repnum) + " replika with " + str(len(realsamples[0][t])) -1116 for rep in range(1, repnum): -1117 s += ", " + str(len(realsamples[rep][t])) -1118 s += " samples" -1119 print(s) -1120 print("Asserted run parameters:\n T:", tmax, "kappa:", kappa, "csw:", csw, "dF:", dF, "zF:", zF, "bnd:", bnd) -1121 -1122 # we have the data now... but we need to re format the whole thing and put it into Corr objects. -1123 -1124 compObs = [] -1125 -1126 for t in range(int(len(tmpcorr) / 2)): -1127 compObs.append(CObs(Obs([realsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs), -1128 Obs([imagsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs))) -1129 -1130 if len(compObs) == 1: -1131 return compObs[0] -1132 else: -1133 return Corr(compObs) +1021 Raises +1022 ------ +1023 FileNotFoundError +1024 If no files matching the specified prefix and quark combination extension are found in the specified directory. +1025 IOError +1026 If there is an error reading a file. 
+1027 struct.error +1028 If there is an error unpacking binary data. +1029 """ +1030 +1031 found = [] +1032 files = [] +1033 names = [] +1034 for (dirpath, dirnames, filenames) in os.walk(path + "/"): +1035 found.extend(filenames) +1036 break +1037 +1038 for f in found: +1039 if fnmatch.fnmatch(f, prefix + "*.ms5_xsf_" + qc + ".dat"): +1040 files.append(f) +1041 if not sep == "": +1042 names.append(prefix + "|r" + f.split(".")[0].split(sep)[1]) +1043 else: +1044 names.append(prefix) +1045 files = sorted(files) +1046 +1047 if "names" in kwargs: +1048 names = kwargs.get("names") +1049 else: +1050 names = sorted(names) +1051 +1052 cnfgs = [] +1053 realsamples = [] +1054 imagsamples = [] +1055 repnum = 0 +1056 for file in files: +1057 with open(path + "/" + file, "rb") as fp: +1058 +1059 t = fp.read(8) +1060 kappa = struct.unpack('d', t)[0] +1061 t = fp.read(8) +1062 csw = struct.unpack('d', t)[0] +1063 t = fp.read(8) +1064 dF = struct.unpack('d', t)[0] +1065 t = fp.read(8) +1066 zF = struct.unpack('d', t)[0] +1067 +1068 t = fp.read(4) +1069 tmax = struct.unpack('i', t)[0] +1070 t = fp.read(4) +1071 bnd = struct.unpack('i', t)[0] +1072 +1073 placesBI = ["gS", "gP", +1074 "gA", "gV", +1075 "gVt", "lA", +1076 "lV", "lVt", +1077 "lT", "lTt"] +1078 placesBB = ["g1", "l1"] +1079 +1080 # the chunks have the following structure: +1081 # confignumber, 10x timedependent complex correlators as doubles, 2x timeindependent complex correlators as doubles +1082 +1083 chunksize = 4 + (8 * 2 * tmax * 10) + (8 * 2 * 2) +1084 packstr = '=i' + ('d' * 2 * tmax * 10) + ('d' * 2 * 2) +1085 cnfgs.append([]) +1086 realsamples.append([]) +1087 imagsamples.append([]) +1088 for t in range(tmax): +1089 realsamples[repnum].append([]) +1090 imagsamples[repnum].append([]) +1091 +1092 while True: +1093 cnfgt = fp.read(chunksize) +1094 if not cnfgt: +1095 break +1096 asascii = struct.unpack(packstr, cnfgt) +1097 cnfg = asascii[0] +1098 cnfgs[repnum].append(cnfg) +1099 +1100 if corr not in placesBB: +1101 tmpcorr = asascii[1 + 2 * tmax * placesBI.index(corr):1 + 2 * tmax * placesBI.index(corr) + 2 * tmax] +1102 else: +1103 tmpcorr = asascii[1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr):1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr) + 2] +1104 +1105 corrres = [[], []] +1106 for i in range(len(tmpcorr)): +1107 corrres[i % 2].append(tmpcorr[i]) +1108 for t in range(int(len(tmpcorr) / 2)): +1109 realsamples[repnum][t].append(corrres[0][t]) +1110 for t in range(int(len(tmpcorr) / 2)): +1111 imagsamples[repnum][t].append(corrres[1][t]) +1112 repnum += 1 +1113 +1114 s = "Read correlator " + corr + " from " + str(repnum) + " replika with " + str(len(realsamples[0][t])) +1115 for rep in range(1, repnum): +1116 s += ", " + str(len(realsamples[rep][t])) +1117 s += " samples" +1118 print(s) +1119 print("Asserted run parameters:\n T:", tmax, "kappa:", kappa, "csw:", csw, "dF:", dF, "zF:", zF, "bnd:", bnd) +1120 +1121 # we have the data now... but we need to re format the whole thing and put it into Corr objects. +1122 +1123 compObs = [] +1124 +1125 for t in range(int(len(tmpcorr) / 2)): +1126 compObs.append(CObs(Obs([realsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs), +1127 Obs([imagsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs))) +1128 +1129 if len(compObs) == 1: +1130 return compObs[0] +1131 else: +1132 return Corr(compObs)
@@ -2076,8 +2075,7 @@ postfix of the file to read, e.g. '.gfms.dat' for openQCD-files 933 proj_qtop.append(np.array([1 if round(qtop.r_values[n] + q) == target else 0 for q in qtop.deltas[n]])) 934 935 reto = Obs(proj_qtop, qtop.names, idl=[qtop.idl[name] for name in qtop.names]) -936 reto.is_merged = qtop.is_merged -937 return reto +936 return reto @@ -2106,57 +2104,57 @@ Specifies the topological sector to be reweighted to (default 0) -
940def read_qtop_sector(path, prefix, c, target=0, **kwargs):
-941    """Constructs reweighting factors to a specified topological sector.
-942
-943    Parameters
-944    ----------
-945    path : str
-946        path of the measurement files
-947    prefix : str
-948        prefix of the measurement files, e.g. <prefix>_id0_r0.ms.dat
-949    c : double
-950        Smearing radius in units of the lattice extent, c = sqrt(8 t0) / L
-951    target : int
-952        Specifies the topological sector to be reweighted to (default 0)
-953    dtr_cnfg : int
-954        (optional) parameter that specifies the number of trajectories
-955        between two configs.
-956        if it is not set, the distance between two measurements
-957        in the file is assumed to be the distance between two configurations.
-958    steps : int
-959        (optional) Distance between two configurations in units of trajectories /
-960         cycles. Assumed to be the distance between two measurements * dtr_cnfg if not given
-961    version : str
-962        version string of the openQCD (sfqcd) version used to create
-963        the ensemble. Default is 2.0. May also be set to sfqcd.
-964    L : int
-965        spatial length of the lattice in L/a.
-966        HAS to be set if version != sfqcd, since openQCD does not provide
-967        this in the header
-968    r_start : list
-969        offset of the first ensemble, making it easier to match
-970        later on with other Obs
-971    r_stop : list
-972        last configurations that need to be read (per replicum)
-973    files : list
-974        specify the exact files that need to be read
-975        from path, practical if e.g. only one replicum is needed
-976    names : list
-977        Alternative labeling for replicas/ensembles.
-978        Has to have the appropriate length
-979    Zeuthen_flow : bool
-980        (optional) If True, the Zeuthen flow is used for Qtop. Only possible
-981        for version=='sfqcd' If False, the Wilson flow is used.
-982    """
-983
-984    if not isinstance(target, int):
-985        raise Exception("'target' has to be an integer.")
-986
-987    kwargs['integer_charge'] = True
-988    qtop = read_qtop(path, prefix, c, **kwargs)
-989
-990    return qtop_projection(qtop, target=target)
+            
939def read_qtop_sector(path, prefix, c, target=0, **kwargs):
+940    """Constructs reweighting factors to a specified topological sector.
+941
+942    Parameters
+943    ----------
+944    path : str
+945        path of the measurement files
+946    prefix : str
+947        prefix of the measurement files, e.g. <prefix>_id0_r0.ms.dat
+948    c : double
+949        Smearing radius in units of the lattice extent, c = sqrt(8 t0) / L
+950    target : int
+951        Specifies the topological sector to be reweighted to (default 0)
+952    dtr_cnfg : int
+953        (optional) parameter that specifies the number of trajectories
+954        between two configs.
+955        if it is not set, the distance between two measurements
+956        in the file is assumed to be the distance between two configurations.
+957    steps : int
+958        (optional) Distance between two configurations in units of trajectories /
+959        cycles. Assumed to be the distance between two measurements * dtr_cnfg if not given.
+960    version : str
+961        version string of the openQCD (sfqcd) version used to create
+962        the ensemble. Default is 2.0. May also be set to sfqcd.
+963    L : int
+964        spatial length of the lattice in L/a.
+965        HAS to be set if version != sfqcd, since openQCD does not provide
+966        this in the header
+967    r_start : list
+968        offset of the first ensemble, making it easier to match
+969        later on with other Obs
+970    r_stop : list
+971        last configurations that need to be read (per replicum)
+972    files : list
+973        specify the exact files that need to be read
+974        from path, practical if e.g. only one replicum is needed
+975    names : list
+976        Alternative labeling for replicas/ensembles.
+977        Has to have the appropriate length.
+978    Zeuthen_flow : bool
+979        (optional) If True, the Zeuthen flow is used for Qtop. Only possible
+980        for version=='sfqcd'. If False, the Wilson flow is used.
+981    """
+982
+983    if not isinstance(target, int):
+984        raise Exception("'target' has to be an integer.")
+985
+986    kwargs['integer_charge'] = True
+987    qtop = read_qtop(path, prefix, c, **kwargs)
+988
+989    return qtop_projection(qtop, target=target)
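
A hedged usage sketch; path, prefix and parameter values are placeholders, and read_qtop has to find matching measurement files under the given path:

import pyerrors as pe

# Reweighting factors projecting to the trivial sector Q = 0, using the
# Zeuthen flow at smearing radius c = 0.35 on an sfqcd ensemble.
rw = pe.input.openQCD.read_qtop_sector('./measurements', 'myens', c=0.35,
                                       target=0, version='sfqcd',
                                       Zeuthen_flow=True)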
 
@@ -2218,148 +2216,148 @@ for version=='sfqcd' If False, the Wilson flow is used.
-
 993def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
- 994    """
- 995    Read data from files in the specified directory with the specified prefix and quark combination extension, and return a `Corr` object containing the data.
- 996
- 997    Parameters
- 998    ----------
- 999    path : str
-1000        The directory to search for the files in.
-1001    prefix : str
-1002        The prefix to match the files against.
-1003    qc : str
-1004        The quark combination extension to match the files against.
-1005    corr : str
-1006        The correlator to extract data for.
-1007    sep : str, optional
-1008        The separator to use when parsing the replika names.
-1009    **kwargs
-1010        Additional keyword arguments. The following keyword arguments are recognized:
-1011
-1012        - names (List[str]): A list of names to use for the replicas.
-1013
-1014    Returns
-1015    -------
-1016    Corr
-1017        A complex valued `Corr` object containing the data read from the files. In case of boudary to bulk correlators.
-1018    or
-1019    CObs
-1020        A complex valued `CObs` object containing the data read from the files. In case of boudary to boundary correlators.
+            
 992def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
+ 993    """
+ 994    Read data from files in the specified directory with the specified prefix and quark combination extension, and return a `Corr` object containing the data.
+ 995
+ 996    Parameters
+ 997    ----------
+ 998    path : str
+ 999        The directory to search for the files in.
+1000    prefix : str
+1001        The prefix to match the files against.
+1002    qc : str
+1003        The quark combination extension to match the files against.
+1004    corr : str
+1005        The correlator to extract data for.
+1006    sep : str, optional
+1007        The separator to use when parsing the replica names.
+1008    **kwargs
+1009        Additional keyword arguments. The following keyword arguments are recognized:
+1010
+1011        - names (List[str]): A list of names to use for the replicas.
+1012
+1013    Returns
+1014    -------
+1015    Corr
+1016        A complex valued `Corr` object containing the data read from the files, in case of boundary-to-bulk correlators.
+1017    or
+1018    CObs
+1019        A complex valued `CObs` object containing the data read from the files, in case of boundary-to-boundary correlators.
+1020
 1021
-1022
-1023    Raises
-1024    ------
-1025    FileNotFoundError
-1026        If no files matching the specified prefix and quark combination extension are found in the specified directory.
-1027    IOError
-1028        If there is an error reading a file.
-1029    struct.error
-1030        If there is an error unpacking binary data.
-1031    """
-1032
-1033    found = []
-1034    files = []
-1035    names = []
-1036    for (dirpath, dirnames, filenames) in os.walk(path + "/"):
-1037        found.extend(filenames)
-1038        break
-1039
-1040    for f in found:
-1041        if fnmatch.fnmatch(f, prefix + "*.ms5_xsf_" + qc + ".dat"):
-1042            files.append(f)
-1043            if not sep == "":
-1044                names.append(prefix + "|r" + f.split(".")[0].split(sep)[1])
-1045            else:
-1046                names.append(prefix)
-1047    files = sorted(files)
-1048
-1049    if "names" in kwargs:
-1050        names = kwargs.get("names")
-1051    else:
-1052        names = sorted(names)
-1053
-1054    cnfgs = []
-1055    realsamples = []
-1056    imagsamples = []
-1057    repnum = 0
-1058    for file in files:
-1059        with open(path + "/" + file, "rb") as fp:
-1060
-1061            t = fp.read(8)
-1062            kappa = struct.unpack('d', t)[0]
-1063            t = fp.read(8)
-1064            csw = struct.unpack('d', t)[0]
-1065            t = fp.read(8)
-1066            dF = struct.unpack('d', t)[0]
-1067            t = fp.read(8)
-1068            zF = struct.unpack('d', t)[0]
-1069
-1070            t = fp.read(4)
-1071            tmax = struct.unpack('i', t)[0]
-1072            t = fp.read(4)
-1073            bnd = struct.unpack('i', t)[0]
-1074
-1075            placesBI = ["gS", "gP",
-1076                        "gA", "gV",
-1077                        "gVt", "lA",
-1078                        "lV", "lVt",
-1079                        "lT", "lTt"]
-1080            placesBB = ["g1", "l1"]
-1081
-1082            # the chunks have the following structure:
-1083            # confignumber, 10x timedependent complex correlators as doubles, 2x timeindependent complex correlators as doubles
-1084
-1085            chunksize = 4 + (8 * 2 * tmax * 10) + (8 * 2 * 2)
-1086            packstr = '=i' + ('d' * 2 * tmax * 10) + ('d' * 2 * 2)
-1087            cnfgs.append([])
-1088            realsamples.append([])
-1089            imagsamples.append([])
-1090            for t in range(tmax):
-1091                realsamples[repnum].append([])
-1092                imagsamples[repnum].append([])
-1093
-1094            while True:
-1095                cnfgt = fp.read(chunksize)
-1096                if not cnfgt:
-1097                    break
-1098                asascii = struct.unpack(packstr, cnfgt)
-1099                cnfg = asascii[0]
-1100                cnfgs[repnum].append(cnfg)
-1101
-1102                if corr not in placesBB:
-1103                    tmpcorr = asascii[1 + 2 * tmax * placesBI.index(corr):1 + 2 * tmax * placesBI.index(corr) + 2 * tmax]
-1104                else:
-1105                    tmpcorr = asascii[1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr):1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr) + 2]
-1106
-1107                corrres = [[], []]
-1108                for i in range(len(tmpcorr)):
-1109                    corrres[i % 2].append(tmpcorr[i])
-1110                for t in range(int(len(tmpcorr) / 2)):
-1111                    realsamples[repnum][t].append(corrres[0][t])
-1112                for t in range(int(len(tmpcorr) / 2)):
-1113                    imagsamples[repnum][t].append(corrres[1][t])
-1114        repnum += 1
-1115
-1116    s = "Read correlator " + corr + " from " + str(repnum) + " replika with " + str(len(realsamples[0][t]))
-1117    for rep in range(1, repnum):
-1118        s += ", " + str(len(realsamples[rep][t]))
-1119    s += " samples"
-1120    print(s)
-1121    print("Asserted run parameters:\n T:", tmax, "kappa:", kappa, "csw:", csw, "dF:", dF, "zF:", zF, "bnd:", bnd)
-1122
-1123    # we have the data now... but we need to re format the whole thing and put it into Corr objects.
-1124
-1125    compObs = []
-1126
-1127    for t in range(int(len(tmpcorr) / 2)):
-1128        compObs.append(CObs(Obs([realsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs),
-1129                            Obs([imagsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs)))
-1130
-1131    if len(compObs) == 1:
-1132        return compObs[0]
-1133    else:
-1134        return Corr(compObs)
+1022    Raises
+1023    ------
+1024    FileNotFoundError
+1025        If no files matching the specified prefix and quark combination extension are found in the specified directory.
+1026    IOError
+1027        If there is an error reading a file.
+1028    struct.error
+1029        If there is an error unpacking binary data.
+1030    """
+1031
+1032    found = []
+1033    files = []
+1034    names = []
+1035    for (dirpath, dirnames, filenames) in os.walk(path + "/"):
+1036        found.extend(filenames)
+1037        break
+1038
+1039    for f in found:
+1040        if fnmatch.fnmatch(f, prefix + "*.ms5_xsf_" + qc + ".dat"):
+1041            files.append(f)
+1042            if not sep == "":
+1043                names.append(prefix + "|r" + f.split(".")[0].split(sep)[1])
+1044            else:
+1045                names.append(prefix)
+1046    files = sorted(files)
+1047
+1048    if "names" in kwargs:
+1049        names = kwargs.get("names")
+1050    else:
+1051        names = sorted(names)
+1052
+1053    cnfgs = []
+1054    realsamples = []
+1055    imagsamples = []
+1056    repnum = 0
+1057    for file in files:
+1058        with open(path + "/" + file, "rb") as fp:
+1059
+1060            t = fp.read(8)
+1061            kappa = struct.unpack('d', t)[0]
+1062            t = fp.read(8)
+1063            csw = struct.unpack('d', t)[0]
+1064            t = fp.read(8)
+1065            dF = struct.unpack('d', t)[0]
+1066            t = fp.read(8)
+1067            zF = struct.unpack('d', t)[0]
+1068
+1069            t = fp.read(4)
+1070            tmax = struct.unpack('i', t)[0]
+1071            t = fp.read(4)
+1072            bnd = struct.unpack('i', t)[0]
+1073
+1074            placesBI = ["gS", "gP",
+1075                        "gA", "gV",
+1076                        "gVt", "lA",
+1077                        "lV", "lVt",
+1078                        "lT", "lTt"]
+1079            placesBB = ["g1", "l1"]
+1080
+1081            # the chunks have the following structure:
+1082            # confignumber, 10x timedependent complex correlators as doubles, 2x timeindependent complex correlators as doubles
+1083
+1084            chunksize = 4 + (8 * 2 * tmax * 10) + (8 * 2 * 2)
+1085            packstr = '=i' + ('d' * 2 * tmax * 10) + ('d' * 2 * 2)
+1086            cnfgs.append([])
+1087            realsamples.append([])
+1088            imagsamples.append([])
+1089            for t in range(tmax):
+1090                realsamples[repnum].append([])
+1091                imagsamples[repnum].append([])
+1092
+1093            while True:
+1094                cnfgt = fp.read(chunksize)
+1095                if not cnfgt:
+1096                    break
+1097                asascii = struct.unpack(packstr, cnfgt)
+1098                cnfg = asascii[0]
+1099                cnfgs[repnum].append(cnfg)
+1100
+1101                if corr not in placesBB:
+1102                    tmpcorr = asascii[1 + 2 * tmax * placesBI.index(corr):1 + 2 * tmax * placesBI.index(corr) + 2 * tmax]
+1103                else:
+1104                    tmpcorr = asascii[1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr):1 + 2 * tmax * len(placesBI) + 2 * placesBB.index(corr) + 2]
+1105
+1106                corrres = [[], []]
+1107                for i in range(len(tmpcorr)):
+1108                    corrres[i % 2].append(tmpcorr[i])
+1109                for t in range(int(len(tmpcorr) / 2)):
+1110                    realsamples[repnum][t].append(corrres[0][t])
+1111                for t in range(int(len(tmpcorr) / 2)):
+1112                    imagsamples[repnum][t].append(corrres[1][t])
+1113        repnum += 1
+1114
+1115    s = "Read correlator " + corr + " from " + str(repnum) + " replica with " + str(len(realsamples[0][t]))
+1116    for rep in range(1, repnum):
+1117        s += ", " + str(len(realsamples[rep][t]))
+1118    s += " samples"
+1119    print(s)
+1120    print("Asserted run parameters:\n T:", tmax, "kappa:", kappa, "csw:", csw, "dF:", dF, "zF:", zF, "bnd:", bnd)
+1121
+1122    # We have the data now, but we need to reformat the whole thing and put it into Corr objects.
+1123
+1124    compObs = []
+1125
+1126    for t in range(int(len(tmpcorr) / 2)):
+1127        compObs.append(CObs(Obs([realsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs),
+1128                            Obs([imagsamples[rep][t] for rep in range(repnum)], names=names, idl=cnfgs)))
+1129
+1130    if len(compObs) == 1:
+1131        return compObs[0]
+1132    else:
+1133        return Corr(compObs)
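
A usage sketch with placeholder arguments, plus the chunk-size arithmetic from the body spelled out for a concrete tmax:

import pyerrors as pe

# For tmax = 32 each record holds one int and (10 * tmax + 2) complex doubles:
# chunksize = 4 + 8 * 2 * 32 * 10 + 8 * 2 * 2 = 5156 bytes.

# Read the boundary-to-bulk correlator 'gA' for quark combination 'ud' from
# files matching 'myens*.ms5_xsf_ud.dat' in './data' (all names hypothetical).
corr = pe.input.openQCD.read_ms5_xsf('./data', 'myens', 'ud', 'gA')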
 
diff --git a/docs/pyerrors/misc.html b/docs/pyerrors/misc.html index 7b214c64..444e13fc 100644 --- a/docs/pyerrors/misc.html +++ b/docs/pyerrors/misc.html @@ -196,7 +196,7 @@
109 for o in ol[1:]: 110 if not isinstance(o, otype): 111 raise Exception("Wrong data type in list.") -112 for attr in ["is_merged", "reweighted", "e_content", "idl"]: +112 for attr in ["reweighted", "e_content", "idl"]: 113 if hasattr(ol[0], attr): 114 if not getattr(ol[0], attr) == getattr(o, attr): 115 raise Exception(f"All Obs in list have to have the same state '{attr}'.") diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 28e60b9f..f8fe5f29 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -256,7 +256,7 @@ 49 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 50 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint', 51 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint', - 52 'idl', 'is_merged', 'tag', '_covobs', '__dict__'] + 52 'idl', 'tag', '_covobs', '__dict__'] 53 54 S_global = 2.0 55 S_dict = {} @@ -304,1544 +304,1536 @@ 97 98 self._value = 0 99 self.N = 0 - 100 self.is_merged = {} - 101 self.idl = {} - 102 if idl is not None: - 103 for name, idx in sorted(zip(names, idl)): - 104 if isinstance(idx, range): - 105 self.idl[name] = idx - 106 elif isinstance(idx, (list, np.ndarray)): - 107 dc = np.unique(np.diff(idx)) - 108 if np.any(dc < 0): - 109 raise Exception("Unsorted idx for idl[%s]" % (name)) - 110 if len(dc) == 1: - 111 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) - 112 else: - 113 self.idl[name] = list(idx) - 114 else: - 115 raise Exception('incompatible type for idl[%s].' % (name)) - 116 else: - 117 for name, sample in sorted(zip(names, samples)): - 118 self.idl[name] = range(1, len(sample) + 1) - 119 - 120 if kwargs.get("means") is not None: - 121 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): - 122 self.shape[name] = len(self.idl[name]) - 123 self.N += self.shape[name] - 124 self.r_values[name] = mean - 125 self.deltas[name] = sample - 126 else: - 127 for name, sample in sorted(zip(names, samples)): - 128 self.shape[name] = len(self.idl[name]) - 129 self.N += self.shape[name] - 130 if len(sample) != self.shape[name]: - 131 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) - 132 self.r_values[name] = np.mean(sample) - 133 self.deltas[name] = sample - self.r_values[name] - 134 self._value += self.shape[name] * self.r_values[name] - 135 self._value /= self.N - 136 - 137 self._dvalue = 0.0 - 138 self.ddvalue = 0.0 - 139 self.reweighted = False - 140 - 141 self.tag = None - 142 - 143 @property - 144 def value(self): - 145 return self._value - 146 - 147 @property - 148 def dvalue(self): - 149 return self._dvalue - 150 - 151 @property - 152 def e_names(self): - 153 return sorted(set([o.split('|')[0] for o in self.names])) - 154 - 155 @property - 156 def cov_names(self): - 157 return sorted(set([o for o in self.covobs.keys()])) - 158 - 159 @property - 160 def mc_names(self): - 161 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) - 162 - 163 @property - 164 def e_content(self): - 165 res = {} - 166 for e, e_name in enumerate(self.e_names): - 167 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) - 168 if e_name in self.names: - 169 res[e_name].append(e_name) - 170 return res - 171 - 172 @property - 173 def covobs(self): - 174 return self._covobs - 175 - 176 def gamma_method(self, **kwargs): - 177 """Estimate the error and related properties of the Obs. - 178 - 179 Parameters - 180 ---------- - 181 S : float - 182 specifies a custom value for the parameter S (default 2.0). 
- 183 If set to 0 it is assumed that the data exhibits no - 184 autocorrelation. In this case the error estimates coincides - 185 with the sample standard error. - 186 tau_exp : float - 187 positive value triggers the critical slowing down analysis - 188 (default 0.0). - 189 N_sigma : float - 190 number of standard deviations from zero until the tail is - 191 attached to the autocorrelation function (default 1). - 192 fft : bool - 193 determines whether the fft algorithm is used for the computation - 194 of the autocorrelation function (default True) - 195 """ - 196 - 197 e_content = self.e_content - 198 self.e_dvalue = {} - 199 self.e_ddvalue = {} - 200 self.e_tauint = {} - 201 self.e_dtauint = {} - 202 self.e_windowsize = {} - 203 self.e_n_tauint = {} - 204 self.e_n_dtauint = {} - 205 e_gamma = {} - 206 self.e_rho = {} - 207 self.e_drho = {} - 208 self._dvalue = 0 - 209 self.ddvalue = 0 - 210 - 211 self.S = {} - 212 self.tau_exp = {} - 213 self.N_sigma = {} - 214 - 215 if kwargs.get('fft') is False: - 216 fft = False - 217 else: - 218 fft = True - 219 - 220 def _parse_kwarg(kwarg_name): - 221 if kwarg_name in kwargs: - 222 tmp = kwargs.get(kwarg_name) - 223 if isinstance(tmp, (int, float)): - 224 if tmp < 0: - 225 raise Exception(kwarg_name + ' has to be larger or equal to 0.') - 226 for e, e_name in enumerate(self.e_names): - 227 getattr(self, kwarg_name)[e_name] = tmp - 228 else: - 229 raise TypeError(kwarg_name + ' is not in proper format.') - 230 else: - 231 for e, e_name in enumerate(self.e_names): - 232 if e_name in getattr(Obs, kwarg_name + '_dict'): - 233 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] - 234 else: - 235 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') - 236 - 237 _parse_kwarg('S') - 238 _parse_kwarg('tau_exp') - 239 _parse_kwarg('N_sigma') - 240 - 241 for e, e_name in enumerate(self.mc_names): - 242 r_length = [] - 243 for r_name in e_content[e_name]: - 244 if isinstance(self.idl[r_name], range): - 245 r_length.append(len(self.idl[r_name])) - 246 else: - 247 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) - 248 - 249 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) - 250 w_max = max(r_length) // 2 - 251 e_gamma[e_name] = np.zeros(w_max) - 252 self.e_rho[e_name] = np.zeros(w_max) - 253 self.e_drho[e_name] = np.zeros(w_max) - 254 - 255 for r_name in e_content[e_name]: - 256 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) - 257 - 258 gamma_div = np.zeros(w_max) - 259 for r_name in e_content[e_name]: - 260 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) - 261 gamma_div[gamma_div < 1] = 1.0 - 262 e_gamma[e_name] /= gamma_div[:w_max] - 263 - 264 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero - 265 self.e_tauint[e_name] = 0.5 - 266 self.e_dtauint[e_name] = 0.0 - 267 self.e_dvalue[e_name] = 0.0 - 268 self.e_ddvalue[e_name] = 0.0 - 269 self.e_windowsize[e_name] = 0 - 270 continue - 271 - 272 gaps = [] - 273 for r_name in e_content[e_name]: - 274 if isinstance(self.idl[r_name], range): - 275 gaps.append(1) - 276 else: - 277 gaps.append(np.min(np.diff(self.idl[r_name]))) - 278 - 279 if not np.all([gi == gaps[0] for gi in gaps]): - 280 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) - 281 else: - 282 gapsize = gaps[0] - 283 - 284 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] - 
285 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) - 286 # Make sure no entry of tauint is smaller than 0.5 - 287 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps - 288 # hep-lat/0306017 eq. (42) - 289 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) - 290 self.e_n_dtauint[e_name][0] = 0.0 - 291 - 292 def _compute_drho(i): - 293 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] - 294 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) - 295 - 296 _compute_drho(gapsize) - 297 if self.tau_exp[e_name] > 0: - 298 texp = self.tau_exp[e_name] - 299 # Critical slowing down analysis - 300 if w_max // 2 <= 1: - 301 raise Exception("Need at least 8 samples for tau_exp error analysis") - 302 for n in range(gapsize, w_max // 2, gapsize): - 303 _compute_drho(n + gapsize) - 304 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: - 305 # Bias correction hep-lat/0306017 eq. (49) included - 306 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive - 307 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) - 308 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 - 309 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 310 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) - 311 self.e_windowsize[e_name] = n - 312 break - 313 else: - 314 if self.S[e_name] == 0.0: - 315 self.e_tauint[e_name] = 0.5 - 316 self.e_dtauint[e_name] = 0.0 - 317 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) - 318 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) - 319 self.e_windowsize[e_name] = 0 - 320 else: - 321 # Standard automatic windowing procedure - 322 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) - 323 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) - 324 for n in range(1, w_max): - 325 if n < w_max // 2 - 2: - 326 _compute_drho(gapsize * n + gapsize) - 327 if g_w[n - 1] < 0 or n >= w_max - 1: - 328 n *= gapsize - 329 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) - 330 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] - 331 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 332 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) - 333 self.e_windowsize[e_name] = n - 334 break - 335 - 336 self._dvalue += self.e_dvalue[e_name] ** 2 - 337 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 - 338 - 339 for e_name in self.cov_names: - 340 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) - 341 self.e_ddvalue[e_name] = 0 - 342 self._dvalue += self.e_dvalue[e_name]**2 - 343 - 344 self._dvalue = np.sqrt(self._dvalue) - 345 if self._dvalue == 0.0: - 346 self.ddvalue = 0.0 - 347 else: - 348 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue - 349 return - 350 - 351 gm = gamma_method - 352 - 353 def _calc_gamma(self, deltas, idx, shape, w_max, fft): - 354 """Calculate Gamma_{AA} from the deltas, which are defined on idx. - 355 idx is assumed to be a contiguous range (possibly with a stepsize != 1) - 356 - 357 Parameters - 358 ---------- - 359 deltas : list - 360 List of fluctuations - 361 idx : list - 362 List or range of configurations on which the deltas are defined. - 363 shape : int - 364 Number of configurations in idx. - 365 w_max : int - 366 Upper bound for the summation window. - 367 fft : bool - 368 determines whether the fft algorithm is used for the computation - 369 of the autocorrelation function. - 370 """ - 371 gamma = np.zeros(w_max) - 372 deltas = _expand_deltas(deltas, idx, shape) - 373 new_shape = len(deltas) - 374 if fft: - 375 max_gamma = min(new_shape, w_max) - 376 # The padding for the fft has to be even - 377 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 - 378 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] - 379 else: - 380 for n in range(w_max): - 381 if new_shape - n >= 0: - 382 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) - 383 - 384 return gamma - 385 - 386 def details(self, ens_content=True): - 387 """Output detailed properties of the Obs. - 388 - 389 Parameters - 390 ---------- - 391 ens_content : bool - 392 print details about the ensembles and replica if true. 
- 393 """ - 394 if self.tag is not None: - 395 print("Description:", self.tag) - 396 if not hasattr(self, 'e_dvalue'): - 397 print('Result\t %3.8e' % (self.value)) - 398 else: - 399 if self.value == 0.0: - 400 percentage = np.nan - 401 else: - 402 percentage = np.abs(self._dvalue / self.value) * 100 - 403 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) - 404 if len(self.e_names) > 1: - 405 print(' Ensemble errors:') - 406 e_content = self.e_content - 407 for e_name in self.mc_names: - 408 if isinstance(self.idl[e_content[e_name][0]], range): - 409 gap = self.idl[e_content[e_name][0]].step - 410 else: - 411 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) - 412 - 413 if len(self.e_names) > 1: - 414 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - 415 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) - 416 tau_string += f" in units of {gap} config" - 417 if gap > 1: - 418 tau_string += "s" - 419 if self.tau_exp[e_name] > 0: - 420 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) - 421 else: - 422 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) - 423 print(tau_string) - 424 for e_name in self.cov_names: - 425 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) - 426 if ens_content is True: - 427 if len(self.e_names) == 1: - 428 print(self.N, 'samples in', len(self.e_names), 'ensemble:') - 429 else: - 430 print(self.N, 'samples in', len(self.e_names), 'ensembles:') - 431 my_string_list = [] - 432 for key, value in sorted(self.e_content.items()): - 433 if key not in self.covobs: - 434 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " - 435 if len(value) == 1: - 436 my_string += f': {self.shape[value[0]]} configurations' - 437 if isinstance(self.idl[value[0]], range): - 438 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' - 439 else: - 440 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' - 441 else: - 442 sublist = [] - 443 for v in value: - 444 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " - 445 my_substring += f': {self.shape[v]} configurations' - 446 if isinstance(self.idl[v], range): - 447 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' - 448 else: - 449 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' - 450 sublist.append(my_substring) - 451 - 452 my_string += '\n' + '\n'.join(sublist) - 453 else: - 454 my_string = ' ' + "\u00B7 Covobs '" + key + "' " - 455 my_string_list.append(my_string) - 456 print('\n'.join(my_string_list)) - 457 - 458 def reweight(self, weight): - 459 """Reweight the obs with given rewighting factors. - 460 - 461 Parameters - 462 ---------- - 463 weight : Obs - 464 Reweighting factor. An Observable that has to be defined on a superset of the - 465 configurations in obs[i].idl for all i. - 466 all_configs : bool - 467 if True, the reweighted observables are normalized by the average of - 468 the reweighting factor on all configurations in weight.idl and not - 469 on the configurations in obs[i].idl. Default False. 
- 470 """ - 471 return reweight(weight, [self])[0] - 472 - 473 def is_zero_within_error(self, sigma=1): - 474 """Checks whether the observable is zero within 'sigma' standard errors. - 475 - 476 Parameters - 477 ---------- - 478 sigma : int - 479 Number of standard errors used for the check. - 480 - 481 Works only properly when the gamma method was run. - 482 """ - 483 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue - 484 - 485 def is_zero(self, atol=1e-10): - 486 """Checks whether the observable is zero within a given tolerance. - 487 - 488 Parameters - 489 ---------- - 490 atol : float - 491 Absolute tolerance (for details see numpy documentation). - 492 """ - 493 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) - 494 - 495 def plot_tauint(self, save=None): - 496 """Plot integrated autocorrelation time for each ensemble. - 497 - 498 Parameters - 499 ---------- - 500 save : str - 501 saves the figure to a file named 'save' if. - 502 """ - 503 if not hasattr(self, 'e_dvalue'): - 504 raise Exception('Run the gamma method first.') - 505 - 506 for e, e_name in enumerate(self.mc_names): - 507 fig = plt.figure() - 508 plt.xlabel(r'$W$') - 509 plt.ylabel(r'$\tau_\mathrm{int}$') - 510 length = int(len(self.e_n_tauint[e_name])) - 511 if self.tau_exp[e_name] > 0: - 512 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] - 513 x_help = np.arange(2 * self.tau_exp[e_name]) - 514 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base - 515 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) - 516 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') - 517 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], - 518 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) - 519 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 520 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) - 521 else: - 522 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) - 523 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 524 - 525 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) - 526 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') - 527 plt.legend() - 528 plt.xlim(-0.5, xmax) - 529 ylim = plt.ylim() - 530 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) - 531 plt.draw() - 532 if save: - 533 fig.savefig(save + "_" + str(e)) - 534 - 535 def plot_rho(self, save=None): - 536 """Plot normalized autocorrelation function time for each ensemble. - 537 - 538 Parameters - 539 ---------- - 540 save : str - 541 saves the figure to a file named 'save' if. 
- 542 """ - 543 if not hasattr(self, 'e_dvalue'): - 544 raise Exception('Run the gamma method first.') - 545 for e, e_name in enumerate(self.mc_names): - 546 fig = plt.figure() - 547 plt.xlabel('W') - 548 plt.ylabel('rho') - 549 length = int(len(self.e_drho[e_name])) - 550 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) - 551 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') - 552 if self.tau_exp[e_name] > 0: - 553 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], - 554 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) - 555 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 556 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) - 557 else: - 558 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 559 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) - 560 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) - 561 plt.xlim(-0.5, xmax) - 562 plt.draw() - 563 if save: - 564 fig.savefig(save + "_" + str(e)) - 565 - 566 def plot_rep_dist(self): - 567 """Plot replica distribution for each ensemble with more than one replicum.""" - 568 if not hasattr(self, 'e_dvalue'): - 569 raise Exception('Run the gamma method first.') - 570 for e, e_name in enumerate(self.mc_names): - 571 if len(self.e_content[e_name]) == 1: - 572 print('No replica distribution for a single replicum (', e_name, ')') - 573 continue - 574 r_length = [] - 575 sub_r_mean = 0 - 576 for r, r_name in enumerate(self.e_content[e_name]): - 577 r_length.append(len(self.deltas[r_name])) - 578 sub_r_mean += self.shape[r_name] * self.r_values[r_name] - 579 e_N = np.sum(r_length) - 580 sub_r_mean /= e_N - 581 arr = np.zeros(len(self.e_content[e_name])) - 582 for r, r_name in enumerate(self.e_content[e_name]): - 583 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) - 584 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) - 585 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') - 586 plt.draw() - 587 - 588 def plot_history(self, expand=True): - 589 """Plot derived Monte Carlo history for each ensemble - 590 - 591 Parameters - 592 ---------- - 593 expand : bool - 594 show expanded history for irregular Monte Carlo chains (default: True). 
- 595 """ - 596 for e, e_name in enumerate(self.mc_names): - 597 plt.figure() - 598 r_length = [] - 599 tmp = [] - 600 tmp_expanded = [] - 601 for r, r_name in enumerate(self.e_content[e_name]): - 602 tmp.append(self.deltas[r_name] + self.r_values[r_name]) - 603 if expand: - 604 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) - 605 r_length.append(len(tmp_expanded[-1])) - 606 else: - 607 r_length.append(len(tmp[-1])) - 608 e_N = np.sum(r_length) - 609 x = np.arange(e_N) - 610 y_test = np.concatenate(tmp, axis=0) - 611 if expand: - 612 y = np.concatenate(tmp_expanded, axis=0) - 613 else: - 614 y = y_test - 615 plt.errorbar(x, y, fmt='.', markersize=3) - 616 plt.xlim(-0.5, e_N - 0.5) - 617 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') - 618 plt.draw() - 619 - 620 def plot_piechart(self, save=None): - 621 """Plot piechart which shows the fractional contribution of each - 622 ensemble to the error and returns a dictionary containing the fractions. - 623 - 624 Parameters - 625 ---------- - 626 save : str - 627 saves the figure to a file named 'save' if. - 628 """ - 629 if not hasattr(self, 'e_dvalue'): - 630 raise Exception('Run the gamma method first.') - 631 if np.isclose(0.0, self._dvalue, atol=1e-15): - 632 raise Exception('Error is 0.0') - 633 labels = self.e_names - 634 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 - 635 fig1, ax1 = plt.subplots() - 636 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) - 637 ax1.axis('equal') - 638 plt.draw() - 639 if save: - 640 fig1.savefig(save) - 641 - 642 return dict(zip(self.e_names, sizes)) - 643 - 644 def dump(self, filename, datatype="json.gz", description="", **kwargs): - 645 """Dump the Obs to a file 'name' of chosen format. - 646 - 647 Parameters - 648 ---------- - 649 filename : str - 650 name of the file to be saved. - 651 datatype : str - 652 Format of the exported file. Supported formats include - 653 "json.gz" and "pickle" - 654 description : str - 655 Description for output file, only relevant for json.gz format. - 656 path : str - 657 specifies a custom path for the file (default '.') - 658 """ - 659 if 'path' in kwargs: - 660 file_name = kwargs.get('path') + '/' + filename - 661 else: - 662 file_name = filename - 663 - 664 if datatype == "json.gz": - 665 from .input.json import dump_to_json - 666 dump_to_json([self], file_name, description=description) - 667 elif datatype == "pickle": - 668 with open(file_name + '.p', 'wb') as fb: - 669 pickle.dump(self, fb) - 670 else: - 671 raise Exception("Unknown datatype " + str(datatype)) - 672 - 673 def export_jackknife(self): - 674 """Export jackknife samples from the Obs - 675 - 676 Returns - 677 ------- - 678 numpy.ndarray - 679 Returns a numpy array of length N + 1 where N is the number of samples - 680 for the given ensemble and replicum. The zeroth entry of the array contains - 681 the mean value of the Obs, entries 1 to N contain the N jackknife samples - 682 derived from the Obs. The current implementation only works for observables - 683 defined on exactly one ensemble and replicum. The derived jackknife samples - 684 should agree with samples from a full jackknife analysis up to O(1/N). 
- 685 """ - 686 - 687 if len(self.names) != 1: - 688 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") - 689 - 690 name = self.names[0] - 691 full_data = self.deltas[name] + self.r_values[name] - 692 n = full_data.size - 693 mean = self.value - 694 tmp_jacks = np.zeros(n + 1) - 695 tmp_jacks[0] = mean - 696 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) - 697 return tmp_jacks - 698 - 699 def __float__(self): - 700 return float(self.value) - 701 - 702 def __repr__(self): - 703 return 'Obs[' + str(self) + ']' - 704 - 705 def __str__(self): - 706 return _format_uncertainty(self.value, self._dvalue) - 707 - 708 def __hash__(self): - 709 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 710 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 711 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 712 hash_tuple += tuple([o.encode() for o in self.names]) - 713 m = hashlib.md5() - 714 [m.update(o) for o in hash_tuple] - 715 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 716 - 717 # Overload comparisons - 718 def __lt__(self, other): - 719 return self.value < other - 720 - 721 def __le__(self, other): - 722 return self.value <= other - 723 - 724 def __gt__(self, other): - 725 return self.value > other - 726 - 727 def __ge__(self, other): - 728 return self.value >= other - 729 - 730 def __eq__(self, other): - 731 return (self - other).is_zero() - 732 - 733 def __ne__(self, other): - 734 return not (self - other).is_zero() - 735 - 736 # Overload math operations - 737 def __add__(self, y): - 738 if isinstance(y, Obs): - 739 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 740 else: - 741 if isinstance(y, np.ndarray): - 742 return np.array([self + o for o in y]) - 743 elif y.__class__.__name__ in ['Corr', 'CObs']: - 744 return NotImplemented - 745 else: - 746 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 747 - 748 def __radd__(self, y): - 749 return self + y - 750 - 751 def __mul__(self, y): - 752 if isinstance(y, Obs): - 753 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 754 else: - 755 if isinstance(y, np.ndarray): - 756 return np.array([self * o for o in y]) - 757 elif isinstance(y, complex): - 758 return CObs(self * y.real, self * y.imag) - 759 elif y.__class__.__name__ in ['Corr', 'CObs']: - 760 return NotImplemented - 761 else: - 762 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 763 - 764 def __rmul__(self, y): - 765 return self * y - 766 - 767 def __sub__(self, y): - 768 if isinstance(y, Obs): - 769 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 770 else: - 771 if isinstance(y, np.ndarray): - 772 return np.array([self - o for o in y]) - 773 elif y.__class__.__name__ in ['Corr', 'CObs']: - 774 return NotImplemented - 775 else: - 776 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 777 - 778 def __rsub__(self, y): - 779 return -1 * (self - y) - 780 - 781 def __pos__(self): - 782 return self - 783 - 784 def __neg__(self): - 785 return -1 * self - 786 - 787 def __truediv__(self, y): - 788 if isinstance(y, Obs): - 789 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 790 else: - 791 if isinstance(y, np.ndarray): - 792 
return np.array([self / o for o in y]) - 793 elif y.__class__.__name__ in ['Corr', 'CObs']: - 794 return NotImplemented - 795 else: - 796 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 797 - 798 def __rtruediv__(self, y): - 799 if isinstance(y, Obs): - 800 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 801 else: - 802 if isinstance(y, np.ndarray): - 803 return np.array([o / self for o in y]) - 804 elif y.__class__.__name__ in ['Corr', 'CObs']: - 805 return NotImplemented - 806 else: - 807 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 808 - 809 def __pow__(self, y): - 810 if isinstance(y, Obs): - 811 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 812 else: - 813 return derived_observable(lambda x: x[0] ** y, [self]) - 814 - 815 def __rpow__(self, y): - 816 if isinstance(y, Obs): - 817 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 818 else: - 819 return derived_observable(lambda x: y ** x[0], [self]) - 820 - 821 def __abs__(self): - 822 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 823 - 824 # Overload numpy functions - 825 def sqrt(self): - 826 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 827 - 828 def log(self): - 829 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 830 - 831 def exp(self): - 832 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 833 - 834 def sin(self): - 835 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 836 - 837 def cos(self): - 838 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 839 - 840 def tan(self): - 841 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 842 - 843 def arcsin(self): - 844 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 845 - 846 def arccos(self): - 847 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 848 - 849 def arctan(self): - 850 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 851 - 852 def sinh(self): - 853 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 854 - 855 def cosh(self): - 856 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 857 - 858 def tanh(self): - 859 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 860 - 861 def arcsinh(self): - 862 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 863 - 864 def arccosh(self): - 865 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 866 - 867 def arctanh(self): - 868 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 100 self.idl = {} + 101 if idl is not None: + 102 for name, idx in sorted(zip(names, idl)): + 103 if isinstance(idx, range): + 104 self.idl[name] = idx + 105 elif isinstance(idx, (list, np.ndarray)): + 106 dc = np.unique(np.diff(idx)) + 107 if np.any(dc < 0): + 108 raise Exception("Unsorted idx for idl[%s]" % (name)) + 109 if len(dc) == 1: + 110 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) + 111 else: + 112 self.idl[name] = list(idx) + 113 else: + 114 raise 
Exception('incompatible type for idl[%s].' % (name))
+ 115 else:
+ 116 for name, sample in sorted(zip(names, samples)):
+ 117 self.idl[name] = range(1, len(sample) + 1)
+ 118
+ 119 if kwargs.get("means") is not None:
+ 120 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
+ 121 self.shape[name] = len(self.idl[name])
+ 122 self.N += self.shape[name]
+ 123 self.r_values[name] = mean
+ 124 self.deltas[name] = sample
+ 125 else:
+ 126 for name, sample in sorted(zip(names, samples)):
+ 127 self.shape[name] = len(self.idl[name])
+ 128 self.N += self.shape[name]
+ 129 if len(sample) != self.shape[name]:
+ 130 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+ 131 self.r_values[name] = np.mean(sample)
+ 132 self.deltas[name] = sample - self.r_values[name]
+ 133 self._value += self.shape[name] * self.r_values[name]
+ 134 self._value /= self.N
+ 135
+ 136 self._dvalue = 0.0
+ 137 self.ddvalue = 0.0
+ 138 self.reweighted = False
+ 139
+ 140 self.tag = None
+ 141
+ 142 @property
+ 143 def value(self):
+ 144 return self._value
+ 145
+ 146 @property
+ 147 def dvalue(self):
+ 148 return self._dvalue
+ 149
+ 150 @property
+ 151 def e_names(self):
+ 152 return sorted(set([o.split('|')[0] for o in self.names]))
+ 153
+ 154 @property
+ 155 def cov_names(self):
+ 156 return sorted(set([o for o in self.covobs.keys()]))
+ 157
+ 158 @property
+ 159 def mc_names(self):
+ 160 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names]))
+ 161
+ 162 @property
+ 163 def e_content(self):
+ 164 res = {}
+ 165 for e, e_name in enumerate(self.e_names):
+ 166 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names))
+ 167 if e_name in self.names:
+ 168 res[e_name].append(e_name)
+ 169 return res
+ 170
+ 171 @property
+ 172 def covobs(self):
+ 173 return self._covobs
+ 174
+ 175 def gamma_method(self, **kwargs):
+ 176 """Estimate the error and related properties of the Obs.
+ 177
+ 178 Parameters
+ 179 ----------
+ 180 S : float
+ 181 specifies a custom value for the parameter S (default 2.0).
+ 182 If set to 0 it is assumed that the data exhibits no
+ 183 autocorrelation. In this case the error estimate coincides
+ 184 with the sample standard error.
+ 185 tau_exp : float
+ 186 positive value triggers the critical slowing down analysis
+ 187 (default 0.0).
+ 188 N_sigma : float
+ 189 number of standard deviations from zero until the tail is
+ 190 attached to the autocorrelation function (default 1).
+ 191 fft : bool + 192 determines whether the fft algorithm is used for the computation + 193 of the autocorrelation function (default True) + 194 """ + 195 + 196 e_content = self.e_content + 197 self.e_dvalue = {} + 198 self.e_ddvalue = {} + 199 self.e_tauint = {} + 200 self.e_dtauint = {} + 201 self.e_windowsize = {} + 202 self.e_n_tauint = {} + 203 self.e_n_dtauint = {} + 204 e_gamma = {} + 205 self.e_rho = {} + 206 self.e_drho = {} + 207 self._dvalue = 0 + 208 self.ddvalue = 0 + 209 + 210 self.S = {} + 211 self.tau_exp = {} + 212 self.N_sigma = {} + 213 + 214 if kwargs.get('fft') is False: + 215 fft = False + 216 else: + 217 fft = True + 218 + 219 def _parse_kwarg(kwarg_name): + 220 if kwarg_name in kwargs: + 221 tmp = kwargs.get(kwarg_name) + 222 if isinstance(tmp, (int, float)): + 223 if tmp < 0: + 224 raise Exception(kwarg_name + ' has to be larger or equal to 0.') + 225 for e, e_name in enumerate(self.e_names): + 226 getattr(self, kwarg_name)[e_name] = tmp + 227 else: + 228 raise TypeError(kwarg_name + ' is not in proper format.') + 229 else: + 230 for e, e_name in enumerate(self.e_names): + 231 if e_name in getattr(Obs, kwarg_name + '_dict'): + 232 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] + 233 else: + 234 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') + 235 + 236 _parse_kwarg('S') + 237 _parse_kwarg('tau_exp') + 238 _parse_kwarg('N_sigma') + 239 + 240 for e, e_name in enumerate(self.mc_names): + 241 r_length = [] + 242 for r_name in e_content[e_name]: + 243 if isinstance(self.idl[r_name], range): + 244 r_length.append(len(self.idl[r_name])) + 245 else: + 246 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) + 247 + 248 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) + 249 w_max = max(r_length) // 2 + 250 e_gamma[e_name] = np.zeros(w_max) + 251 self.e_rho[e_name] = np.zeros(w_max) + 252 self.e_drho[e_name] = np.zeros(w_max) + 253 + 254 for r_name in e_content[e_name]: + 255 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) + 256 + 257 gamma_div = np.zeros(w_max) + 258 for r_name in e_content[e_name]: + 259 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) + 260 gamma_div[gamma_div < 1] = 1.0 + 261 e_gamma[e_name] /= gamma_div[:w_max] + 262 + 263 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero + 264 self.e_tauint[e_name] = 0.5 + 265 self.e_dtauint[e_name] = 0.0 + 266 self.e_dvalue[e_name] = 0.0 + 267 self.e_ddvalue[e_name] = 0.0 + 268 self.e_windowsize[e_name] = 0 + 269 continue + 270 + 271 gaps = [] + 272 for r_name in e_content[e_name]: + 273 if isinstance(self.idl[r_name], range): + 274 gaps.append(1) + 275 else: + 276 gaps.append(np.min(np.diff(self.idl[r_name]))) + 277 + 278 if not np.all([gi == gaps[0] for gi in gaps]): + 279 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) + 280 else: + 281 gapsize = gaps[0] + 282 + 283 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] + 284 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) + 285 # Make sure no entry of tauint is smaller than 0.5 + 286 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps + 287 # hep-lat/0306017 eq. 
(42) + 288 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) + 289 self.e_n_dtauint[e_name][0] = 0.0 + 290 + 291 def _compute_drho(i): + 292 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] + 293 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) + 294 + 295 _compute_drho(gapsize) + 296 if self.tau_exp[e_name] > 0: + 297 texp = self.tau_exp[e_name] + 298 # Critical slowing down analysis + 299 if w_max // 2 <= 1: + 300 raise Exception("Need at least 8 samples for tau_exp error analysis") + 301 for n in range(gapsize, w_max // 2, gapsize): + 302 _compute_drho(n + gapsize) + 303 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: + 304 # Bias correction hep-lat/0306017 eq. (49) included + 305 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive + 306 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) + 307 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 + 308 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 309 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) + 310 self.e_windowsize[e_name] = n + 311 break + 312 else: + 313 if self.S[e_name] == 0.0: + 314 self.e_tauint[e_name] = 0.5 + 315 self.e_dtauint[e_name] = 0.0 + 316 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) + 317 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) + 318 self.e_windowsize[e_name] = 0 + 319 else: + 320 # Standard automatic windowing procedure + 321 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) + 322 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) + 323 for n in range(1, w_max): + 324 if n < w_max // 2 - 2: + 325 _compute_drho(gapsize * n + gapsize) + 326 if g_w[n - 1] < 0 or n >= w_max - 1: + 327 n *= gapsize + 328 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) + 329 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] + 330 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 331 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) + 332 self.e_windowsize[e_name] = n + 333 break + 334 + 335 self._dvalue += self.e_dvalue[e_name] ** 2 + 336 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 + 337 + 338 for e_name in self.cov_names: + 339 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) + 340 self.e_ddvalue[e_name] = 0 + 341 self._dvalue += self.e_dvalue[e_name]**2 + 342 + 343 self._dvalue = np.sqrt(self._dvalue) + 344 if self._dvalue == 0.0: + 345 self.ddvalue = 0.0 + 346 else: + 347 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue + 348 return + 349 + 350 gm = gamma_method + 351 + 352 def _calc_gamma(self, deltas, idx, shape, w_max, fft): + 353 """Calculate Gamma_{AA} from the deltas, which are defined on idx. + 354 idx is assumed to be a contiguous range (possibly with a stepsize != 1) + 355 + 356 Parameters + 357 ---------- + 358 deltas : list + 359 List of fluctuations + 360 idx : list + 361 List or range of configurations on which the deltas are defined. + 362 shape : int + 363 Number of configurations in idx. + 364 w_max : int + 365 Upper bound for the summation window. + 366 fft : bool + 367 determines whether the fft algorithm is used for the computation + 368 of the autocorrelation function. + 369 """ + 370 gamma = np.zeros(w_max) + 371 deltas = _expand_deltas(deltas, idx, shape) + 372 new_shape = len(deltas) + 373 if fft: + 374 max_gamma = min(new_shape, w_max) + 375 # The padding for the fft has to be even + 376 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 + 377 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] + 378 else: + 379 for n in range(w_max): + 380 if new_shape - n >= 0: + 381 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) + 382 + 383 return gamma + 384 + 385 def details(self, ens_content=True): + 386 """Output detailed properties of the Obs. + 387 + 388 Parameters + 389 ---------- + 390 ens_content : bool + 391 print details about the ensembles and replica if true. 
+ 392 """ + 393 if self.tag is not None: + 394 print("Description:", self.tag) + 395 if not hasattr(self, 'e_dvalue'): + 396 print('Result\t %3.8e' % (self.value)) + 397 else: + 398 if self.value == 0.0: + 399 percentage = np.nan + 400 else: + 401 percentage = np.abs(self._dvalue / self.value) * 100 + 402 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) + 403 if len(self.e_names) > 1: + 404 print(' Ensemble errors:') + 405 e_content = self.e_content + 406 for e_name in self.mc_names: + 407 if isinstance(self.idl[e_content[e_name][0]], range): + 408 gap = self.idl[e_content[e_name][0]].step + 409 else: + 410 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) + 411 + 412 if len(self.e_names) > 1: + 413 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) + 414 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) + 415 tau_string += f" in units of {gap} config" + 416 if gap > 1: + 417 tau_string += "s" + 418 if self.tau_exp[e_name] > 0: + 419 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) + 420 else: + 421 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) + 422 print(tau_string) + 423 for e_name in self.cov_names: + 424 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) + 425 if ens_content is True: + 426 if len(self.e_names) == 1: + 427 print(self.N, 'samples in', len(self.e_names), 'ensemble:') + 428 else: + 429 print(self.N, 'samples in', len(self.e_names), 'ensembles:') + 430 my_string_list = [] + 431 for key, value in sorted(self.e_content.items()): + 432 if key not in self.covobs: + 433 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " + 434 if len(value) == 1: + 435 my_string += f': {self.shape[value[0]]} configurations' + 436 if isinstance(self.idl[value[0]], range): + 437 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' + 438 else: + 439 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' + 440 else: + 441 sublist = [] + 442 for v in value: + 443 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " + 444 my_substring += f': {self.shape[v]} configurations' + 445 if isinstance(self.idl[v], range): + 446 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' + 447 else: + 448 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' + 449 sublist.append(my_substring) + 450 + 451 my_string += '\n' + '\n'.join(sublist) + 452 else: + 453 my_string = ' ' + "\u00B7 Covobs '" + key + "' " + 454 my_string_list.append(my_string) + 455 print('\n'.join(my_string_list)) + 456 + 457 def reweight(self, weight): + 458 """Reweight the obs with given rewighting factors. + 459 + 460 Parameters + 461 ---------- + 462 weight : Obs + 463 Reweighting factor. An Observable that has to be defined on a superset of the + 464 configurations in obs[i].idl for all i. + 465 all_configs : bool + 466 if True, the reweighted observables are normalized by the average of + 467 the reweighting factor on all configurations in weight.idl and not + 468 on the configurations in obs[i].idl. Default False. 
+ 469 """ + 470 return reweight(weight, [self])[0] + 471 + 472 def is_zero_within_error(self, sigma=1): + 473 """Checks whether the observable is zero within 'sigma' standard errors. + 474 + 475 Parameters + 476 ---------- + 477 sigma : int + 478 Number of standard errors used for the check. + 479 + 480 Works only properly when the gamma method was run. + 481 """ + 482 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue + 483 + 484 def is_zero(self, atol=1e-10): + 485 """Checks whether the observable is zero within a given tolerance. + 486 + 487 Parameters + 488 ---------- + 489 atol : float + 490 Absolute tolerance (for details see numpy documentation). + 491 """ + 492 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) + 493 + 494 def plot_tauint(self, save=None): + 495 """Plot integrated autocorrelation time for each ensemble. + 496 + 497 Parameters + 498 ---------- + 499 save : str + 500 saves the figure to a file named 'save' if. + 501 """ + 502 if not hasattr(self, 'e_dvalue'): + 503 raise Exception('Run the gamma method first.') + 504 + 505 for e, e_name in enumerate(self.mc_names): + 506 fig = plt.figure() + 507 plt.xlabel(r'$W$') + 508 plt.ylabel(r'$\tau_\mathrm{int}$') + 509 length = int(len(self.e_n_tauint[e_name])) + 510 if self.tau_exp[e_name] > 0: + 511 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] + 512 x_help = np.arange(2 * self.tau_exp[e_name]) + 513 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base + 514 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) + 515 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') + 516 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], + 517 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) + 518 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 519 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) + 520 else: + 521 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) + 522 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 523 + 524 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) + 525 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') + 526 plt.legend() + 527 plt.xlim(-0.5, xmax) + 528 ylim = plt.ylim() + 529 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) + 530 plt.draw() + 531 if save: + 532 fig.savefig(save + "_" + str(e)) + 533 + 534 def plot_rho(self, save=None): + 535 """Plot normalized autocorrelation function time for each ensemble. + 536 + 537 Parameters + 538 ---------- + 539 save : str + 540 saves the figure to a file named 'save' if. 
+ 541 """ + 542 if not hasattr(self, 'e_dvalue'): + 543 raise Exception('Run the gamma method first.') + 544 for e, e_name in enumerate(self.mc_names): + 545 fig = plt.figure() + 546 plt.xlabel('W') + 547 plt.ylabel('rho') + 548 length = int(len(self.e_drho[e_name])) + 549 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) + 550 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') + 551 if self.tau_exp[e_name] > 0: + 552 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], + 553 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) + 554 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 555 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) + 556 else: + 557 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 558 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) + 559 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) + 560 plt.xlim(-0.5, xmax) + 561 plt.draw() + 562 if save: + 563 fig.savefig(save + "_" + str(e)) + 564 + 565 def plot_rep_dist(self): + 566 """Plot replica distribution for each ensemble with more than one replicum.""" + 567 if not hasattr(self, 'e_dvalue'): + 568 raise Exception('Run the gamma method first.') + 569 for e, e_name in enumerate(self.mc_names): + 570 if len(self.e_content[e_name]) == 1: + 571 print('No replica distribution for a single replicum (', e_name, ')') + 572 continue + 573 r_length = [] + 574 sub_r_mean = 0 + 575 for r, r_name in enumerate(self.e_content[e_name]): + 576 r_length.append(len(self.deltas[r_name])) + 577 sub_r_mean += self.shape[r_name] * self.r_values[r_name] + 578 e_N = np.sum(r_length) + 579 sub_r_mean /= e_N + 580 arr = np.zeros(len(self.e_content[e_name])) + 581 for r, r_name in enumerate(self.e_content[e_name]): + 582 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) + 583 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) + 584 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') + 585 plt.draw() + 586 + 587 def plot_history(self, expand=True): + 588 """Plot derived Monte Carlo history for each ensemble + 589 + 590 Parameters + 591 ---------- + 592 expand : bool + 593 show expanded history for irregular Monte Carlo chains (default: True). 
+ 594 """ + 595 for e, e_name in enumerate(self.mc_names): + 596 plt.figure() + 597 r_length = [] + 598 tmp = [] + 599 tmp_expanded = [] + 600 for r, r_name in enumerate(self.e_content[e_name]): + 601 tmp.append(self.deltas[r_name] + self.r_values[r_name]) + 602 if expand: + 603 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) + 604 r_length.append(len(tmp_expanded[-1])) + 605 else: + 606 r_length.append(len(tmp[-1])) + 607 e_N = np.sum(r_length) + 608 x = np.arange(e_N) + 609 y_test = np.concatenate(tmp, axis=0) + 610 if expand: + 611 y = np.concatenate(tmp_expanded, axis=0) + 612 else: + 613 y = y_test + 614 plt.errorbar(x, y, fmt='.', markersize=3) + 615 plt.xlim(-0.5, e_N - 0.5) + 616 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') + 617 plt.draw() + 618 + 619 def plot_piechart(self, save=None): + 620 """Plot piechart which shows the fractional contribution of each + 621 ensemble to the error and returns a dictionary containing the fractions. + 622 + 623 Parameters + 624 ---------- + 625 save : str + 626 saves the figure to a file named 'save' if. + 627 """ + 628 if not hasattr(self, 'e_dvalue'): + 629 raise Exception('Run the gamma method first.') + 630 if np.isclose(0.0, self._dvalue, atol=1e-15): + 631 raise Exception('Error is 0.0') + 632 labels = self.e_names + 633 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 + 634 fig1, ax1 = plt.subplots() + 635 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) + 636 ax1.axis('equal') + 637 plt.draw() + 638 if save: + 639 fig1.savefig(save) + 640 + 641 return dict(zip(self.e_names, sizes)) + 642 + 643 def dump(self, filename, datatype="json.gz", description="", **kwargs): + 644 """Dump the Obs to a file 'name' of chosen format. + 645 + 646 Parameters + 647 ---------- + 648 filename : str + 649 name of the file to be saved. + 650 datatype : str + 651 Format of the exported file. Supported formats include + 652 "json.gz" and "pickle" + 653 description : str + 654 Description for output file, only relevant for json.gz format. + 655 path : str + 656 specifies a custom path for the file (default '.') + 657 """ + 658 if 'path' in kwargs: + 659 file_name = kwargs.get('path') + '/' + filename + 660 else: + 661 file_name = filename + 662 + 663 if datatype == "json.gz": + 664 from .input.json import dump_to_json + 665 dump_to_json([self], file_name, description=description) + 666 elif datatype == "pickle": + 667 with open(file_name + '.p', 'wb') as fb: + 668 pickle.dump(self, fb) + 669 else: + 670 raise Exception("Unknown datatype " + str(datatype)) + 671 + 672 def export_jackknife(self): + 673 """Export jackknife samples from the Obs + 674 + 675 Returns + 676 ------- + 677 numpy.ndarray + 678 Returns a numpy array of length N + 1 where N is the number of samples + 679 for the given ensemble and replicum. The zeroth entry of the array contains + 680 the mean value of the Obs, entries 1 to N contain the N jackknife samples + 681 derived from the Obs. The current implementation only works for observables + 682 defined on exactly one ensemble and replicum. The derived jackknife samples + 683 should agree with samples from a full jackknife analysis up to O(1/N). 
+ 684 """ + 685 + 686 if len(self.names) != 1: + 687 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 688 + 689 name = self.names[0] + 690 full_data = self.deltas[name] + self.r_values[name] + 691 n = full_data.size + 692 mean = self.value + 693 tmp_jacks = np.zeros(n + 1) + 694 tmp_jacks[0] = mean + 695 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) + 696 return tmp_jacks + 697 + 698 def __float__(self): + 699 return float(self.value) + 700 + 701 def __repr__(self): + 702 return 'Obs[' + str(self) + ']' + 703 + 704 def __str__(self): + 705 return _format_uncertainty(self.value, self._dvalue) + 706 + 707 def __hash__(self): + 708 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 709 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 710 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 711 hash_tuple += tuple([o.encode() for o in self.names]) + 712 m = hashlib.md5() + 713 [m.update(o) for o in hash_tuple] + 714 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 715 + 716 # Overload comparisons + 717 def __lt__(self, other): + 718 return self.value < other + 719 + 720 def __le__(self, other): + 721 return self.value <= other + 722 + 723 def __gt__(self, other): + 724 return self.value > other + 725 + 726 def __ge__(self, other): + 727 return self.value >= other + 728 + 729 def __eq__(self, other): + 730 return (self - other).is_zero() + 731 + 732 def __ne__(self, other): + 733 return not (self - other).is_zero() + 734 + 735 # Overload math operations + 736 def __add__(self, y): + 737 if isinstance(y, Obs): + 738 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 739 else: + 740 if isinstance(y, np.ndarray): + 741 return np.array([self + o for o in y]) + 742 elif y.__class__.__name__ in ['Corr', 'CObs']: + 743 return NotImplemented + 744 else: + 745 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 746 + 747 def __radd__(self, y): + 748 return self + y + 749 + 750 def __mul__(self, y): + 751 if isinstance(y, Obs): + 752 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 753 else: + 754 if isinstance(y, np.ndarray): + 755 return np.array([self * o for o in y]) + 756 elif isinstance(y, complex): + 757 return CObs(self * y.real, self * y.imag) + 758 elif y.__class__.__name__ in ['Corr', 'CObs']: + 759 return NotImplemented + 760 else: + 761 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 762 + 763 def __rmul__(self, y): + 764 return self * y + 765 + 766 def __sub__(self, y): + 767 if isinstance(y, Obs): + 768 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 769 else: + 770 if isinstance(y, np.ndarray): + 771 return np.array([self - o for o in y]) + 772 elif y.__class__.__name__ in ['Corr', 'CObs']: + 773 return NotImplemented + 774 else: + 775 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 776 + 777 def __rsub__(self, y): + 778 return -1 * (self - y) + 779 + 780 def __pos__(self): + 781 return self + 782 + 783 def __neg__(self): + 784 return -1 * self + 785 + 786 def __truediv__(self, y): + 787 if isinstance(y, Obs): + 788 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 789 else: + 790 if isinstance(y, np.ndarray): + 791 
return np.array([self / o for o in y]) + 792 elif y.__class__.__name__ in ['Corr', 'CObs']: + 793 return NotImplemented + 794 else: + 795 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 796 + 797 def __rtruediv__(self, y): + 798 if isinstance(y, Obs): + 799 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 800 else: + 801 if isinstance(y, np.ndarray): + 802 return np.array([o / self for o in y]) + 803 elif y.__class__.__name__ in ['Corr', 'CObs']: + 804 return NotImplemented + 805 else: + 806 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 807 + 808 def __pow__(self, y): + 809 if isinstance(y, Obs): + 810 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 811 else: + 812 return derived_observable(lambda x: x[0] ** y, [self]) + 813 + 814 def __rpow__(self, y): + 815 if isinstance(y, Obs): + 816 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 817 else: + 818 return derived_observable(lambda x: y ** x[0], [self]) + 819 + 820 def __abs__(self): + 821 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 822 + 823 # Overload numpy functions + 824 def sqrt(self): + 825 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 826 + 827 def log(self): + 828 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 829 + 830 def exp(self): + 831 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 832 + 833 def sin(self): + 834 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 835 + 836 def cos(self): + 837 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 838 + 839 def tan(self): + 840 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 841 + 842 def arcsin(self): + 843 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 844 + 845 def arccos(self): + 846 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 847 + 848 def arctan(self): + 849 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 850 + 851 def sinh(self): + 852 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 853 + 854 def cosh(self): + 855 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 856 + 857 def tanh(self): + 858 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 859 + 860 def arcsinh(self): + 861 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 862 + 863 def arccosh(self): + 864 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 865 + 866 def arctanh(self): + 867 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 868 869 - 870 - 871class CObs: - 872 """Class for a complex valued observable.""" - 873 __slots__ = ['_real', '_imag', 'tag'] - 874 - 875 def __init__(self, real, imag=0.0): - 876 self._real = real - 877 self._imag = imag - 878 self.tag = None - 879 - 880 @property - 881 def real(self): - 882 return self._real - 883 - 884 @property - 885 def imag(self): - 886 return self._imag - 887 - 888 def gamma_method(self, **kwargs): - 889 """Executes the gamma_method for the real and the 
imaginary part.""" - 890 if isinstance(self.real, Obs): - 891 self.real.gamma_method(**kwargs) - 892 if isinstance(self.imag, Obs): - 893 self.imag.gamma_method(**kwargs) - 894 - 895 def is_zero(self): - 896 """Checks whether both real and imaginary part are zero within machine precision.""" - 897 return self.real == 0.0 and self.imag == 0.0 - 898 - 899 def conjugate(self): - 900 return CObs(self.real, -self.imag) - 901 - 902 def __add__(self, other): - 903 if isinstance(other, np.ndarray): - 904 return other + self - 905 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 906 return CObs(self.real + other.real, - 907 self.imag + other.imag) - 908 else: - 909 return CObs(self.real + other, self.imag) - 910 - 911 def __radd__(self, y): - 912 return self + y - 913 - 914 def __sub__(self, other): - 915 if isinstance(other, np.ndarray): - 916 return -1 * (other - self) - 917 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 918 return CObs(self.real - other.real, self.imag - other.imag) - 919 else: - 920 return CObs(self.real - other, self.imag) - 921 - 922 def __rsub__(self, other): - 923 return -1 * (self - other) - 924 - 925 def __mul__(self, other): - 926 if isinstance(other, np.ndarray): - 927 return other * self - 928 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 929 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 930 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 931 [self.real, other.real, self.imag, other.imag], - 932 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 933 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 934 [self.real, other.real, self.imag, other.imag], - 935 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 936 elif getattr(other, 'imag', 0) != 0: - 937 return CObs(self.real * other.real - self.imag * other.imag, - 938 self.imag * other.real + self.real * other.imag) - 939 else: - 940 return CObs(self.real * other.real, self.imag * other.real) - 941 else: - 942 return CObs(self.real * other, self.imag * other) - 943 - 944 def __rmul__(self, other): - 945 return self * other - 946 - 947 def __truediv__(self, other): - 948 if isinstance(other, np.ndarray): - 949 return 1 / (other / self) - 950 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 951 r = other.real ** 2 + other.imag ** 2 - 952 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 953 else: - 954 return CObs(self.real / other, self.imag / other) - 955 - 956 def __rtruediv__(self, other): - 957 r = self.real ** 2 + self.imag ** 2 - 958 if hasattr(other, 'real') and hasattr(other, 'imag'): - 959 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 960 else: - 961 return CObs(self.real * other / r, -self.imag * other / r) - 962 - 963 def __abs__(self): - 964 return np.sqrt(self.real**2 + self.imag**2) - 965 - 966 def __pos__(self): - 967 return self - 968 - 969 def __neg__(self): - 970 return -1 * self - 971 - 972 def __eq__(self, other): - 973 return self.real == other.real and self.imag == other.imag - 974 - 975 def __str__(self): - 976 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' - 977 - 978 def __repr__(self): - 979 return 'CObs[' + str(self) + ']' + 870class CObs: + 871 """Class for a complex valued observable.""" + 872 __slots__ = 
['_real', '_imag', 'tag'] + 873 + 874 def __init__(self, real, imag=0.0): + 875 self._real = real + 876 self._imag = imag + 877 self.tag = None + 878 + 879 @property + 880 def real(self): + 881 return self._real + 882 + 883 @property + 884 def imag(self): + 885 return self._imag + 886 + 887 def gamma_method(self, **kwargs): + 888 """Executes the gamma_method for the real and the imaginary part.""" + 889 if isinstance(self.real, Obs): + 890 self.real.gamma_method(**kwargs) + 891 if isinstance(self.imag, Obs): + 892 self.imag.gamma_method(**kwargs) + 893 + 894 def is_zero(self): + 895 """Checks whether both real and imaginary part are zero within machine precision.""" + 896 return self.real == 0.0 and self.imag == 0.0 + 897 + 898 def conjugate(self): + 899 return CObs(self.real, -self.imag) + 900 + 901 def __add__(self, other): + 902 if isinstance(other, np.ndarray): + 903 return other + self + 904 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 905 return CObs(self.real + other.real, + 906 self.imag + other.imag) + 907 else: + 908 return CObs(self.real + other, self.imag) + 909 + 910 def __radd__(self, y): + 911 return self + y + 912 + 913 def __sub__(self, other): + 914 if isinstance(other, np.ndarray): + 915 return -1 * (other - self) + 916 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 917 return CObs(self.real - other.real, self.imag - other.imag) + 918 else: + 919 return CObs(self.real - other, self.imag) + 920 + 921 def __rsub__(self, other): + 922 return -1 * (self - other) + 923 + 924 def __mul__(self, other): + 925 if isinstance(other, np.ndarray): + 926 return other * self + 927 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 928 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 929 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 930 [self.real, other.real, self.imag, other.imag], + 931 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 932 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 933 [self.real, other.real, self.imag, other.imag], + 934 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 935 elif getattr(other, 'imag', 0) != 0: + 936 return CObs(self.real * other.real - self.imag * other.imag, + 937 self.imag * other.real + self.real * other.imag) + 938 else: + 939 return CObs(self.real * other.real, self.imag * other.real) + 940 else: + 941 return CObs(self.real * other, self.imag * other) + 942 + 943 def __rmul__(self, other): + 944 return self * other + 945 + 946 def __truediv__(self, other): + 947 if isinstance(other, np.ndarray): + 948 return 1 / (other / self) + 949 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 950 r = other.real ** 2 + other.imag ** 2 + 951 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 952 else: + 953 return CObs(self.real / other, self.imag / other) + 954 + 955 def __rtruediv__(self, other): + 956 r = self.real ** 2 + self.imag ** 2 + 957 if hasattr(other, 'real') and hasattr(other, 'imag'): + 958 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 959 else: + 960 return CObs(self.real * other / r, -self.imag * other / r) + 961 + 962 def __abs__(self): + 963 return np.sqrt(self.real**2 + self.imag**2) + 964 + 965 def __pos__(self): + 966 return self + 967 + 968 def __neg__(self): + 969 return -1 * self + 
970
+ 971 def __eq__(self, other):
+ 972 return self.real == other.real and self.imag == other.imag
+ 973
+ 974 def __str__(self):
+ 975 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+ 976
+ 977 def __repr__(self):
+ 978 return 'CObs[' + str(self) + ']'
+ 979
 980
- 981
- 982def _format_uncertainty(value, dvalue):
- 983 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)"""
- 984 if dvalue == 0.0:
- 985 return str(value)
- 986 fexp = np.floor(np.log10(dvalue))
- 987 if fexp < 0.0:
- 988 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
- 989 elif fexp == 0.0:
- 990 return '{:.1f}({:1.1f})'.format(value, dvalue)
- 991 else:
- 992 return '{:.0f}({:2.0f})'.format(value, dvalue)
+ 981def _format_uncertainty(value, dvalue):
+ 982 """Creates a string of a value and its error in parenthesis notation, e.g., 13.02(45)"""
+ 983 if dvalue == 0.0:
+ 984 return str(value)
+ 985 fexp = np.floor(np.log10(dvalue))
+ 986 if fexp < 0.0:
+ 987 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
+ 988 elif fexp == 0.0:
+ 989 return '{:.1f}({:1.1f})'.format(value, dvalue)
+ 990 else:
+ 991 return '{:.0f}({:2.0f})'.format(value, dvalue)
+ 992
 993
- 994
- 995def _expand_deltas(deltas, idx, shape):
- 996 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
- 997 If idx is of type range, the deltas are not changed
- 998
- 999 Parameters
-1000 ----------
-1001 deltas : list
-1002 List of fluctuations
-1003 idx : list
-1004 List or range of configs on which the deltas are defined, has to be sorted in ascending order.
-1005 shape : int
-1006 Number of configs in idx.
-1007 """
-1008 if isinstance(idx, range):
-1009 return deltas
-1010 else:
-1011 ret = np.zeros(idx[-1] - idx[0] + 1)
-1012 for i in range(shape):
-1013 ret[idx[i] - idx[0]] = deltas[i]
-1014 return ret
+ 994def _expand_deltas(deltas, idx, shape):
+ 995 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
+ 996 If idx is of type range, the deltas are not changed.
+ 997
+ 998 Parameters
+ 999 ----------
+1000 deltas : list
+1001 List of fluctuations
+1002 idx : list
+1003 List or range of configs on which the deltas are defined, has to be sorted in ascending order.
+1004 shape : int
+1005 Number of configs in idx.
+1006 """
+1007 if isinstance(idx, range):
+1008 return deltas
+1009 else:
+1010 ret = np.zeros(idx[-1] - idx[0] + 1)
+1011 for i in range(shape):
+1012 ret[idx[i] - idx[0]] = deltas[i]
+1013 return ret
+1014
 1015
-1016
-1017def _merge_idx(idl):
-1018 """Returns the union of all lists in idl as sorted list
-1019
-1020 Parameters
-1021 ----------
-1022 idl : list
-1023 List of lists or ranges.
-1024 """ -1025 -1026 # Use groupby to efficiently check whether all elements of idl are identical -1027 try: -1028 g = groupby(idl) -1029 if next(g, True) and not next(g, False): -1030 return idl[0] -1031 except Exception: -1032 pass -1033 -1034 if np.all([type(idx) is range for idx in idl]): -1035 if len(set([idx[0] for idx in idl])) == 1: -1036 idstart = min([idx.start for idx in idl]) -1037 idstop = max([idx.stop for idx in idl]) -1038 idstep = min([idx.step for idx in idl]) -1039 return range(idstart, idstop, idstep) -1040 -1041 return sorted(set().union(*idl)) +1016def _merge_idx(idl): +1017 """Returns the union of all lists in idl as sorted list +1018 +1019 Parameters +1020 ---------- +1021 idl : list +1022 List of lists or ranges. +1023 """ +1024 +1025 # Use groupby to efficiently check whether all elements of idl are identical +1026 try: +1027 g = groupby(idl) +1028 if next(g, True) and not next(g, False): +1029 return idl[0] +1030 except Exception: +1031 pass +1032 +1033 if np.all([type(idx) is range for idx in idl]): +1034 if len(set([idx[0] for idx in idl])) == 1: +1035 idstart = min([idx.start for idx in idl]) +1036 idstop = max([idx.stop for idx in idl]) +1037 idstep = min([idx.step for idx in idl]) +1038 return range(idstart, idstop, idstep) +1039 +1040 return sorted(set().union(*idl)) +1041 1042 -1043 -1044def _intersection_idx(idl): -1045 """Returns the intersection of all lists in idl as sorted list -1046 -1047 Parameters -1048 ---------- -1049 idl : list -1050 List of lists or ranges. -1051 """ -1052 -1053 def _lcm(*args): -1054 """Returns the lowest common multiple of args. -1055 -1056 From python 3.9 onwards the math library contains an lcm function.""" -1057 return reduce(lambda a, b: a * b // gcd(a, b), args) -1058 -1059 # Use groupby to efficiently check whether all elements of idl are identical -1060 try: -1061 g = groupby(idl) -1062 if next(g, True) and not next(g, False): -1063 return idl[0] -1064 except Exception: -1065 pass -1066 -1067 if np.all([type(idx) is range for idx in idl]): -1068 if len(set([idx[0] for idx in idl])) == 1: -1069 idstart = max([idx.start for idx in idl]) -1070 idstop = min([idx.stop for idx in idl]) -1071 idstep = _lcm(*[idx.step for idx in idl]) -1072 return range(idstart, idstop, idstep) -1073 -1074 return sorted(set.intersection(*[set(o) for o in idl])) +1043def _intersection_idx(idl): +1044 """Returns the intersection of all lists in idl as sorted list +1045 +1046 Parameters +1047 ---------- +1048 idl : list +1049 List of lists or ranges. +1050 """ +1051 +1052 def _lcm(*args): +1053 """Returns the lowest common multiple of args. +1054 +1055 From python 3.9 onwards the math library contains an lcm function.""" +1056 return reduce(lambda a, b: a * b // gcd(a, b), args) +1057 +1058 # Use groupby to efficiently check whether all elements of idl are identical +1059 try: +1060 g = groupby(idl) +1061 if next(g, True) and not next(g, False): +1062 return idl[0] +1063 except Exception: +1064 pass +1065 +1066 if np.all([type(idx) is range for idx in idl]): +1067 if len(set([idx[0] for idx in idl])) == 1: +1068 idstart = max([idx.start for idx in idl]) +1069 idstop = min([idx.stop for idx in idl]) +1070 idstep = _lcm(*[idx.step for idx in idl]) +1071 return range(idstart, idstop, idstep) +1072 +1073 return sorted(set.intersection(*[set(o) for o in idl])) +1074 1075 -1076 -1077def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1078 """Expand deltas defined on idx to the list of configs that is defined by new_idx. 
-1079 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
-1080 of the step sizes is used as the new step size.
-1081 
-1082 Parameters
-1083 ----------
-1084 deltas : list
-1085 List of fluctuations
-1086 idx : list
-1087 List or range of configs on which the deltas are defined.
-1088 Has to be a subset of new_idx and has to be sorted in ascending order.
-1089 shape : int
-1090 Number of configs in idx.
-1091 new_idx : list
-1092 List of configs that defines the new range, has to be sorted in ascending order.
-1093 """
-1094 
-1095 if type(idx) is range and type(new_idx) is range:
-1096 if idx == new_idx:
-1097 return deltas
-1098 ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
-1099 for i in range(shape):
-1100 ret[idx[i] - new_idx[0]] = deltas[i]
-1101 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])
+1076def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
+1077 """Expand deltas defined on idx to the list of configs that is defined by new_idx.
+1078 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
+1079 of the step sizes is used as the new step size.
+1080 
+1081 Parameters
+1082 ----------
+1083 deltas : list
+1084 List of fluctuations
+1085 idx : list
+1086 List or range of configs on which the deltas are defined.
+1087 Has to be a subset of new_idx and has to be sorted in ascending order.
+1088 shape : int
+1089 Number of configs in idx.
+1090 new_idx : list
+1091 List of configs that defines the new range, has to be sorted in ascending order.
+1092 """
+1093 
+1094 if type(idx) is range and type(new_idx) is range:
+1095 if idx == new_idx:
+1096 return deltas
+1097 ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+1098 for i in range(shape):
+1099 ret[idx[i] - new_idx[0]] = deltas[i]
+1100 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])
+1101 
 1102 
-1103 
-1104def derived_observable(func, data, array_mode=False, **kwargs):
-1105 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1106 
-1107 Parameters
-1108 ----------
-1109 func : object
-1110 arbitrary function of the form func(data, **kwargs). For the
-1111 automatic differentiation to work, all numpy functions have to have
-1112 the autograd wrapper (use 'import autograd.numpy as anp').
-1113 data : list
-1114 list of Obs, e.g. [obs1, obs2, obs3].
-1115 num_grad : bool
-1116 if True, numerical derivatives are used instead of autograd
-1117 (default False). To control the numerical differentiation the
-1118 kwargs of numdifftools.step_generators.MaxStepGenerator
-1119 can be used.
-1120 man_grad : list
-1121 manually supply a list or an array which contains the jacobian
-1122 of func. Use cautiously, supplying the wrong derivative will
-1123 not be intercepted.
-1124 
-1125 Notes
-1126 -----
-1127 For simple mathematical operations it can be practical to use anonymous
-1128 functions. For the ratio of two observables one can e.g. 
use -1129 -1130 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1131 """ -1132 -1133 data = np.asarray(data) -1134 raveled_data = data.ravel() -1135 -1136 # Workaround for matrix operations containing non Obs data -1137 if not all(isinstance(x, Obs) for x in raveled_data): -1138 for i in range(len(raveled_data)): -1139 if isinstance(raveled_data[i], (int, float)): -1140 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1141 -1142 allcov = {} -1143 for o in raveled_data: -1144 for name in o.cov_names: -1145 if name in allcov: -1146 if not np.allclose(allcov[name], o.covobs[name].cov): -1147 raise Exception('Inconsistent covariance matrices for %s!' % (name)) -1148 else: -1149 allcov[name] = o.covobs[name].cov -1150 -1151 n_obs = len(raveled_data) -1152 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1153 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1154 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1155 -1156 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} -1157 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 -1158 -1159 if data.ndim == 1: -1160 values = np.array([o.value for o in data]) -1161 else: -1162 values = np.vectorize(lambda x: x.value)(data) +1103def derived_observable(func, data, array_mode=False, **kwargs): +1104 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1105 +1106 Parameters +1107 ---------- +1108 func : object +1109 arbitrary function of the form func(data, **kwargs). For the +1110 automatic differentiation to work, all numpy functions have to have +1111 the autograd wrapper (use 'import autograd.numpy as anp'). +1112 data : list +1113 list of Obs, e.g. [obs1, obs2, obs3]. +1114 num_grad : bool +1115 if True, numerical derivatives are used instead of autograd +1116 (default False). To control the numerical differentiation the +1117 kwargs of numdifftools.step_generators.MaxStepGenerator +1118 can be used. +1119 man_grad : list +1120 manually supply a list or an array which contains the jacobian +1121 of func. Use cautiously, supplying the wrong derivative will +1122 not be intercepted. +1123 +1124 Notes +1125 ----- +1126 For simple mathematical operations it can be practical to use anonymous +1127 functions. For the ratio of two observables one can e.g. use +1128 +1129 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1130 """ +1131 +1132 data = np.asarray(data) +1133 raveled_data = data.ravel() +1134 +1135 # Workaround for matrix operations containing non Obs data +1136 if not all(isinstance(x, Obs) for x in raveled_data): +1137 for i in range(len(raveled_data)): +1138 if isinstance(raveled_data[i], (int, float)): +1139 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1140 +1141 allcov = {} +1142 for o in raveled_data: +1143 for name in o.cov_names: +1144 if name in allcov: +1145 if not np.allclose(allcov[name], o.covobs[name].cov): +1146 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) +1147 else: +1148 allcov[name] = o.covobs[name].cov +1149 +1150 n_obs = len(raveled_data) +1151 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1152 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1153 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1154 +1155 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1156 +1157 if data.ndim == 1: +1158 values = np.array([o.value for o in data]) +1159 else: +1160 values = np.vectorize(lambda x: x.value)(data) +1161 +1162 new_values = func(values, **kwargs) 1163 -1164 new_values = func(values, **kwargs) +1164 multi = int(isinstance(new_values, np.ndarray)) 1165 -1166 multi = int(isinstance(new_values, np.ndarray)) -1167 -1168 new_r_values = {} -1169 new_idl_d = {} -1170 for name in new_sample_names: -1171 idl = [] -1172 tmp_values = np.zeros(n_obs) -1173 for i, item in enumerate(raveled_data): -1174 tmp_values[i] = item.r_values.get(name, item.value) -1175 tmp_idl = item.idl.get(name) -1176 if tmp_idl is not None: -1177 idl.append(tmp_idl) -1178 if multi > 0: -1179 tmp_values = np.array(tmp_values).reshape(data.shape) -1180 new_r_values[name] = func(tmp_values, **kwargs) -1181 new_idl_d[name] = _merge_idx(idl) -1182 if not is_merged[name]: -1183 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) -1184 -1185 if 'man_grad' in kwargs: -1186 deriv = np.asarray(kwargs.get('man_grad')) -1187 if new_values.shape + data.shape != deriv.shape: -1188 raise Exception('Manual derivative does not have correct shape.') -1189 elif kwargs.get('num_grad') is True: -1190 if multi > 0: -1191 raise Exception('Multi mode currently not supported for numerical derivative') -1192 options = { -1193 'base_step': 0.1, -1194 'step_ratio': 2.5} -1195 for key in options.keys(): -1196 kwarg = kwargs.get(key) -1197 if kwarg is not None: -1198 options[key] = kwarg -1199 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1200 if tmp_df.size == 1: -1201 deriv = np.array([tmp_df.real]) -1202 else: -1203 deriv = tmp_df.real -1204 else: -1205 deriv = jacobian(func)(values, **kwargs) +1166 new_r_values = {} +1167 new_idl_d = {} +1168 for name in new_sample_names: +1169 idl = [] +1170 tmp_values = np.zeros(n_obs) +1171 for i, item in enumerate(raveled_data): +1172 tmp_values[i] = item.r_values.get(name, item.value) +1173 tmp_idl = item.idl.get(name) +1174 if tmp_idl is not None: +1175 idl.append(tmp_idl) +1176 if multi > 0: +1177 tmp_values = np.array(tmp_values).reshape(data.shape) +1178 new_r_values[name] = func(tmp_values, **kwargs) +1179 new_idl_d[name] = _merge_idx(idl) +1180 +1181 if 'man_grad' in kwargs: +1182 deriv = np.asarray(kwargs.get('man_grad')) +1183 if new_values.shape + data.shape != deriv.shape: +1184 raise Exception('Manual derivative does not have correct shape.') +1185 elif kwargs.get('num_grad') is True: +1186 if multi > 0: +1187 raise Exception('Multi mode currently not supported for numerical derivative') +1188 options = { +1189 'base_step': 0.1, +1190 'step_ratio': 2.5} +1191 for key in options.keys(): +1192 kwarg = kwargs.get(key) +1193 if kwarg is not None: +1194 options[key] = kwarg +1195 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1196 if tmp_df.size == 1: +1197 deriv = np.array([tmp_df.real]) +1198 else: +1199 deriv = tmp_df.real +1200 else: +1201 deriv = 
jacobian(func)(values, **kwargs) +1202 +1203 final_result = np.zeros(new_values.shape, dtype=object) +1204 +1205 if array_mode is True: 1206 -1207 final_result = np.zeros(new_values.shape, dtype=object) -1208 -1209 if array_mode is True: +1207 class _Zero_grad(): +1208 def __init__(self, N): +1209 self.grad = np.zeros((N, 1)) 1210 -1211 class _Zero_grad(): -1212 def __init__(self, N): -1213 self.grad = np.zeros((N, 1)) -1214 -1215 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1216 d_extracted = {} -1217 g_extracted = {} -1218 for name in new_sample_names: -1219 d_extracted[name] = [] -1220 ens_length = len(new_idl_d[name]) -1221 for i_dat, dat in enumerate(data): -1222 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1223 for name in new_cov_names: -1224 g_extracted[name] = [] -1225 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1226 for i_dat, dat in enumerate(data): -1227 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1228 -1229 for i_val, new_val in np.ndenumerate(new_values): -1230 new_deltas = {} -1231 new_grad = {} -1232 if array_mode is True: -1233 for name in new_sample_names: -1234 ens_length = d_extracted[name][0].shape[-1] -1235 new_deltas[name] = np.zeros(ens_length) -1236 for i_dat, dat in enumerate(d_extracted[name]): -1237 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1238 for name in new_cov_names: -1239 new_grad[name] = 0 -1240 for i_dat, dat in enumerate(g_extracted[name]): -1241 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1242 else: -1243 for j_obs, obs in np.ndenumerate(data): -1244 for name in obs.names: -1245 if name in obs.cov_names: -1246 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1247 else: -1248 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1249 -1250 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1251 -1252 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1253 raise Exception('The same name has been used for deltas and covobs!') -1254 new_samples = [] -1255 new_means = [] -1256 new_idl = [] -1257 new_names_obs = [] -1258 for name in new_names: -1259 if name not in new_covobs: -1260 new_samples.append(new_deltas[name]) -1261 new_idl.append(new_idl_d[name]) -1262 new_means.append(new_r_values[name][i_val]) -1263 new_names_obs.append(name) -1264 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1265 for name in new_covobs: -1266 final_result[i_val].names.append(name) -1267 final_result[i_val]._covobs = new_covobs -1268 final_result[i_val]._value = new_val -1269 final_result[i_val].is_merged = is_merged -1270 final_result[i_val].reweighted = reweighted +1211 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1212 d_extracted = {} +1213 g_extracted = {} +1214 for name in new_sample_names: +1215 d_extracted[name] = [] +1216 ens_length = len(new_idl_d[name]) +1217 for i_dat, dat in enumerate(data): +1218 
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1219 for name in new_cov_names: +1220 g_extracted[name] = [] +1221 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1222 for i_dat, dat in enumerate(data): +1223 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1224 +1225 for i_val, new_val in np.ndenumerate(new_values): +1226 new_deltas = {} +1227 new_grad = {} +1228 if array_mode is True: +1229 for name in new_sample_names: +1230 ens_length = d_extracted[name][0].shape[-1] +1231 new_deltas[name] = np.zeros(ens_length) +1232 for i_dat, dat in enumerate(d_extracted[name]): +1233 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1234 for name in new_cov_names: +1235 new_grad[name] = 0 +1236 for i_dat, dat in enumerate(g_extracted[name]): +1237 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1238 else: +1239 for j_obs, obs in np.ndenumerate(data): +1240 for name in obs.names: +1241 if name in obs.cov_names: +1242 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1243 else: +1244 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1245 +1246 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1247 +1248 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1249 raise Exception('The same name has been used for deltas and covobs!') +1250 new_samples = [] +1251 new_means = [] +1252 new_idl = [] +1253 new_names_obs = [] +1254 for name in new_names: +1255 if name not in new_covobs: +1256 new_samples.append(new_deltas[name]) +1257 new_idl.append(new_idl_d[name]) +1258 new_means.append(new_r_values[name][i_val]) +1259 new_names_obs.append(name) +1260 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1261 for name in new_covobs: +1262 final_result[i_val].names.append(name) +1263 final_result[i_val]._covobs = new_covobs +1264 final_result[i_val]._value = new_val +1265 final_result[i_val].reweighted = reweighted +1266 +1267 if multi == 0: +1268 final_result = final_result.item() +1269 +1270 return final_result 1271 -1272 if multi == 0: -1273 final_result = final_result.item() -1274 -1275 return final_result -1276 -1277 -1278def _reduce_deltas(deltas, idx_old, idx_new): -1279 """Extract deltas defined on idx_old on all configs of idx_new. -1280 -1281 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1282 are ordered in an ascending order. -1283 -1284 Parameters -1285 ---------- -1286 deltas : list -1287 List of fluctuations -1288 idx_old : list -1289 List or range of configs on which the deltas are defined -1290 idx_new : list -1291 List of configs for which we want to extract the deltas. -1292 Has to be a subset of idx_old. -1293 """ -1294 if not len(deltas) == len(idx_old): -1295 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1296 if type(idx_old) is range and type(idx_new) is range: -1297 if idx_old == idx_new: +1272 +1273def _reduce_deltas(deltas, idx_old, idx_new): +1274 """Extract deltas defined on idx_old on all configs of idx_new. 
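A hedged usage sketch for derived_observable with made-up ensemble data, following the ratio example from the docstring above:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(0)
    obs1 = pe.Obs([rng.normal(2.0, 0.1, 1000)], ['ensA'])
    obs2 = pe.Obs([rng.normal(4.0, 0.2, 1000)], ['ensA'])

    # Ratio of two observables via an anonymous function; autograd supplies the jacobian.
    ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
    ratio.gamma_method()
    print(ratio)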
+1275 +1276 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1277 are ordered in an ascending order. +1278 +1279 Parameters +1280 ---------- +1281 deltas : list +1282 List of fluctuations +1283 idx_old : list +1284 List or range of configs on which the deltas are defined +1285 idx_new : list +1286 List of configs for which we want to extract the deltas. +1287 Has to be a subset of idx_old. +1288 """ +1289 if not len(deltas) == len(idx_old): +1290 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1291 if type(idx_old) is range and type(idx_new) is range: +1292 if idx_old == idx_new: +1293 return deltas +1294 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical +1295 try: +1296 g = groupby([idx_old, idx_new]) +1297 if next(g, True) and not next(g, False): 1298 return deltas -1299 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical -1300 try: -1301 g = groupby([idx_old, idx_new]) -1302 if next(g, True) and not next(g, False): -1303 return deltas -1304 except Exception: -1305 pass -1306 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1307 if len(indices) < len(idx_new): -1308 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1309 return np.array(deltas)[indices] -1310 -1311 -1312def reweight(weight, obs, **kwargs): -1313 """Reweight a list of observables. -1314 -1315 Parameters -1316 ---------- -1317 weight : Obs -1318 Reweighting factor. An Observable that has to be defined on a superset of the -1319 configurations in obs[i].idl for all i. -1320 obs : list -1321 list of Obs, e.g. [obs1, obs2, obs3]. -1322 all_configs : bool -1323 if True, the reweighted observables are normalized by the average of -1324 the reweighting factor on all configurations in weight.idl and not -1325 on the configurations in obs[i].idl. Default False. -1326 """ -1327 result = [] -1328 for i in range(len(obs)): -1329 if len(obs[i].cov_names): -1330 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1331 if not set(obs[i].names).issubset(weight.names): -1332 raise Exception('Error: Ensembles do not fit') -1333 for name in obs[i].names: -1334 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1335 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1336 new_samples = [] -1337 w_deltas = {} -1338 for name in sorted(obs[i].names): -1339 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1340 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1341 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1299 except Exception: +1300 pass +1301 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1302 if len(indices) < len(idx_new): +1303 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1304 return np.array(deltas)[indices] +1305 +1306 +1307def reweight(weight, obs, **kwargs): +1308 """Reweight a list of observables. +1309 +1310 Parameters +1311 ---------- +1312 weight : Obs +1313 Reweighting factor. An Observable that has to be defined on a superset of the +1314 configurations in obs[i].idl for all i. +1315 obs : list +1316 list of Obs, e.g. [obs1, obs2, obs3]. 
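On a toy idl the effect of _reduce_deltas is the following (private helper, assumed importable from pyerrors.obs):

    from pyerrors.obs import _reduce_deltas

    # Keep only the fluctuations belonging to configs 2 and 4.
    print(_reduce_deltas([0.1, 0.2, 0.3, 0.4], [1, 2, 3, 4], [2, 4]))  # [0.2 0.4]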
+1317 all_configs : bool +1318 if True, the reweighted observables are normalized by the average of +1319 the reweighting factor on all configurations in weight.idl and not +1320 on the configurations in obs[i].idl. Default False. +1321 """ +1322 result = [] +1323 for i in range(len(obs)): +1324 if len(obs[i].cov_names): +1325 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1326 if not set(obs[i].names).issubset(weight.names): +1327 raise Exception('Error: Ensembles do not fit') +1328 for name in obs[i].names: +1329 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1330 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1331 new_samples = [] +1332 w_deltas = {} +1333 for name in sorted(obs[i].names): +1334 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1335 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1336 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1337 +1338 if kwargs.get('all_configs'): +1339 new_weight = weight +1340 else: +1341 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) 1342 -1343 if kwargs.get('all_configs'): -1344 new_weight = weight -1345 else: -1346 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1343 result.append(tmp_obs / new_weight) +1344 result[-1].reweighted = True +1345 +1346 return result 1347 -1348 result.append(tmp_obs / new_weight) -1349 result[-1].reweighted = True -1350 result[-1].is_merged = obs[i].is_merged +1348 +1349def correlate(obs_a, obs_b): +1350 """Correlate two observables. 1351 -1352 return result -1353 -1354 -1355def correlate(obs_a, obs_b): -1356 """Correlate two observables. -1357 -1358 Parameters -1359 ---------- -1360 obs_a : Obs -1361 First observable -1362 obs_b : Obs -1363 Second observable -1364 -1365 Notes -1366 ----- -1367 Keep in mind to only correlate primary observables which have not been reweighted -1368 yet. The reweighting has to be applied after correlating the observables. -1369 Currently only works if ensembles are identical (this is not strictly necessary). -1370 """ -1371 -1372 if sorted(obs_a.names) != sorted(obs_b.names): -1373 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1374 if len(obs_a.cov_names) or len(obs_b.cov_names): -1375 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1376 for name in obs_a.names: -1377 if obs_a.shape[name] != obs_b.shape[name]: -1378 raise Exception('Shapes of ensemble', name, 'do not fit') -1379 if obs_a.idl[name] != obs_b.idl[name]: -1380 raise Exception('idl of ensemble', name, 'do not fit') -1381 -1382 if obs_a.reweighted is True: -1383 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1384 if obs_b.reweighted is True: -1385 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1352 Parameters +1353 ---------- +1354 obs_a : Obs +1355 First observable +1356 obs_b : Obs +1357 Second observable +1358 +1359 Notes +1360 ----- +1361 Keep in mind to only correlate primary observables which have not been reweighted +1362 yet. 
The reweighting has to be applied after correlating the observables. +1363 Currently only works if ensembles are identical (this is not strictly necessary). +1364 """ +1365 +1366 if sorted(obs_a.names) != sorted(obs_b.names): +1367 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1368 if len(obs_a.cov_names) or len(obs_b.cov_names): +1369 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1370 for name in obs_a.names: +1371 if obs_a.shape[name] != obs_b.shape[name]: +1372 raise Exception('Shapes of ensemble', name, 'do not fit') +1373 if obs_a.idl[name] != obs_b.idl[name]: +1374 raise Exception('idl of ensemble', name, 'do not fit') +1375 +1376 if obs_a.reweighted is True: +1377 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1378 if obs_b.reweighted is True: +1379 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1380 +1381 new_samples = [] +1382 new_idl = [] +1383 for name in sorted(obs_a.names): +1384 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1385 new_idl.append(obs_a.idl[name]) 1386 -1387 new_samples = [] -1388 new_idl = [] -1389 for name in sorted(obs_a.names): -1390 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1391 new_idl.append(obs_a.idl[name]) -1392 -1393 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1394 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} -1395 o.reweighted = obs_a.reweighted or obs_b.reweighted -1396 return o +1387 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1388 o.reweighted = obs_a.reweighted or obs_b.reweighted +1389 return o +1390 +1391 +1392def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1393 r'''Calculates the error covariance matrix of a set of observables. +1394 +1395 WARNING: This function should be used with care, especially for observables with support on multiple +1396 ensembles with differing autocorrelations. See the notes below for details. 1397 -1398 -1399def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1400 r'''Calculates the error covariance matrix of a set of observables. -1401 -1402 WARNING: This function should be used with care, especially for observables with support on multiple -1403 ensembles with differing autocorrelations. See the notes below for details. -1404 -1405 The gamma method has to be applied first to all observables. -1406 -1407 Parameters -1408 ---------- -1409 obs : list or numpy.ndarray -1410 List or one dimensional array of Obs -1411 visualize : bool -1412 If True plots the corresponding normalized correlation matrix (default False). -1413 correlation : bool -1414 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1415 smooth : None or int -1416 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1417 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1418 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1419 small ones. 
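A hedged usage sketch for covariance on made-up data from a single ensemble; as stated above, the gamma method has to be applied first:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    x = rng.normal(0.0, 1.0, 1000)
    a = pe.Obs([x], ['ensA'])
    b = pe.Obs([x + rng.normal(0.0, 0.5, 1000)], ['ensA'])
    a.gamma_method()
    b.gamma_method()

    cov = pe.covariance([a, b])                     # 2x2 error covariance matrix
    corr = pe.covariance([a, b], correlation=True)  # normalized correlation matrix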
-1420 -1421 Notes -1422 ----- -1423 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1424 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1425 in the absence of autocorrelation. -1426 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1427 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1428 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1429 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1430 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1431 ''' -1432 -1433 length = len(obs) -1434 -1435 max_samples = np.max([o.N for o in obs]) -1436 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1437 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1438 -1439 cov = np.zeros((length, length)) -1440 for i in range(length): -1441 for j in range(i, length): -1442 cov[i, j] = _covariance_element(obs[i], obs[j]) -1443 cov = cov + cov.T - np.diag(np.diag(cov)) -1444 -1445 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1446 -1447 if isinstance(smooth, int): -1448 corr = _smooth_eigenvalues(corr, smooth) -1449 -1450 if visualize: -1451 plt.matshow(corr, vmin=-1, vmax=1) -1452 plt.set_cmap('RdBu') -1453 plt.colorbar() -1454 plt.draw() -1455 -1456 if correlation is True: -1457 return corr +1398 The gamma method has to be applied first to all observables. +1399 +1400 Parameters +1401 ---------- +1402 obs : list or numpy.ndarray +1403 List or one dimensional array of Obs +1404 visualize : bool +1405 If True plots the corresponding normalized correlation matrix (default False). +1406 correlation : bool +1407 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1408 smooth : None or int +1409 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1410 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1411 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1412 small ones. +1413 +1414 Notes +1415 ----- +1416 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1417 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1418 in the absence of autocorrelation. 
+1419 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1420 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1421 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. +1422 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1423 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1424 ''' +1425 +1426 length = len(obs) +1427 +1428 max_samples = np.max([o.N for o in obs]) +1429 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1430 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) +1431 +1432 cov = np.zeros((length, length)) +1433 for i in range(length): +1434 for j in range(i, length): +1435 cov[i, j] = _covariance_element(obs[i], obs[j]) +1436 cov = cov + cov.T - np.diag(np.diag(cov)) +1437 +1438 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1439 +1440 if isinstance(smooth, int): +1441 corr = _smooth_eigenvalues(corr, smooth) +1442 +1443 if visualize: +1444 plt.matshow(corr, vmin=-1, vmax=1) +1445 plt.set_cmap('RdBu') +1446 plt.colorbar() +1447 plt.draw() +1448 +1449 if correlation is True: +1450 return corr +1451 +1452 errors = [o.dvalue for o in obs] +1453 cov = np.diag(errors) @ corr @ np.diag(errors) +1454 +1455 eigenvalues = np.linalg.eigh(cov)[0] +1456 if not np.all(eigenvalues >= 0): +1457 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) 1458 -1459 errors = [o.dvalue for o in obs] -1460 cov = np.diag(errors) @ corr @ np.diag(errors) +1459 return cov +1460 1461 -1462 eigenvalues = np.linalg.eigh(cov)[0] -1463 if not np.all(eigenvalues >= 0): -1464 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1465 -1466 return cov -1467 -1468 -1469def _smooth_eigenvalues(corr, E): -1470 """Eigenvalue smoothing as described in hep-lat/9412087 -1471 -1472 corr : np.ndarray -1473 correlation matrix -1474 E : integer -1475 Number of eigenvalues to be left substantially unchanged -1476 """ -1477 if not (2 < E < corr.shape[0] - 1): -1478 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1479 vals, vec = np.linalg.eigh(corr) -1480 lambda_min = np.mean(vals[:-E]) -1481 vals[vals < lambda_min] = lambda_min -1482 vals /= np.mean(vals) -1483 return vec @ np.diag(vals) @ vec.T -1484 -1485 -1486def _covariance_element(obs1, obs2): -1487 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1488 -1489 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1490 
deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1491 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) -1492 return np.sum(deltas1 * deltas2) -1493 -1494 if set(obs1.names).isdisjoint(set(obs2.names)): -1495 return 0.0 +1462def _smooth_eigenvalues(corr, E): +1463 """Eigenvalue smoothing as described in hep-lat/9412087 +1464 +1465 corr : np.ndarray +1466 correlation matrix +1467 E : integer +1468 Number of eigenvalues to be left substantially unchanged +1469 """ +1470 if not (2 < E < corr.shape[0] - 1): +1471 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1472 vals, vec = np.linalg.eigh(corr) +1473 lambda_min = np.mean(vals[:-E]) +1474 vals[vals < lambda_min] = lambda_min +1475 vals /= np.mean(vals) +1476 return vec @ np.diag(vals) @ vec.T +1477 +1478 +1479def _covariance_element(obs1, obs2): +1480 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1481 +1482 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1483 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1484 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1485 return np.sum(deltas1 * deltas2) +1486 +1487 if set(obs1.names).isdisjoint(set(obs2.names)): +1488 return 0.0 +1489 +1490 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1491 raise Exception('The gamma method has to be applied to both Obs first.') +1492 +1493 dvalue = 0.0 +1494 +1495 for e_name in obs1.mc_names: 1496 -1497 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1498 raise Exception('The gamma method has to be applied to both Obs first.') +1497 if e_name not in obs2.mc_names: +1498 continue 1499 -1500 dvalue = 0.0 -1501 -1502 for e_name in obs1.mc_names: -1503 -1504 if e_name not in obs2.mc_names: -1505 continue -1506 -1507 idl_d = {} +1500 idl_d = {} +1501 for r_name in obs1.e_content[e_name]: +1502 if r_name not in obs2.e_content[e_name]: +1503 continue +1504 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1505 +1506 gamma = 0.0 +1507 1508 for r_name in obs1.e_content[e_name]: 1509 if r_name not in obs2.e_content[e_name]: 1510 continue -1511 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) -1512 -1513 gamma = 0.0 +1511 if len(idl_d[r_name]) == 0: +1512 continue +1513 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) 1514 -1515 for r_name in obs1.e_content[e_name]: -1516 if r_name not in obs2.e_content[e_name]: -1517 continue -1518 if len(idl_d[r_name]) == 0: -1519 continue -1520 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1521 -1522 if gamma == 0.0: -1523 continue -1524 -1525 gamma_div = 0.0 -1526 for r_name in obs1.e_content[e_name]: -1527 if r_name not in obs2.e_content[e_name]: -1528 continue -1529 if len(idl_d[r_name]) == 0: -1530 continue -1531 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1532 gamma /= gamma_div +1515 if gamma == 0.0: +1516 continue +1517 +1518 gamma_div = 0.0 +1519 for r_name in obs1.e_content[e_name]: +1520 if r_name not in obs2.e_content[e_name]: +1521 continue +1522 if len(idl_d[r_name]) == 0: +1523 continue +1524 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], 
idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1525 gamma /= gamma_div +1526 +1527 dvalue += gamma +1528 +1529 for e_name in obs1.cov_names: +1530 +1531 if e_name not in obs2.cov_names: +1532 continue 1533 -1534 dvalue += gamma +1534 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) 1535 -1536 for e_name in obs1.cov_names: +1536 return dvalue 1537 -1538 if e_name not in obs2.cov_names: -1539 continue -1540 -1541 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) -1542 -1543 return dvalue -1544 -1545 -1546def import_jackknife(jacks, name, idl=None): -1547 """Imports jackknife samples and returns an Obs -1548 -1549 Parameters -1550 ---------- -1551 jacks : numpy.ndarray -1552 numpy array containing the mean value as zeroth entry and -1553 the N jackknife samples as first to Nth entry. -1554 name : str -1555 name of the ensemble the samples are defined on. -1556 """ -1557 length = len(jacks) - 1 -1558 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1559 samples = jacks[1:] @ prj -1560 mean = np.mean(samples) -1561 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1562 new_obs._value = jacks[0] -1563 return new_obs -1564 -1565 -1566def merge_obs(list_of_obs): -1567 """Combine all observables in list_of_obs into one new observable -1568 -1569 Parameters -1570 ---------- -1571 list_of_obs : list -1572 list of the Obs object to be combined -1573 -1574 Notes -1575 ----- -1576 It is not possible to combine obs which are based on the same replicum -1577 """ -1578 replist = [item for obs in list_of_obs for item in obs.names] -1579 if (len(replist) == len(set(replist))) is False: -1580 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1581 if any([len(o.cov_names) for o in list_of_obs]): -1582 raise Exception('Not possible to merge data that contains covobs!') -1583 new_dict = {} -1584 idl_dict = {} -1585 for o in list_of_obs: -1586 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1587 for key in set(o.deltas) | set(o.r_values)}) -1588 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1589 -1590 names = sorted(new_dict.keys()) -1591 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1592 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} -1593 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1594 return o -1595 -1596 -1597def cov_Obs(means, cov, name, grad=None): -1598 """Create an Obs based on mean(s) and a covariance matrix -1599 -1600 Parameters -1601 ---------- -1602 mean : list of floats or float -1603 N mean value(s) of the new Obs -1604 cov : list or array -1605 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1606 name : str -1607 identifier for the covariance matrix -1608 grad : list or array -1609 Gradient of the Covobs wrt. the means belonging to cov. 
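Jackknife samples can be exchanged with other codes via import_jackknife, documented above; a sketch of a round trip, assuming the matching export_jackknife method of Obs is available:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    obs = pe.Obs([rng.normal(1.0, 0.2, 100)], ['ensA'])
    jacks = obs.export_jackknife()                # mean as zeroth entry, then 100 jackknife samples
    rebuilt = pe.import_jackknife(jacks, 'ensA')  # reconstructs an equivalent Obs
    rebuilt.gamma_method()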
-1610 """ -1611 -1612 def covobs_to_obs(co): -1613 """Make an Obs out of a Covobs -1614 -1615 Parameters -1616 ---------- -1617 co : Covobs -1618 Covobs to be embedded into the Obs -1619 """ -1620 o = Obs([], [], means=[]) -1621 o._value = co.value -1622 o.names.append(co.name) -1623 o._covobs[co.name] = co -1624 o._dvalue = np.sqrt(co.errsq()) -1625 return o -1626 -1627 ol = [] -1628 if isinstance(means, (float, int)): -1629 means = [means] -1630 -1631 for i in range(len(means)): -1632 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1633 if ol[0].covobs[name].N != len(means): -1634 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1635 if len(ol) == 1: -1636 return ol[0] -1637 return ol +1538 +1539def import_jackknife(jacks, name, idl=None): +1540 """Imports jackknife samples and returns an Obs +1541 +1542 Parameters +1543 ---------- +1544 jacks : numpy.ndarray +1545 numpy array containing the mean value as zeroth entry and +1546 the N jackknife samples as first to Nth entry. +1547 name : str +1548 name of the ensemble the samples are defined on. +1549 """ +1550 length = len(jacks) - 1 +1551 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1552 samples = jacks[1:] @ prj +1553 mean = np.mean(samples) +1554 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1555 new_obs._value = jacks[0] +1556 return new_obs +1557 +1558 +1559def merge_obs(list_of_obs): +1560 """Combine all observables in list_of_obs into one new observable +1561 +1562 Parameters +1563 ---------- +1564 list_of_obs : list +1565 list of the Obs object to be combined +1566 +1567 Notes +1568 ----- +1569 It is not possible to combine obs which are based on the same replicum +1570 """ +1571 replist = [item for obs in list_of_obs for item in obs.names] +1572 if (len(replist) == len(set(replist))) is False: +1573 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1574 if any([len(o.cov_names) for o in list_of_obs]): +1575 raise Exception('Not possible to merge data that contains covobs!') +1576 new_dict = {} +1577 idl_dict = {} +1578 for o in list_of_obs: +1579 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1580 for key in set(o.deltas) | set(o.r_values)}) +1581 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1582 +1583 names = sorted(new_dict.keys()) +1584 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1585 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1586 return o +1587 +1588 +1589def cov_Obs(means, cov, name, grad=None): +1590 """Create an Obs based on mean(s) and a covariance matrix +1591 +1592 Parameters +1593 ---------- +1594 mean : list of floats or float +1595 N mean value(s) of the new Obs +1596 cov : list or array +1597 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1598 name : str +1599 identifier for the covariance matrix +1600 grad : list or array +1601 Gradient of the Covobs wrt. the means belonging to cov. 
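A sketch for merge_obs, combining two made-up replica of one ensemble (replica are distinguished by the '|' separator in the name):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    r0 = pe.Obs([rng.normal(1.0, 0.2, 500)], ['ensA|r0'])
    r1 = pe.Obs([rng.normal(1.0, 0.2, 500)], ['ensA|r1'])
    merged = pe.merge_obs([r0, r1])  # one Obs carrying both replica of ensA
    merged.gamma_method()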
+1602 """ +1603 +1604 def covobs_to_obs(co): +1605 """Make an Obs out of a Covobs +1606 +1607 Parameters +1608 ---------- +1609 co : Covobs +1610 Covobs to be embedded into the Obs +1611 """ +1612 o = Obs([], [], means=[]) +1613 o._value = co.value +1614 o.names.append(co.name) +1615 o._covobs[co.name] = co +1616 o._dvalue = np.sqrt(co.errsq()) +1617 return o +1618 +1619 ol = [] +1620 if isinstance(means, (float, int)): +1621 means = [means] +1622 +1623 for i in range(len(means)): +1624 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1625 if ol[0].covobs[name].N != len(means): +1626 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1627 if len(ol) == 1: +1628 return ol[0] +1629 return ol
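A hedged usage sketch for cov_Obs with hypothetical renormalization constants and a made-up covariance matrix:

    import pyerrors as pe

    means = [0.791, 0.763]
    cov = [[1.3e-5, 0.9e-5], [0.9e-5, 1.5e-5]]
    zA, zV = pe.cov_Obs(means, cov, 'Z-factors')  # one Obs per mean, sharing the covariance
    ratio = zA / zV
    ratio.gamma_method()
    print(ratio)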
@@ -1890,7 +1882,7 @@ 50 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 51 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint', 52 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint', - 53 'idl', 'is_merged', 'tag', '_covobs', '__dict__'] + 53 'idl', 'tag', '_covobs', '__dict__'] 54 55 S_global = 2.0 56 S_dict = {} @@ -1938,775 +1930,774 @@ 98 99 self._value = 0 100 self.N = 0 -101 self.is_merged = {} -102 self.idl = {} -103 if idl is not None: -104 for name, idx in sorted(zip(names, idl)): -105 if isinstance(idx, range): -106 self.idl[name] = idx -107 elif isinstance(idx, (list, np.ndarray)): -108 dc = np.unique(np.diff(idx)) -109 if np.any(dc < 0): -110 raise Exception("Unsorted idx for idl[%s]" % (name)) -111 if len(dc) == 1: -112 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) -113 else: -114 self.idl[name] = list(idx) -115 else: -116 raise Exception('incompatible type for idl[%s].' % (name)) -117 else: -118 for name, sample in sorted(zip(names, samples)): -119 self.idl[name] = range(1, len(sample) + 1) -120 -121 if kwargs.get("means") is not None: -122 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): -123 self.shape[name] = len(self.idl[name]) -124 self.N += self.shape[name] -125 self.r_values[name] = mean -126 self.deltas[name] = sample -127 else: -128 for name, sample in sorted(zip(names, samples)): -129 self.shape[name] = len(self.idl[name]) -130 self.N += self.shape[name] -131 if len(sample) != self.shape[name]: -132 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) -133 self.r_values[name] = np.mean(sample) -134 self.deltas[name] = sample - self.r_values[name] -135 self._value += self.shape[name] * self.r_values[name] -136 self._value /= self.N -137 -138 self._dvalue = 0.0 -139 self.ddvalue = 0.0 -140 self.reweighted = False -141 -142 self.tag = None -143 -144 @property -145 def value(self): -146 return self._value -147 -148 @property -149 def dvalue(self): -150 return self._dvalue -151 -152 @property -153 def e_names(self): -154 return sorted(set([o.split('|')[0] for o in self.names])) -155 -156 @property -157 def cov_names(self): -158 return sorted(set([o for o in self.covobs.keys()])) -159 -160 @property -161 def mc_names(self): -162 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) -163 -164 @property -165 def e_content(self): -166 res = {} -167 for e, e_name in enumerate(self.e_names): -168 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) -169 if e_name in self.names: -170 res[e_name].append(e_name) -171 return res -172 -173 @property -174 def covobs(self): -175 return self._covobs -176 -177 def gamma_method(self, **kwargs): -178 """Estimate the error and related properties of the Obs. -179 -180 Parameters -181 ---------- -182 S : float -183 specifies a custom value for the parameter S (default 2.0). -184 If set to 0 it is assumed that the data exhibits no -185 autocorrelation. In this case the error estimates coincides -186 with the sample standard error. -187 tau_exp : float -188 positive value triggers the critical slowing down analysis -189 (default 0.0). -190 N_sigma : float -191 number of standard deviations from zero until the tail is -192 attached to the autocorrelation function (default 1). 
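The idl handling in the constructor above stores equally spaced configuration lists as ranges; a sketch with a made-up ensemble:

    import pyerrors as pe

    obs = pe.Obs([[3.1, 3.3, 2.9, 3.0, 3.2]], ['ensA'], idl=[[2, 4, 6, 8, 10]])
    print(obs.idl['ensA'])  # range(2, 12, 2), the equally spaced list was converted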
-193 fft : bool -194 determines whether the fft algorithm is used for the computation -195 of the autocorrelation function (default True) -196 """ -197 -198 e_content = self.e_content -199 self.e_dvalue = {} -200 self.e_ddvalue = {} -201 self.e_tauint = {} -202 self.e_dtauint = {} -203 self.e_windowsize = {} -204 self.e_n_tauint = {} -205 self.e_n_dtauint = {} -206 e_gamma = {} -207 self.e_rho = {} -208 self.e_drho = {} -209 self._dvalue = 0 -210 self.ddvalue = 0 -211 -212 self.S = {} -213 self.tau_exp = {} -214 self.N_sigma = {} -215 -216 if kwargs.get('fft') is False: -217 fft = False -218 else: -219 fft = True -220 -221 def _parse_kwarg(kwarg_name): -222 if kwarg_name in kwargs: -223 tmp = kwargs.get(kwarg_name) -224 if isinstance(tmp, (int, float)): -225 if tmp < 0: -226 raise Exception(kwarg_name + ' has to be larger or equal to 0.') -227 for e, e_name in enumerate(self.e_names): -228 getattr(self, kwarg_name)[e_name] = tmp -229 else: -230 raise TypeError(kwarg_name + ' is not in proper format.') -231 else: -232 for e, e_name in enumerate(self.e_names): -233 if e_name in getattr(Obs, kwarg_name + '_dict'): -234 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] -235 else: -236 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') -237 -238 _parse_kwarg('S') -239 _parse_kwarg('tau_exp') -240 _parse_kwarg('N_sigma') -241 -242 for e, e_name in enumerate(self.mc_names): -243 r_length = [] -244 for r_name in e_content[e_name]: -245 if isinstance(self.idl[r_name], range): -246 r_length.append(len(self.idl[r_name])) -247 else: -248 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) -249 -250 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) -251 w_max = max(r_length) // 2 -252 e_gamma[e_name] = np.zeros(w_max) -253 self.e_rho[e_name] = np.zeros(w_max) -254 self.e_drho[e_name] = np.zeros(w_max) -255 -256 for r_name in e_content[e_name]: -257 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) -258 -259 gamma_div = np.zeros(w_max) -260 for r_name in e_content[e_name]: -261 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) -262 gamma_div[gamma_div < 1] = 1.0 -263 e_gamma[e_name] /= gamma_div[:w_max] -264 -265 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero -266 self.e_tauint[e_name] = 0.5 -267 self.e_dtauint[e_name] = 0.0 -268 self.e_dvalue[e_name] = 0.0 -269 self.e_ddvalue[e_name] = 0.0 -270 self.e_windowsize[e_name] = 0 -271 continue -272 -273 gaps = [] -274 for r_name in e_content[e_name]: -275 if isinstance(self.idl[r_name], range): -276 gaps.append(1) -277 else: -278 gaps.append(np.min(np.diff(self.idl[r_name]))) -279 -280 if not np.all([gi == gaps[0] for gi in gaps]): -281 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) -282 else: -283 gapsize = gaps[0] -284 -285 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] -286 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) -287 # Make sure no entry of tauint is smaller than 0.5 -288 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps -289 # hep-lat/0306017 eq. 
(42) -290 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) -291 self.e_n_dtauint[e_name][0] = 0.0 -292 -293 def _compute_drho(i): -294 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] -295 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) -296 -297 _compute_drho(gapsize) -298 if self.tau_exp[e_name] > 0: -299 texp = self.tau_exp[e_name] -300 # Critical slowing down analysis -301 if w_max // 2 <= 1: -302 raise Exception("Need at least 8 samples for tau_exp error analysis") -303 for n in range(gapsize, w_max // 2, gapsize): -304 _compute_drho(n + gapsize) -305 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: -306 # Bias correction hep-lat/0306017 eq. (49) included -307 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive -308 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) -309 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 -310 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -311 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) -312 self.e_windowsize[e_name] = n -313 break -314 else: -315 if self.S[e_name] == 0.0: -316 self.e_tauint[e_name] = 0.5 -317 self.e_dtauint[e_name] = 0.0 -318 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) -319 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) -320 self.e_windowsize[e_name] = 0 -321 else: -322 # Standard automatic windowing procedure -323 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) -324 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) -325 for n in range(1, w_max): -326 if n < w_max // 2 - 2: -327 _compute_drho(gapsize * n + gapsize) -328 if g_w[n - 1] < 0 or n >= w_max - 1: -329 n *= gapsize -330 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) -331 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] -332 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -333 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) -334 self.e_windowsize[e_name] = n -335 break -336 -337 self._dvalue += self.e_dvalue[e_name] ** 2 -338 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 -339 -340 for e_name in self.cov_names: -341 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) -342 self.e_ddvalue[e_name] = 0 -343 self._dvalue += self.e_dvalue[e_name]**2 -344 -345 self._dvalue = np.sqrt(self._dvalue) -346 if self._dvalue == 0.0: -347 self.ddvalue = 0.0 -348 else: -349 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue -350 return -351 -352 gm = gamma_method -353 -354 def _calc_gamma(self, deltas, idx, shape, w_max, fft): -355 """Calculate Gamma_{AA} from the deltas, which are defined on idx. 
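The windowing options above translate into the following calls; a hedged sketch on synthetic data:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.default_rng(4).normal(0.5, 0.2, 2000)], ['ensA'])
    obs.gamma_method()           # automatic windowing with the default S=2.0
    print(obs.dvalue, obs.e_tauint['ensA'])
    obs.gamma_method(S=0)        # no autocorrelation assumed, plain standard error
    obs.gamma_method(tau_exp=5)  # critical slowing down analysis with attached tail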
-356 idx is assumed to be a contiguous range (possibly with a stepsize != 1) -357 -358 Parameters -359 ---------- -360 deltas : list -361 List of fluctuations -362 idx : list -363 List or range of configurations on which the deltas are defined. -364 shape : int -365 Number of configurations in idx. -366 w_max : int -367 Upper bound for the summation window. -368 fft : bool -369 determines whether the fft algorithm is used for the computation -370 of the autocorrelation function. -371 """ -372 gamma = np.zeros(w_max) -373 deltas = _expand_deltas(deltas, idx, shape) -374 new_shape = len(deltas) -375 if fft: -376 max_gamma = min(new_shape, w_max) -377 # The padding for the fft has to be even -378 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 -379 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] -380 else: -381 for n in range(w_max): -382 if new_shape - n >= 0: -383 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) -384 -385 return gamma -386 -387 def details(self, ens_content=True): -388 """Output detailed properties of the Obs. -389 -390 Parameters -391 ---------- -392 ens_content : bool -393 print details about the ensembles and replica if true. -394 """ -395 if self.tag is not None: -396 print("Description:", self.tag) -397 if not hasattr(self, 'e_dvalue'): -398 print('Result\t %3.8e' % (self.value)) -399 else: -400 if self.value == 0.0: -401 percentage = np.nan -402 else: -403 percentage = np.abs(self._dvalue / self.value) * 100 -404 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) -405 if len(self.e_names) > 1: -406 print(' Ensemble errors:') -407 e_content = self.e_content -408 for e_name in self.mc_names: -409 if isinstance(self.idl[e_content[e_name][0]], range): -410 gap = self.idl[e_content[e_name][0]].step -411 else: -412 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) -413 -414 if len(self.e_names) > 1: -415 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) -416 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) -417 tau_string += f" in units of {gap} config" -418 if gap > 1: -419 tau_string += "s" -420 if self.tau_exp[e_name] > 0: -421 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) -422 else: -423 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) -424 print(tau_string) -425 for e_name in self.cov_names: -426 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) -427 if ens_content is True: -428 if len(self.e_names) == 1: -429 print(self.N, 'samples in', len(self.e_names), 'ensemble:') -430 else: -431 print(self.N, 'samples in', len(self.e_names), 'ensembles:') -432 my_string_list = [] -433 for key, value in sorted(self.e_content.items()): -434 if key not in self.covobs: -435 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " -436 if len(value) == 1: -437 my_string += f': {self.shape[value[0]]} configurations' -438 if isinstance(self.idl[value[0]], range): -439 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' -440 else: -441 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' -442 else: -443 sublist = [] -444 for v in value: -445 my_substring = ' ' + "\u00B7 
Replicum '" + v[len(key) + 1:] + "' " -446 my_substring += f': {self.shape[v]} configurations' -447 if isinstance(self.idl[v], range): -448 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' -449 else: -450 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' -451 sublist.append(my_substring) -452 -453 my_string += '\n' + '\n'.join(sublist) -454 else: -455 my_string = ' ' + "\u00B7 Covobs '" + key + "' " -456 my_string_list.append(my_string) -457 print('\n'.join(my_string_list)) -458 -459 def reweight(self, weight): -460 """Reweight the obs with given rewighting factors. -461 -462 Parameters -463 ---------- -464 weight : Obs -465 Reweighting factor. An Observable that has to be defined on a superset of the -466 configurations in obs[i].idl for all i. -467 all_configs : bool -468 if True, the reweighted observables are normalized by the average of -469 the reweighting factor on all configurations in weight.idl and not -470 on the configurations in obs[i].idl. Default False. -471 """ -472 return reweight(weight, [self])[0] -473 -474 def is_zero_within_error(self, sigma=1): -475 """Checks whether the observable is zero within 'sigma' standard errors. -476 -477 Parameters -478 ---------- -479 sigma : int -480 Number of standard errors used for the check. -481 -482 Works only properly when the gamma method was run. -483 """ -484 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue -485 -486 def is_zero(self, atol=1e-10): -487 """Checks whether the observable is zero within a given tolerance. -488 -489 Parameters -490 ---------- -491 atol : float -492 Absolute tolerance (for details see numpy documentation). -493 """ -494 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) -495 -496 def plot_tauint(self, save=None): -497 """Plot integrated autocorrelation time for each ensemble. -498 -499 Parameters -500 ---------- -501 save : str -502 saves the figure to a file named 'save' if. 
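Editor's note: a minimal usage sketch for the is_zero and is_zero_within_error checks listed above (editorial addition, not part of the diff; assumes pyerrors is importable as pe and uses synthetic data):

```python
import numpy as np
import pyerrors as pe

rng = np.random.default_rng(7)
obs = pe.Obs([rng.normal(1.0, 0.2, 500)], ['A'])

diff = obs - obs                 # exact cancellation: value and all deltas vanish
print(diff.is_zero())            # True
obs.gamma_method()               # the sigma-based check needs the error estimate
print(obs.is_zero_within_error(sigma=1))   # False: the value is many sigma from zero
```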
-503 """ -504 if not hasattr(self, 'e_dvalue'): -505 raise Exception('Run the gamma method first.') -506 -507 for e, e_name in enumerate(self.mc_names): -508 fig = plt.figure() -509 plt.xlabel(r'$W$') -510 plt.ylabel(r'$\tau_\mathrm{int}$') -511 length = int(len(self.e_n_tauint[e_name])) -512 if self.tau_exp[e_name] > 0: -513 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] -514 x_help = np.arange(2 * self.tau_exp[e_name]) -515 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base -516 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) -517 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') -518 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], -519 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) -520 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -521 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) -522 else: -523 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) -524 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -525 -526 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) -527 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') -528 plt.legend() -529 plt.xlim(-0.5, xmax) -530 ylim = plt.ylim() -531 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) -532 plt.draw() -533 if save: -534 fig.savefig(save + "_" + str(e)) -535 -536 def plot_rho(self, save=None): -537 """Plot normalized autocorrelation function time for each ensemble. -538 -539 Parameters -540 ---------- -541 save : str -542 saves the figure to a file named 'save' if. 
-543 """ -544 if not hasattr(self, 'e_dvalue'): -545 raise Exception('Run the gamma method first.') -546 for e, e_name in enumerate(self.mc_names): -547 fig = plt.figure() -548 plt.xlabel('W') -549 plt.ylabel('rho') -550 length = int(len(self.e_drho[e_name])) -551 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) -552 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') -553 if self.tau_exp[e_name] > 0: -554 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], -555 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) -556 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -557 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) -558 else: -559 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -560 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) -561 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) -562 plt.xlim(-0.5, xmax) -563 plt.draw() -564 if save: -565 fig.savefig(save + "_" + str(e)) -566 -567 def plot_rep_dist(self): -568 """Plot replica distribution for each ensemble with more than one replicum.""" -569 if not hasattr(self, 'e_dvalue'): -570 raise Exception('Run the gamma method first.') -571 for e, e_name in enumerate(self.mc_names): -572 if len(self.e_content[e_name]) == 1: -573 print('No replica distribution for a single replicum (', e_name, ')') -574 continue -575 r_length = [] -576 sub_r_mean = 0 -577 for r, r_name in enumerate(self.e_content[e_name]): -578 r_length.append(len(self.deltas[r_name])) -579 sub_r_mean += self.shape[r_name] * self.r_values[r_name] -580 e_N = np.sum(r_length) -581 sub_r_mean /= e_N -582 arr = np.zeros(len(self.e_content[e_name])) -583 for r, r_name in enumerate(self.e_content[e_name]): -584 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) -585 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) -586 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') -587 plt.draw() -588 -589 def plot_history(self, expand=True): -590 """Plot derived Monte Carlo history for each ensemble -591 -592 Parameters -593 ---------- -594 expand : bool -595 show expanded history for irregular Monte Carlo chains (default: True). -596 """ -597 for e, e_name in enumerate(self.mc_names): -598 plt.figure() -599 r_length = [] -600 tmp = [] -601 tmp_expanded = [] -602 for r, r_name in enumerate(self.e_content[e_name]): -603 tmp.append(self.deltas[r_name] + self.r_values[r_name]) -604 if expand: -605 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) -606 r_length.append(len(tmp_expanded[-1])) -607 else: -608 r_length.append(len(tmp[-1])) -609 e_N = np.sum(r_length) -610 x = np.arange(e_N) -611 y_test = np.concatenate(tmp, axis=0) -612 if expand: -613 y = np.concatenate(tmp_expanded, axis=0) -614 else: -615 y = y_test -616 plt.errorbar(x, y, fmt='.', markersize=3) -617 plt.xlim(-0.5, e_N - 0.5) -618 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') -619 plt.draw() -620 -621 def plot_piechart(self, save=None): -622 """Plot piechart which shows the fractional contribution of each -623 ensemble to the error and returns a dictionary containing the fractions. 
-624 -625 Parameters -626 ---------- -627 save : str -628 saves the figure to a file named 'save' if. -629 """ -630 if not hasattr(self, 'e_dvalue'): -631 raise Exception('Run the gamma method first.') -632 if np.isclose(0.0, self._dvalue, atol=1e-15): -633 raise Exception('Error is 0.0') -634 labels = self.e_names -635 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 -636 fig1, ax1 = plt.subplots() -637 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) -638 ax1.axis('equal') -639 plt.draw() -640 if save: -641 fig1.savefig(save) -642 -643 return dict(zip(self.e_names, sizes)) -644 -645 def dump(self, filename, datatype="json.gz", description="", **kwargs): -646 """Dump the Obs to a file 'name' of chosen format. -647 -648 Parameters -649 ---------- -650 filename : str -651 name of the file to be saved. -652 datatype : str -653 Format of the exported file. Supported formats include -654 "json.gz" and "pickle" -655 description : str -656 Description for output file, only relevant for json.gz format. -657 path : str -658 specifies a custom path for the file (default '.') -659 """ -660 if 'path' in kwargs: -661 file_name = kwargs.get('path') + '/' + filename -662 else: -663 file_name = filename -664 -665 if datatype == "json.gz": -666 from .input.json import dump_to_json -667 dump_to_json([self], file_name, description=description) -668 elif datatype == "pickle": -669 with open(file_name + '.p', 'wb') as fb: -670 pickle.dump(self, fb) -671 else: -672 raise Exception("Unknown datatype " + str(datatype)) -673 -674 def export_jackknife(self): -675 """Export jackknife samples from the Obs -676 -677 Returns -678 ------- -679 numpy.ndarray -680 Returns a numpy array of length N + 1 where N is the number of samples -681 for the given ensemble and replicum. The zeroth entry of the array contains -682 the mean value of the Obs, entries 1 to N contain the N jackknife samples -683 derived from the Obs. The current implementation only works for observables -684 defined on exactly one ensemble and replicum. The derived jackknife samples -685 should agree with samples from a full jackknife analysis up to O(1/N). 
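Editor's note: the Returns block above describes the jackknife pseudovalue formula; it can be verified on plain numbers with a short numpy sketch (editorial addition, hypothetical data):

```python
import numpy as np

x = np.arange(1.0, 11.0)                 # 10 fake samples
n, mean = x.size, x.mean()
pseudo = (n * mean - x) / (n - 1)        # relation used by export_jackknife
loo = np.array([np.delete(x, i).mean() for i in range(n)])
np.testing.assert_allclose(pseudo, loo)  # pseudovalues equal the leave-one-out means
```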
-686 """ -687 -688 if len(self.names) != 1: -689 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") -690 -691 name = self.names[0] -692 full_data = self.deltas[name] + self.r_values[name] -693 n = full_data.size -694 mean = self.value -695 tmp_jacks = np.zeros(n + 1) -696 tmp_jacks[0] = mean -697 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) -698 return tmp_jacks -699 -700 def __float__(self): -701 return float(self.value) -702 -703 def __repr__(self): -704 return 'Obs[' + str(self) + ']' -705 -706 def __str__(self): -707 return _format_uncertainty(self.value, self._dvalue) -708 -709 def __hash__(self): -710 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -711 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -712 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -713 hash_tuple += tuple([o.encode() for o in self.names]) -714 m = hashlib.md5() -715 [m.update(o) for o in hash_tuple] -716 return int(m.hexdigest(), 16) & 0xFFFFFFFF -717 -718 # Overload comparisons -719 def __lt__(self, other): -720 return self.value < other -721 -722 def __le__(self, other): -723 return self.value <= other -724 -725 def __gt__(self, other): -726 return self.value > other -727 -728 def __ge__(self, other): -729 return self.value >= other -730 -731 def __eq__(self, other): -732 return (self - other).is_zero() -733 -734 def __ne__(self, other): -735 return not (self - other).is_zero() -736 -737 # Overload math operations -738 def __add__(self, y): -739 if isinstance(y, Obs): -740 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -741 else: -742 if isinstance(y, np.ndarray): -743 return np.array([self + o for o in y]) -744 elif y.__class__.__name__ in ['Corr', 'CObs']: -745 return NotImplemented -746 else: -747 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -748 -749 def __radd__(self, y): -750 return self + y -751 -752 def __mul__(self, y): -753 if isinstance(y, Obs): -754 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -755 else: -756 if isinstance(y, np.ndarray): -757 return np.array([self * o for o in y]) -758 elif isinstance(y, complex): -759 return CObs(self * y.real, self * y.imag) -760 elif y.__class__.__name__ in ['Corr', 'CObs']: -761 return NotImplemented -762 else: -763 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) -764 -765 def __rmul__(self, y): -766 return self * y -767 -768 def __sub__(self, y): -769 if isinstance(y, Obs): -770 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -771 else: -772 if isinstance(y, np.ndarray): -773 return np.array([self - o for o in y]) -774 elif y.__class__.__name__ in ['Corr', 'CObs']: -775 return NotImplemented -776 else: -777 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -778 -779 def __rsub__(self, y): -780 return -1 * (self - y) -781 -782 def __pos__(self): -783 return self -784 -785 def __neg__(self): -786 return -1 * self -787 -788 def __truediv__(self, y): -789 if isinstance(y, Obs): -790 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -791 else: -792 if isinstance(y, np.ndarray): -793 return np.array([self / o for o in y]) -794 elif y.__class__.__name__ in ['Corr', 'CObs']: -795 return 
NotImplemented -796 else: -797 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) -798 -799 def __rtruediv__(self, y): -800 if isinstance(y, Obs): -801 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) -802 else: -803 if isinstance(y, np.ndarray): -804 return np.array([o / self for o in y]) -805 elif y.__class__.__name__ in ['Corr', 'CObs']: -806 return NotImplemented -807 else: -808 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -809 -810 def __pow__(self, y): -811 if isinstance(y, Obs): -812 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -813 else: -814 return derived_observable(lambda x: x[0] ** y, [self]) -815 -816 def __rpow__(self, y): -817 if isinstance(y, Obs): -818 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -819 else: -820 return derived_observable(lambda x: y ** x[0], [self]) -821 -822 def __abs__(self): -823 return derived_observable(lambda x: anp.abs(x[0]), [self]) -824 -825 # Overload numpy functions -826 def sqrt(self): -827 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) -828 -829 def log(self): -830 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -831 -832 def exp(self): -833 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -834 -835 def sin(self): -836 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) -837 -838 def cos(self): -839 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -840 -841 def tan(self): -842 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -843 -844 def arcsin(self): -845 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -846 -847 def arccos(self): -848 return derived_observable(lambda x: anp.arccos(x[0]), [self]) -849 -850 def arctan(self): -851 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -852 -853 def sinh(self): -854 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -855 -856 def cosh(self): -857 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -858 -859 def tanh(self): -860 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -861 -862 def arcsinh(self): -863 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -864 -865 def arccosh(self): -866 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -867 -868 def arctanh(self): -869 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +101 self.idl = {} +102 if idl is not None: +103 for name, idx in sorted(zip(names, idl)): +104 if isinstance(idx, range): +105 self.idl[name] = idx +106 elif isinstance(idx, (list, np.ndarray)): +107 dc = np.unique(np.diff(idx)) +108 if np.any(dc < 0): +109 raise Exception("Unsorted idx for idl[%s]" % (name)) +110 if len(dc) == 1: +111 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) +112 else: +113 self.idl[name] = list(idx) +114 else: +115 raise Exception('incompatible type for idl[%s].' 
% (name)) +116 else: +117 for name, sample in sorted(zip(names, samples)): +118 self.idl[name] = range(1, len(sample) + 1) +119 +120 if kwargs.get("means") is not None: +121 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): +122 self.shape[name] = len(self.idl[name]) +123 self.N += self.shape[name] +124 self.r_values[name] = mean +125 self.deltas[name] = sample +126 else: +127 for name, sample in sorted(zip(names, samples)): +128 self.shape[name] = len(self.idl[name]) +129 self.N += self.shape[name] +130 if len(sample) != self.shape[name]: +131 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) +132 self.r_values[name] = np.mean(sample) +133 self.deltas[name] = sample - self.r_values[name] +134 self._value += self.shape[name] * self.r_values[name] +135 self._value /= self.N +136 +137 self._dvalue = 0.0 +138 self.ddvalue = 0.0 +139 self.reweighted = False +140 +141 self.tag = None +142 +143 @property +144 def value(self): +145 return self._value +146 +147 @property +148 def dvalue(self): +149 return self._dvalue +150 +151 @property +152 def e_names(self): +153 return sorted(set([o.split('|')[0] for o in self.names])) +154 +155 @property +156 def cov_names(self): +157 return sorted(set([o for o in self.covobs.keys()])) +158 +159 @property +160 def mc_names(self): +161 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) +162 +163 @property +164 def e_content(self): +165 res = {} +166 for e, e_name in enumerate(self.e_names): +167 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) +168 if e_name in self.names: +169 res[e_name].append(e_name) +170 return res +171 +172 @property +173 def covobs(self): +174 return self._covobs +175 +176 def gamma_method(self, **kwargs): +177 """Estimate the error and related properties of the Obs. +178 +179 Parameters +180 ---------- +181 S : float +182 specifies a custom value for the parameter S (default 2.0). +183 If set to 0 it is assumed that the data exhibits no +184 autocorrelation. In this case the error estimates coincides +185 with the sample standard error. +186 tau_exp : float +187 positive value triggers the critical slowing down analysis +188 (default 0.0). +189 N_sigma : float +190 number of standard deviations from zero until the tail is +191 attached to the autocorrelation function (default 1). 
+192 fft : bool +193 determines whether the fft algorithm is used for the computation +194 of the autocorrelation function (default True) +195 """ +196 +197 e_content = self.e_content +198 self.e_dvalue = {} +199 self.e_ddvalue = {} +200 self.e_tauint = {} +201 self.e_dtauint = {} +202 self.e_windowsize = {} +203 self.e_n_tauint = {} +204 self.e_n_dtauint = {} +205 e_gamma = {} +206 self.e_rho = {} +207 self.e_drho = {} +208 self._dvalue = 0 +209 self.ddvalue = 0 +210 +211 self.S = {} +212 self.tau_exp = {} +213 self.N_sigma = {} +214 +215 if kwargs.get('fft') is False: +216 fft = False +217 else: +218 fft = True +219 +220 def _parse_kwarg(kwarg_name): +221 if kwarg_name in kwargs: +222 tmp = kwargs.get(kwarg_name) +223 if isinstance(tmp, (int, float)): +224 if tmp < 0: +225 raise Exception(kwarg_name + ' has to be larger or equal to 0.') +226 for e, e_name in enumerate(self.e_names): +227 getattr(self, kwarg_name)[e_name] = tmp +228 else: +229 raise TypeError(kwarg_name + ' is not in proper format.') +230 else: +231 for e, e_name in enumerate(self.e_names): +232 if e_name in getattr(Obs, kwarg_name + '_dict'): +233 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] +234 else: +235 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') +236 +237 _parse_kwarg('S') +238 _parse_kwarg('tau_exp') +239 _parse_kwarg('N_sigma') +240 +241 for e, e_name in enumerate(self.mc_names): +242 r_length = [] +243 for r_name in e_content[e_name]: +244 if isinstance(self.idl[r_name], range): +245 r_length.append(len(self.idl[r_name])) +246 else: +247 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) +248 +249 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) +250 w_max = max(r_length) // 2 +251 e_gamma[e_name] = np.zeros(w_max) +252 self.e_rho[e_name] = np.zeros(w_max) +253 self.e_drho[e_name] = np.zeros(w_max) +254 +255 for r_name in e_content[e_name]: +256 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) +257 +258 gamma_div = np.zeros(w_max) +259 for r_name in e_content[e_name]: +260 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) +261 gamma_div[gamma_div < 1] = 1.0 +262 e_gamma[e_name] /= gamma_div[:w_max] +263 +264 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero +265 self.e_tauint[e_name] = 0.5 +266 self.e_dtauint[e_name] = 0.0 +267 self.e_dvalue[e_name] = 0.0 +268 self.e_ddvalue[e_name] = 0.0 +269 self.e_windowsize[e_name] = 0 +270 continue +271 +272 gaps = [] +273 for r_name in e_content[e_name]: +274 if isinstance(self.idl[r_name], range): +275 gaps.append(1) +276 else: +277 gaps.append(np.min(np.diff(self.idl[r_name]))) +278 +279 if not np.all([gi == gaps[0] for gi in gaps]): +280 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) +281 else: +282 gapsize = gaps[0] +283 +284 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] +285 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) +286 # Make sure no entry of tauint is smaller than 0.5 +287 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps +288 # hep-lat/0306017 eq. 
(42)
+289 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
+290 self.e_n_dtauint[e_name][0] = 0.0
+291
+292 def _compute_drho(i):
+293 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+294 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+295
+296 _compute_drho(gapsize)
+297 if self.tau_exp[e_name] > 0:
+298 texp = self.tau_exp[e_name]
+299 # Critical slowing down analysis
+300 if w_max // 2 <= 1:
+301 raise Exception("Need at least 8 samples for tau_exp error analysis")
+302 for n in range(gapsize, w_max // 2, gapsize):
+303 _compute_drho(n + gapsize)
+304 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+305 # Bias correction hep-lat/0306017 eq. (49) included
+306 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
+307 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+308 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+309 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+310 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+311 self.e_windowsize[e_name] = n
+312 break
+313 else:
+314 if self.S[e_name] == 0.0:
+315 self.e_tauint[e_name] = 0.5
+316 self.e_dtauint[e_name] = 0.0
+317 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+318 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+319 self.e_windowsize[e_name] = 0
+320 else:
+321 # Standard automatic windowing procedure
+322 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
+323 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+324 for n in range(1, w_max):
+325 if n < w_max // 2 - 2:
+326 _compute_drho(gapsize * n + gapsize)
+327 if g_w[n - 1] < 0 or n >= w_max - 1:
+328 n *= gapsize
+329 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+330 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+331 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+332 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+333 self.e_windowsize[e_name] = n
+334 break
+335
+336 self._dvalue += self.e_dvalue[e_name] ** 2
+337 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+338
+339 for e_name in self.cov_names:
+340 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+341 self.e_ddvalue[e_name] = 0
+342 self._dvalue += self.e_dvalue[e_name]**2
+343
+344 self._dvalue = np.sqrt(self._dvalue)
+345 if self._dvalue == 0.0:
+346 self.ddvalue = 0.0
+347 else:
+348 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+349 return
+350
+351 gm = gamma_method
+352
+353 def _calc_gamma(self, deltas, idx, shape, w_max, fft):
+354 """Calculate Gamma_{AA} from the deltas, which are defined on idx.
+355 idx is assumed to be a contiguous range (possibly with a stepsize != 1) +356 +357 Parameters +358 ---------- +359 deltas : list +360 List of fluctuations +361 idx : list +362 List or range of configurations on which the deltas are defined. +363 shape : int +364 Number of configurations in idx. +365 w_max : int +366 Upper bound for the summation window. +367 fft : bool +368 determines whether the fft algorithm is used for the computation +369 of the autocorrelation function. +370 """ +371 gamma = np.zeros(w_max) +372 deltas = _expand_deltas(deltas, idx, shape) +373 new_shape = len(deltas) +374 if fft: +375 max_gamma = min(new_shape, w_max) +376 # The padding for the fft has to be even +377 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 +378 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] +379 else: +380 for n in range(w_max): +381 if new_shape - n >= 0: +382 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) +383 +384 return gamma +385 +386 def details(self, ens_content=True): +387 """Output detailed properties of the Obs. +388 +389 Parameters +390 ---------- +391 ens_content : bool +392 print details about the ensembles and replica if true. +393 """ +394 if self.tag is not None: +395 print("Description:", self.tag) +396 if not hasattr(self, 'e_dvalue'): +397 print('Result\t %3.8e' % (self.value)) +398 else: +399 if self.value == 0.0: +400 percentage = np.nan +401 else: +402 percentage = np.abs(self._dvalue / self.value) * 100 +403 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) +404 if len(self.e_names) > 1: +405 print(' Ensemble errors:') +406 e_content = self.e_content +407 for e_name in self.mc_names: +408 if isinstance(self.idl[e_content[e_name][0]], range): +409 gap = self.idl[e_content[e_name][0]].step +410 else: +411 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) +412 +413 if len(self.e_names) > 1: +414 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) +415 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) +416 tau_string += f" in units of {gap} config" +417 if gap > 1: +418 tau_string += "s" +419 if self.tau_exp[e_name] > 0: +420 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) +421 else: +422 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) +423 print(tau_string) +424 for e_name in self.cov_names: +425 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) +426 if ens_content is True: +427 if len(self.e_names) == 1: +428 print(self.N, 'samples in', len(self.e_names), 'ensemble:') +429 else: +430 print(self.N, 'samples in', len(self.e_names), 'ensembles:') +431 my_string_list = [] +432 for key, value in sorted(self.e_content.items()): +433 if key not in self.covobs: +434 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " +435 if len(value) == 1: +436 my_string += f': {self.shape[value[0]]} configurations' +437 if isinstance(self.idl[value[0]], range): +438 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' +439 else: +440 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' +441 else: +442 sublist = [] +443 for v in value: +444 my_substring = ' ' + "\u00B7 
Replicum '" + v[len(key) + 1:] + "' "
+445 my_substring += f': {self.shape[v]} configurations'
+446 if isinstance(self.idl[v], range):
+447 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+448 else:
+449 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+450 sublist.append(my_substring)
+451
+452 my_string += '\n' + '\n'.join(sublist)
+453 else:
+454 my_string = ' ' + "\u00B7 Covobs '" + key + "' "
+455 my_string_list.append(my_string)
+456 print('\n'.join(my_string_list))
+457
+458 def reweight(self, weight):
+459 """Reweight the obs with given reweighting factors.
+460
+461 Parameters
+462 ----------
+463 weight : Obs
+464 Reweighting factor. An Observable that has to be defined on a superset of the
+465 configurations in obs[i].idl for all i.
+466 all_configs : bool
+467 if True, the reweighted observables are normalized by the average of
+468 the reweighting factor on all configurations in weight.idl and not
+469 on the configurations in obs[i].idl. Default False.
+470 """
+471 return reweight(weight, [self])[0]
+472
+473 def is_zero_within_error(self, sigma=1):
+474 """Checks whether the observable is zero within 'sigma' standard errors.
+475
+476 Parameters
+477 ----------
+478 sigma : int
+479 Number of standard errors used for the check.
+480
+481 Only works properly when the gamma method was run.
+482 """
+483 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+484
+485 def is_zero(self, atol=1e-10):
+486 """Checks whether the observable is zero within a given tolerance.
+487
+488 Parameters
+489 ----------
+490 atol : float
+491 Absolute tolerance (for details see numpy documentation).
+492 """
+493 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
+494
+495 def plot_tauint(self, save=None):
+496 """Plot integrated autocorrelation time for each ensemble.
+497
+498 Parameters
+499 ----------
+500 save : str
+501 saves the figure to a file named 'save' if given.
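Editor's note: for orientation, a short end-to-end sketch of the workflow the methods above assume (editorial addition, not part of the diff; synthetic data, pe = pyerrors):

```python
import numpy as np
import pyerrors as pe

rng = np.random.default_rng(1)
samples = rng.normal(0.5, 0.1, 1000)      # synthetic Monte Carlo history
obs = pe.Obs([samples], ['ensembleA'])    # one ensemble, one replicum
obs.gamma_method()                        # the error analysis has to run first
obs.details()                             # prints value, error, tau_int and idl info
```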
+502 """ +503 if not hasattr(self, 'e_dvalue'): +504 raise Exception('Run the gamma method first.') +505 +506 for e, e_name in enumerate(self.mc_names): +507 fig = plt.figure() +508 plt.xlabel(r'$W$') +509 plt.ylabel(r'$\tau_\mathrm{int}$') +510 length = int(len(self.e_n_tauint[e_name])) +511 if self.tau_exp[e_name] > 0: +512 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] +513 x_help = np.arange(2 * self.tau_exp[e_name]) +514 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base +515 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) +516 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') +517 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], +518 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) +519 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +520 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) +521 else: +522 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) +523 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +524 +525 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) +526 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') +527 plt.legend() +528 plt.xlim(-0.5, xmax) +529 ylim = plt.ylim() +530 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) +531 plt.draw() +532 if save: +533 fig.savefig(save + "_" + str(e)) +534 +535 def plot_rho(self, save=None): +536 """Plot normalized autocorrelation function time for each ensemble. +537 +538 Parameters +539 ---------- +540 save : str +541 saves the figure to a file named 'save' if. 
+542 """ +543 if not hasattr(self, 'e_dvalue'): +544 raise Exception('Run the gamma method first.') +545 for e, e_name in enumerate(self.mc_names): +546 fig = plt.figure() +547 plt.xlabel('W') +548 plt.ylabel('rho') +549 length = int(len(self.e_drho[e_name])) +550 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) +551 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') +552 if self.tau_exp[e_name] > 0: +553 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], +554 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) +555 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +556 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) +557 else: +558 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +559 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) +560 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) +561 plt.xlim(-0.5, xmax) +562 plt.draw() +563 if save: +564 fig.savefig(save + "_" + str(e)) +565 +566 def plot_rep_dist(self): +567 """Plot replica distribution for each ensemble with more than one replicum.""" +568 if not hasattr(self, 'e_dvalue'): +569 raise Exception('Run the gamma method first.') +570 for e, e_name in enumerate(self.mc_names): +571 if len(self.e_content[e_name]) == 1: +572 print('No replica distribution for a single replicum (', e_name, ')') +573 continue +574 r_length = [] +575 sub_r_mean = 0 +576 for r, r_name in enumerate(self.e_content[e_name]): +577 r_length.append(len(self.deltas[r_name])) +578 sub_r_mean += self.shape[r_name] * self.r_values[r_name] +579 e_N = np.sum(r_length) +580 sub_r_mean /= e_N +581 arr = np.zeros(len(self.e_content[e_name])) +582 for r, r_name in enumerate(self.e_content[e_name]): +583 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) +584 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) +585 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') +586 plt.draw() +587 +588 def plot_history(self, expand=True): +589 """Plot derived Monte Carlo history for each ensemble +590 +591 Parameters +592 ---------- +593 expand : bool +594 show expanded history for irregular Monte Carlo chains (default: True). +595 """ +596 for e, e_name in enumerate(self.mc_names): +597 plt.figure() +598 r_length = [] +599 tmp = [] +600 tmp_expanded = [] +601 for r, r_name in enumerate(self.e_content[e_name]): +602 tmp.append(self.deltas[r_name] + self.r_values[r_name]) +603 if expand: +604 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) +605 r_length.append(len(tmp_expanded[-1])) +606 else: +607 r_length.append(len(tmp[-1])) +608 e_N = np.sum(r_length) +609 x = np.arange(e_N) +610 y_test = np.concatenate(tmp, axis=0) +611 if expand: +612 y = np.concatenate(tmp_expanded, axis=0) +613 else: +614 y = y_test +615 plt.errorbar(x, y, fmt='.', markersize=3) +616 plt.xlim(-0.5, e_N - 0.5) +617 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') +618 plt.draw() +619 +620 def plot_piechart(self, save=None): +621 """Plot piechart which shows the fractional contribution of each +622 ensemble to the error and returns a dictionary containing the fractions. 
+623
+624 Parameters
+625 ----------
+626 save : str
+627 saves the figure to a file named 'save' if given.
+628 """
+629 if not hasattr(self, 'e_dvalue'):
+630 raise Exception('Run the gamma method first.')
+631 if np.isclose(0.0, self._dvalue, atol=1e-15):
+632 raise Exception('Error is 0.0')
+633 labels = self.e_names
+634 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+635 fig1, ax1 = plt.subplots()
+636 ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+637 ax1.axis('equal')
+638 plt.draw()
+639 if save:
+640 fig1.savefig(save)
+641
+642 return dict(zip(self.e_names, sizes))
+643
+644 def dump(self, filename, datatype="json.gz", description="", **kwargs):
+645 """Dump the Obs to a file 'filename' of chosen format.
+646
+647 Parameters
+648 ----------
+649 filename : str
+650 name of the file to be saved.
+651 datatype : str
+652 Format of the exported file. Supported formats include
+653 "json.gz" and "pickle"
+654 description : str
+655 Description for output file, only relevant for json.gz format.
+656 path : str
+657 specifies a custom path for the file (default '.')
+658 """
+659 if 'path' in kwargs:
+660 file_name = kwargs.get('path') + '/' + filename
+661 else:
+662 file_name = filename
+663
+664 if datatype == "json.gz":
+665 from .input.json import dump_to_json
+666 dump_to_json([self], file_name, description=description)
+667 elif datatype == "pickle":
+668 with open(file_name + '.p', 'wb') as fb:
+669 pickle.dump(self, fb)
+670 else:
+671 raise Exception("Unknown datatype " + str(datatype))
+672
+673 def export_jackknife(self):
+674 """Export jackknife samples from the Obs.
+675
+676 Returns
+677 -------
+678 numpy.ndarray
+679 Returns a numpy array of length N + 1 where N is the number of samples
+680 for the given ensemble and replicum. The zeroth entry of the array contains
+681 the mean value of the Obs, entries 1 to N contain the N jackknife samples
+682 derived from the Obs. The current implementation only works for observables
+683 defined on exactly one ensemble and replicum. The derived jackknife samples
+684 should agree with samples from a full jackknife analysis up to O(1/N).
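Editor's note: a round-trip sketch for dump via the pickle branch shown above; the final comparison relies on Obs.__eq__ further down, which tests (self - other).is_zero() (editorial addition, reusing obs from above):

```python
import pickle

obs.dump("my_obs", datatype="pickle")    # writes my_obs.p
with open("my_obs.p", "rb") as f:
    reloaded = pickle.load(f)
print(reloaded == obs)                   # True: the round trip preserves the Obs
```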
+685 """ +686 +687 if len(self.names) != 1: +688 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") +689 +690 name = self.names[0] +691 full_data = self.deltas[name] + self.r_values[name] +692 n = full_data.size +693 mean = self.value +694 tmp_jacks = np.zeros(n + 1) +695 tmp_jacks[0] = mean +696 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) +697 return tmp_jacks +698 +699 def __float__(self): +700 return float(self.value) +701 +702 def __repr__(self): +703 return 'Obs[' + str(self) + ']' +704 +705 def __str__(self): +706 return _format_uncertainty(self.value, self._dvalue) +707 +708 def __hash__(self): +709 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +710 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +711 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) +712 hash_tuple += tuple([o.encode() for o in self.names]) +713 m = hashlib.md5() +714 [m.update(o) for o in hash_tuple] +715 return int(m.hexdigest(), 16) & 0xFFFFFFFF +716 +717 # Overload comparisons +718 def __lt__(self, other): +719 return self.value < other +720 +721 def __le__(self, other): +722 return self.value <= other +723 +724 def __gt__(self, other): +725 return self.value > other +726 +727 def __ge__(self, other): +728 return self.value >= other +729 +730 def __eq__(self, other): +731 return (self - other).is_zero() +732 +733 def __ne__(self, other): +734 return not (self - other).is_zero() +735 +736 # Overload math operations +737 def __add__(self, y): +738 if isinstance(y, Obs): +739 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +740 else: +741 if isinstance(y, np.ndarray): +742 return np.array([self + o for o in y]) +743 elif y.__class__.__name__ in ['Corr', 'CObs']: +744 return NotImplemented +745 else: +746 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +747 +748 def __radd__(self, y): +749 return self + y +750 +751 def __mul__(self, y): +752 if isinstance(y, Obs): +753 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +754 else: +755 if isinstance(y, np.ndarray): +756 return np.array([self * o for o in y]) +757 elif isinstance(y, complex): +758 return CObs(self * y.real, self * y.imag) +759 elif y.__class__.__name__ in ['Corr', 'CObs']: +760 return NotImplemented +761 else: +762 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +763 +764 def __rmul__(self, y): +765 return self * y +766 +767 def __sub__(self, y): +768 if isinstance(y, Obs): +769 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +770 else: +771 if isinstance(y, np.ndarray): +772 return np.array([self - o for o in y]) +773 elif y.__class__.__name__ in ['Corr', 'CObs']: +774 return NotImplemented +775 else: +776 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) +777 +778 def __rsub__(self, y): +779 return -1 * (self - y) +780 +781 def __pos__(self): +782 return self +783 +784 def __neg__(self): +785 return -1 * self +786 +787 def __truediv__(self, y): +788 if isinstance(y, Obs): +789 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +790 else: +791 if isinstance(y, np.ndarray): +792 return np.array([self / o for o in y]) +793 elif y.__class__.__name__ in ['Corr', 'CObs']: +794 return 
NotImplemented +795 else: +796 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +797 +798 def __rtruediv__(self, y): +799 if isinstance(y, Obs): +800 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +801 else: +802 if isinstance(y, np.ndarray): +803 return np.array([o / self for o in y]) +804 elif y.__class__.__name__ in ['Corr', 'CObs']: +805 return NotImplemented +806 else: +807 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +808 +809 def __pow__(self, y): +810 if isinstance(y, Obs): +811 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +812 else: +813 return derived_observable(lambda x: x[0] ** y, [self]) +814 +815 def __rpow__(self, y): +816 if isinstance(y, Obs): +817 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +818 else: +819 return derived_observable(lambda x: y ** x[0], [self]) +820 +821 def __abs__(self): +822 return derived_observable(lambda x: anp.abs(x[0]), [self]) +823 +824 # Overload numpy functions +825 def sqrt(self): +826 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +827 +828 def log(self): +829 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +830 +831 def exp(self): +832 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +833 +834 def sin(self): +835 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +836 +837 def cos(self): +838 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +839 +840 def tan(self): +841 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +842 +843 def arcsin(self): +844 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +845 +846 def arccos(self): +847 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +848 +849 def arctan(self): +850 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +851 +852 def sinh(self): +853 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +854 +855 def cosh(self): +856 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +857 +858 def tanh(self): +859 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +860 +861 def arcsinh(self): +862 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +863 +864 def arccosh(self): +865 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +866 +867 def arctanh(self): +868 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -2791,48 +2782,47 @@ this overwrites the standard value for that ensemble. 98 99 self._value = 0 100 self.N = 0 -101 self.is_merged = {} -102 self.idl = {} -103 if idl is not None: -104 for name, idx in sorted(zip(names, idl)): -105 if isinstance(idx, range): -106 self.idl[name] = idx -107 elif isinstance(idx, (list, np.ndarray)): -108 dc = np.unique(np.diff(idx)) -109 if np.any(dc < 0): -110 raise Exception("Unsorted idx for idl[%s]" % (name)) -111 if len(dc) == 1: -112 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) -113 else: -114 self.idl[name] = list(idx) -115 else: -116 raise Exception('incompatible type for idl[%s].' 
% (name)) -117 else: -118 for name, sample in sorted(zip(names, samples)): -119 self.idl[name] = range(1, len(sample) + 1) -120 -121 if kwargs.get("means") is not None: -122 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): -123 self.shape[name] = len(self.idl[name]) -124 self.N += self.shape[name] -125 self.r_values[name] = mean -126 self.deltas[name] = sample -127 else: -128 for name, sample in sorted(zip(names, samples)): -129 self.shape[name] = len(self.idl[name]) -130 self.N += self.shape[name] -131 if len(sample) != self.shape[name]: -132 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) -133 self.r_values[name] = np.mean(sample) -134 self.deltas[name] = sample - self.r_values[name] -135 self._value += self.shape[name] * self.r_values[name] -136 self._value /= self.N -137 -138 self._dvalue = 0.0 -139 self.ddvalue = 0.0 -140 self.reweighted = False -141 -142 self.tag = None +101 self.idl = {} +102 if idl is not None: +103 for name, idx in sorted(zip(names, idl)): +104 if isinstance(idx, range): +105 self.idl[name] = idx +106 elif isinstance(idx, (list, np.ndarray)): +107 dc = np.unique(np.diff(idx)) +108 if np.any(dc < 0): +109 raise Exception("Unsorted idx for idl[%s]" % (name)) +110 if len(dc) == 1: +111 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) +112 else: +113 self.idl[name] = list(idx) +114 else: +115 raise Exception('incompatible type for idl[%s].' % (name)) +116 else: +117 for name, sample in sorted(zip(names, samples)): +118 self.idl[name] = range(1, len(sample) + 1) +119 +120 if kwargs.get("means") is not None: +121 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): +122 self.shape[name] = len(self.idl[name]) +123 self.N += self.shape[name] +124 self.r_values[name] = mean +125 self.deltas[name] = sample +126 else: +127 for name, sample in sorted(zip(names, samples)): +128 self.shape[name] = len(self.idl[name]) +129 self.N += self.shape[name] +130 if len(sample) != self.shape[name]: +131 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) +132 self.r_values[name] = np.mean(sample) +133 self.deltas[name] = sample - self.r_values[name] +134 self._value += self.shape[name] * self.r_values[name] +135 self._value /= self.N +136 +137 self._dvalue = 0.0 +138 self.ddvalue = 0.0 +139 self.reweighted = False +140 +141 self.tag = None @@ -2863,180 +2853,180 @@ list of ranges or lists on which the samples are defined -
177    def gamma_method(self, **kwargs):
-178        """Estimate the error and related properties of the Obs.
-179
-180        Parameters
-181        ----------
-182        S : float
-183            specifies a custom value for the parameter S (default 2.0).
-184            If set to 0 it is assumed that the data exhibits no
-185            autocorrelation. In this case the error estimate coincides
-186            with the sample standard error.
-187        tau_exp : float
-188            positive value triggers the critical slowing down analysis
-189            (default 0.0).
-190        N_sigma : float
-191            number of standard deviations from zero until the tail is
-192            attached to the autocorrelation function (default 1).
-193        fft : bool
-194            determines whether the fft algorithm is used for the computation
-195            of the autocorrelation function (default True)
-196        """
-197
-198        e_content = self.e_content
-199        self.e_dvalue = {}
-200        self.e_ddvalue = {}
-201        self.e_tauint = {}
-202        self.e_dtauint = {}
-203        self.e_windowsize = {}
-204        self.e_n_tauint = {}
-205        self.e_n_dtauint = {}
-206        e_gamma = {}
-207        self.e_rho = {}
-208        self.e_drho = {}
-209        self._dvalue = 0
-210        self.ddvalue = 0
-211
-212        self.S = {}
-213        self.tau_exp = {}
-214        self.N_sigma = {}
-215
-216        if kwargs.get('fft') is False:
-217            fft = False
-218        else:
-219            fft = True
-220
-221        def _parse_kwarg(kwarg_name):
-222            if kwarg_name in kwargs:
-223                tmp = kwargs.get(kwarg_name)
-224                if isinstance(tmp, (int, float)):
-225                    if tmp < 0:
-226                        raise Exception(kwarg_name + ' has to be larger than or equal to 0.')
-227                    for e, e_name in enumerate(self.e_names):
-228                        getattr(self, kwarg_name)[e_name] = tmp
-229                else:
-230                    raise TypeError(kwarg_name + ' is not in a proper format.')
-231            else:
-232                for e, e_name in enumerate(self.e_names):
-233                    if e_name in getattr(Obs, kwarg_name + '_dict'):
-234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
-235                    else:
-236                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
-237
-238        _parse_kwarg('S')
-239        _parse_kwarg('tau_exp')
-240        _parse_kwarg('N_sigma')
-241
-242        for e, e_name in enumerate(self.mc_names):
-243            r_length = []
-244            for r_name in e_content[e_name]:
-245                if isinstance(self.idl[r_name], range):
-246                    r_length.append(len(self.idl[r_name]))
-247                else:
-248                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
-249
-250            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
-251            w_max = max(r_length) // 2
-252            e_gamma[e_name] = np.zeros(w_max)
-253            self.e_rho[e_name] = np.zeros(w_max)
-254            self.e_drho[e_name] = np.zeros(w_max)
-255
-256            for r_name in e_content[e_name]:
-257                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
-258
-259            gamma_div = np.zeros(w_max)
-260            for r_name in e_content[e_name]:
-261                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
-262            gamma_div[gamma_div < 1] = 1.0
-263            e_gamma[e_name] /= gamma_div[:w_max]
-264
-265            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
-266                self.e_tauint[e_name] = 0.5
-267                self.e_dtauint[e_name] = 0.0
-268                self.e_dvalue[e_name] = 0.0
-269                self.e_ddvalue[e_name] = 0.0
-270                self.e_windowsize[e_name] = 0
-271                continue
-272
-273            gaps = []
-274            for r_name in e_content[e_name]:
-275                if isinstance(self.idl[r_name], range):
-276                    gaps.append(1)
-277                else:
-278                    gaps.append(np.min(np.diff(self.idl[r_name])))
-279
-280            if not np.all([gi == gaps[0] for gi in gaps]):
-281                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
-282            else:
-283                gapsize = gaps[0]
-284
-285            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
-286            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
-287            # Make sure no entry of tauint is smaller than 0.5
-288            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
-289            # hep-lat/0306017 eq. (42)
-290            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
-291            self.e_n_dtauint[e_name][0] = 0.0
-292
-293            def _compute_drho(i):
-294                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
-295                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
-296
-297            _compute_drho(gapsize)
-298            if self.tau_exp[e_name] > 0:
-299                texp = self.tau_exp[e_name]
-300                # Critical slowing down analysis
-301                if w_max // 2 <= 1:
-302                    raise Exception("Need at least 8 samples for tau_exp error analysis")
-303                for n in range(gapsize, w_max // 2, gapsize):
-304                    _compute_drho(n + gapsize)
-305                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
-306                        # Bias correction hep-lat/0306017 eq. (49) included
-307                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
-308                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
-309                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
-310                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-311                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-312                        self.e_windowsize[e_name] = n
-313                        break
-314            else:
-315                if self.S[e_name] == 0.0:
-316                    self.e_tauint[e_name] = 0.5
-317                    self.e_dtauint[e_name] = 0.0
-318                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
-319                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
-320                    self.e_windowsize[e_name] = 0
-321                else:
-322                    # Standard automatic windowing procedure
-323                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
-324                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
-325                    for n in range(1, w_max):
-326                        if n < w_max // 2 - 2:
-327                            _compute_drho(gapsize * n + gapsize)
-328                        if g_w[n - 1] < 0 or n >= w_max - 1:
-329                            n *= gapsize
-330                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-331                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-332                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-333                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-334                            self.e_windowsize[e_name] = n
-335                            break
-336
-337            self._dvalue += self.e_dvalue[e_name] ** 2
-338            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
-339
-340        for e_name in self.cov_names:
-341            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
-342            self.e_ddvalue[e_name] = 0
-343            self._dvalue += self.e_dvalue[e_name]**2
-344
-345        self._dvalue = np.sqrt(self._dvalue)
-346        if self._dvalue == 0.0:
-347            self.ddvalue = 0.0
-348        else:
-349            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
-350        return
+            
176    def gamma_method(self, **kwargs):
+177        """Estimate the error and related properties of the Obs.
+178
+179        Parameters
+180        ----------
+181        S : float
+182            specifies a custom value for the parameter S (default 2.0).
+183            If set to 0 it is assumed that the data exhibits no
+184            autocorrelation. In this case the error estimate coincides
+185            with the sample standard error.
+186        tau_exp : float
+187            positive value triggers the critical slowing down analysis
+188            (default 0.0).
+189        N_sigma : float
+190            number of standard deviations from zero until the tail is
+191            attached to the autocorrelation function (default 1).
+192        fft : bool
+193            determines whether the fft algorithm is used for the computation
+194            of the autocorrelation function (default True)
+195        """
+196
+197        e_content = self.e_content
+198        self.e_dvalue = {}
+199        self.e_ddvalue = {}
+200        self.e_tauint = {}
+201        self.e_dtauint = {}
+202        self.e_windowsize = {}
+203        self.e_n_tauint = {}
+204        self.e_n_dtauint = {}
+205        e_gamma = {}
+206        self.e_rho = {}
+207        self.e_drho = {}
+208        self._dvalue = 0
+209        self.ddvalue = 0
+210
+211        self.S = {}
+212        self.tau_exp = {}
+213        self.N_sigma = {}
+214
+215        if kwargs.get('fft') is False:
+216            fft = False
+217        else:
+218            fft = True
+219
+220        def _parse_kwarg(kwarg_name):
+221            if kwarg_name in kwargs:
+222                tmp = kwargs.get(kwarg_name)
+223                if isinstance(tmp, (int, float)):
+224                    if tmp < 0:
+225                        raise Exception(kwarg_name + ' has to be larger than or equal to 0.')
+226                    for e, e_name in enumerate(self.e_names):
+227                        getattr(self, kwarg_name)[e_name] = tmp
+228                else:
+229                    raise TypeError(kwarg_name + ' is not in proper format.')
+230            else:
+231                for e, e_name in enumerate(self.e_names):
+232                    if e_name in getattr(Obs, kwarg_name + '_dict'):
+233                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
+234                    else:
+235                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
+236
+237        _parse_kwarg('S')
+238        _parse_kwarg('tau_exp')
+239        _parse_kwarg('N_sigma')
+240
+241        for e, e_name in enumerate(self.mc_names):
+242            r_length = []
+243            for r_name in e_content[e_name]:
+244                if isinstance(self.idl[r_name], range):
+245                    r_length.append(len(self.idl[r_name]))
+246                else:
+247                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+248
+249            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+250            w_max = max(r_length) // 2
+251            e_gamma[e_name] = np.zeros(w_max)
+252            self.e_rho[e_name] = np.zeros(w_max)
+253            self.e_drho[e_name] = np.zeros(w_max)
+254
+255            for r_name in e_content[e_name]:
+256                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+257
+258            gamma_div = np.zeros(w_max)
+259            for r_name in e_content[e_name]:
+260                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+261            gamma_div[gamma_div < 1] = 1.0
+262            e_gamma[e_name] /= gamma_div[:w_max]
+263
+264            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+265                self.e_tauint[e_name] = 0.5
+266                self.e_dtauint[e_name] = 0.0
+267                self.e_dvalue[e_name] = 0.0
+268                self.e_ddvalue[e_name] = 0.0
+269                self.e_windowsize[e_name] = 0
+270                continue
+271
+272            gaps = []
+273            for r_name in e_content[e_name]:
+274                if isinstance(self.idl[r_name], range):
+275                    gaps.append(1)
+276                else:
+277                    gaps.append(np.min(np.diff(self.idl[r_name])))
+278
+279            if not np.all([gi == gaps[0] for gi in gaps]):
+280                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
+281            else:
+282                gapsize = gaps[0]
+283
+284            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+285            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+286            # Make sure no entry of tauint is smaller than 0.5
+287            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+288            # hep-lat/0306017 eq. (42)
+289            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
+290            self.e_n_dtauint[e_name][0] = 0.0
+291
+292            def _compute_drho(i):
+293                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+294                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+295
+296            _compute_drho(gapsize)
+297            if self.tau_exp[e_name] > 0:
+298                texp = self.tau_exp[e_name]
+299                # Critical slowing down analysis
+300                if w_max // 2 <= 1:
+301                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+302                for n in range(gapsize, w_max // 2, gapsize):
+303                    _compute_drho(n + gapsize)
+304                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+305                        # Bias correction hep-lat/0306017 eq. (49) included
+306                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
+307                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+308                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+309                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+310                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+311                        self.e_windowsize[e_name] = n
+312                        break
+313            else:
+314                if self.S[e_name] == 0.0:
+315                    self.e_tauint[e_name] = 0.5
+316                    self.e_dtauint[e_name] = 0.0
+317                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+318                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+319                    self.e_windowsize[e_name] = 0
+320                else:
+321                    # Standard automatic windowing procedure
+322                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
+323                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+324                    for n in range(1, w_max):
+325                        if n < w_max // 2 - 2:
+326                            _compute_drho(gapsize * n + gapsize)
+327                        if g_w[n - 1] < 0 or n >= w_max - 1:
+328                            n *= gapsize
+329                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+330                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+331                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+332                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+333                            self.e_windowsize[e_name] = n
+334                            break
+335
+336            self._dvalue += self.e_dvalue[e_name] ** 2
+337            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+338
+339        for e_name in self.cov_names:
+340            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+341            self.e_ddvalue[e_name] = 0
+342            self._dvalue += self.e_dvalue[e_name]**2
+343
+344        self._dvalue = np.sqrt(self._dvalue)
+345        if self._dvalue == 0.0:
+346            self.ddvalue = 0.0
+347        else:
+348            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+349        return
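
A minimal usage sketch of the gamma method defined above, assuming pyerrors is importable as pe; the AR(1) test chain, the seed and the ensemble name 'ens_A' are illustrative inventions, not part of the library:

    import numpy as np
    import pyerrors as pe

    # Synthetic autocorrelated chain (AR(1)), so tau_int is visibly larger than 0.5.
    rng = np.random.default_rng(0)
    chain = np.zeros(1000)
    for i in range(1, len(chain)):
        chain[i] = 0.7 * chain[i - 1] + rng.normal()

    obs = pe.Obs([chain], ['ens_A'])
    obs.gamma_method()                       # automatic windowing with the default S=2.0
    obs.gamma_method(tau_exp=5, N_sigma=1)   # alternative: critical slowing down analysis
    print(obs.e_tauint['ens_A'], obs.e_windowsize['ens_A'])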
 
@@ -3075,180 +3065,180 @@ of the autocorrelation function (default True)
-
177    def gamma_method(self, **kwargs):
-178        """Estimate the error and related properties of the Obs.
-179
-180        Parameters
-181        ----------
-182        S : float
-183            specifies a custom value for the parameter S (default 2.0).
-184            If set to 0 it is assumed that the data exhibits no
-185            autocorrelation. In this case the error estimate coincides
-186            with the sample standard error.
-187        tau_exp : float
-188            positive value triggers the critical slowing down analysis
-189            (default 0.0).
-190        N_sigma : float
-191            number of standard deviations from zero until the tail is
-192            attached to the autocorrelation function (default 1).
-193        fft : bool
-194            determines whether the fft algorithm is used for the computation
-195            of the autocorrelation function (default True)
-196        """
-197
-198        e_content = self.e_content
-199        self.e_dvalue = {}
-200        self.e_ddvalue = {}
-201        self.e_tauint = {}
-202        self.e_dtauint = {}
-203        self.e_windowsize = {}
-204        self.e_n_tauint = {}
-205        self.e_n_dtauint = {}
-206        e_gamma = {}
-207        self.e_rho = {}
-208        self.e_drho = {}
-209        self._dvalue = 0
-210        self.ddvalue = 0
-211
-212        self.S = {}
-213        self.tau_exp = {}
-214        self.N_sigma = {}
-215
-216        if kwargs.get('fft') is False:
-217            fft = False
-218        else:
-219            fft = True
-220
-221        def _parse_kwarg(kwarg_name):
-222            if kwarg_name in kwargs:
-223                tmp = kwargs.get(kwarg_name)
-224                if isinstance(tmp, (int, float)):
-225                    if tmp < 0:
-226                        raise Exception(kwarg_name + ' has to be larger than or equal to 0.')
-227                    for e, e_name in enumerate(self.e_names):
-228                        getattr(self, kwarg_name)[e_name] = tmp
-229                else:
-230                    raise TypeError(kwarg_name + ' is not in proper format.')
-231            else:
-232                for e, e_name in enumerate(self.e_names):
-233                    if e_name in getattr(Obs, kwarg_name + '_dict'):
-234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
-235                    else:
-236                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
-237
-238        _parse_kwarg('S')
-239        _parse_kwarg('tau_exp')
-240        _parse_kwarg('N_sigma')
-241
-242        for e, e_name in enumerate(self.mc_names):
-243            r_length = []
-244            for r_name in e_content[e_name]:
-245                if isinstance(self.idl[r_name], range):
-246                    r_length.append(len(self.idl[r_name]))
-247                else:
-248                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
-249
-250            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
-251            w_max = max(r_length) // 2
-252            e_gamma[e_name] = np.zeros(w_max)
-253            self.e_rho[e_name] = np.zeros(w_max)
-254            self.e_drho[e_name] = np.zeros(w_max)
-255
-256            for r_name in e_content[e_name]:
-257                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
-258
-259            gamma_div = np.zeros(w_max)
-260            for r_name in e_content[e_name]:
-261                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
-262            gamma_div[gamma_div < 1] = 1.0
-263            e_gamma[e_name] /= gamma_div[:w_max]
-264
-265            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
-266                self.e_tauint[e_name] = 0.5
-267                self.e_dtauint[e_name] = 0.0
-268                self.e_dvalue[e_name] = 0.0
-269                self.e_ddvalue[e_name] = 0.0
-270                self.e_windowsize[e_name] = 0
-271                continue
-272
-273            gaps = []
-274            for r_name in e_content[e_name]:
-275                if isinstance(self.idl[r_name], range):
-276                    gaps.append(1)
-277                else:
-278                    gaps.append(np.min(np.diff(self.idl[r_name])))
-279
-280            if not np.all([gi == gaps[0] for gi in gaps]):
-281                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
-282            else:
-283                gapsize = gaps[0]
-284
-285            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
-286            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
-287            # Make sure no entry of tauint is smaller than 0.5
-288            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
-289            # hep-lat/0306017 eq. (42)
-290            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
-291            self.e_n_dtauint[e_name][0] = 0.0
-292
-293            def _compute_drho(i):
-294                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
-295                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
-296
-297            _compute_drho(gapsize)
-298            if self.tau_exp[e_name] > 0:
-299                texp = self.tau_exp[e_name]
-300                # Critical slowing down analysis
-301                if w_max // 2 <= 1:
-302                    raise Exception("Need at least 8 samples for tau_exp error analysis")
-303                for n in range(gapsize, w_max // 2, gapsize):
-304                    _compute_drho(n + gapsize)
-305                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
-306                        # Bias correction hep-lat/0306017 eq. (49) included
-307                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
-308                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
-309                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
-310                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-311                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-312                        self.e_windowsize[e_name] = n
-313                        break
-314            else:
-315                if self.S[e_name] == 0.0:
-316                    self.e_tauint[e_name] = 0.5
-317                    self.e_dtauint[e_name] = 0.0
-318                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
-319                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
-320                    self.e_windowsize[e_name] = 0
-321                else:
-322                    # Standard automatic windowing procedure
-323                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
-324                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
-325                    for n in range(1, w_max):
-326                        if n < w_max // 2 - 2:
-327                            _compute_drho(gapsize * n + gapsize)
-328                        if g_w[n - 1] < 0 or n >= w_max - 1:
-329                            n *= gapsize
-330                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-331                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-332                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-333                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-334                            self.e_windowsize[e_name] = n
-335                            break
-336
-337            self._dvalue += self.e_dvalue[e_name] ** 2
-338            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
-339
-340        for e_name in self.cov_names:
-341            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
-342            self.e_ddvalue[e_name] = 0
-343            self._dvalue += self.e_dvalue[e_name]**2
-344
-345        self._dvalue = np.sqrt(self._dvalue)
-346        if self._dvalue == 0.0:
-347            self.ddvalue = 0.0
-348        else:
-349            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
-350        return
+            
176    def gamma_method(self, **kwargs):
+177        """Estimate the error and related properties of the Obs.
+178
+179        Parameters
+180        ----------
+181        S : float
+182            specifies a custom value for the parameter S (default 2.0).
+183            If set to 0 it is assumed that the data exhibits no
+184            autocorrelation. In this case the error estimate coincides
+185            with the sample standard error.
+186        tau_exp : float
+187            positive value triggers the critical slowing down analysis
+188            (default 0.0).
+189        N_sigma : float
+190            number of standard deviations from zero until the tail is
+191            attached to the autocorrelation function (default 1).
+192        fft : bool
+193            determines whether the fft algorithm is used for the computation
+194            of the autocorrelation function (default True)
+195        """
+196
+197        e_content = self.e_content
+198        self.e_dvalue = {}
+199        self.e_ddvalue = {}
+200        self.e_tauint = {}
+201        self.e_dtauint = {}
+202        self.e_windowsize = {}
+203        self.e_n_tauint = {}
+204        self.e_n_dtauint = {}
+205        e_gamma = {}
+206        self.e_rho = {}
+207        self.e_drho = {}
+208        self._dvalue = 0
+209        self.ddvalue = 0
+210
+211        self.S = {}
+212        self.tau_exp = {}
+213        self.N_sigma = {}
+214
+215        if kwargs.get('fft') is False:
+216            fft = False
+217        else:
+218            fft = True
+219
+220        def _parse_kwarg(kwarg_name):
+221            if kwarg_name in kwargs:
+222                tmp = kwargs.get(kwarg_name)
+223                if isinstance(tmp, (int, float)):
+224                    if tmp < 0:
+225                        raise Exception(kwarg_name + ' has to be larger than or equal to 0.')
+226                    for e, e_name in enumerate(self.e_names):
+227                        getattr(self, kwarg_name)[e_name] = tmp
+228                else:
+229                    raise TypeError(kwarg_name + ' is not in proper format.')
+230            else:
+231                for e, e_name in enumerate(self.e_names):
+232                    if e_name in getattr(Obs, kwarg_name + '_dict'):
+233                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
+234                    else:
+235                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
+236
+237        _parse_kwarg('S')
+238        _parse_kwarg('tau_exp')
+239        _parse_kwarg('N_sigma')
+240
+241        for e, e_name in enumerate(self.mc_names):
+242            r_length = []
+243            for r_name in e_content[e_name]:
+244                if isinstance(self.idl[r_name], range):
+245                    r_length.append(len(self.idl[r_name]))
+246                else:
+247                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+248
+249            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+250            w_max = max(r_length) // 2
+251            e_gamma[e_name] = np.zeros(w_max)
+252            self.e_rho[e_name] = np.zeros(w_max)
+253            self.e_drho[e_name] = np.zeros(w_max)
+254
+255            for r_name in e_content[e_name]:
+256                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+257
+258            gamma_div = np.zeros(w_max)
+259            for r_name in e_content[e_name]:
+260                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+261            gamma_div[gamma_div < 1] = 1.0
+262            e_gamma[e_name] /= gamma_div[:w_max]
+263
+264            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+265                self.e_tauint[e_name] = 0.5
+266                self.e_dtauint[e_name] = 0.0
+267                self.e_dvalue[e_name] = 0.0
+268                self.e_ddvalue[e_name] = 0.0
+269                self.e_windowsize[e_name] = 0
+270                continue
+271
+272            gaps = []
+273            for r_name in e_content[e_name]:
+274                if isinstance(self.idl[r_name], range):
+275                    gaps.append(1)
+276                else:
+277                    gaps.append(np.min(np.diff(self.idl[r_name])))
+278
+279            if not np.all([gi == gaps[0] for gi in gaps]):
+280                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
+281            else:
+282                gapsize = gaps[0]
+283
+284            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+285            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+286            # Make sure no entry of tauint is smaller than 0.5
+287            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+288            # hep-lat/0306017 eq. (42)
+289            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
+290            self.e_n_dtauint[e_name][0] = 0.0
+291
+292            def _compute_drho(i):
+293                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+294                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+295
+296            _compute_drho(gapsize)
+297            if self.tau_exp[e_name] > 0:
+298                texp = self.tau_exp[e_name]
+299                # Critical slowing down analysis
+300                if w_max // 2 <= 1:
+301                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+302                for n in range(gapsize, w_max // 2, gapsize):
+303                    _compute_drho(n + gapsize)
+304                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+305                        # Bias correction hep-lat/0306017 eq. (49) included
+306                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
+307                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+308                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+309                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+310                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+311                        self.e_windowsize[e_name] = n
+312                        break
+313            else:
+314                if self.S[e_name] == 0.0:
+315                    self.e_tauint[e_name] = 0.5
+316                    self.e_dtauint[e_name] = 0.0
+317                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+318                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+319                    self.e_windowsize[e_name] = 0
+320                else:
+321                    # Standard automatic windowing procedure
+322                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
+323                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+324                    for n in range(1, w_max):
+325                        if n < w_max // 2 - 2:
+326                            _compute_drho(gapsize * n + gapsize)
+327                        if g_w[n - 1] < 0 or n >= w_max - 1:
+328                            n *= gapsize
+329                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+330                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+331                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+332                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+333                            self.e_windowsize[e_name] = n
+334                            break
+335
+336            self._dvalue += self.e_dvalue[e_name] ** 2
+337            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+338
+339        for e_name in self.cov_names:
+340            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+341            self.e_ddvalue[e_name] = 0
+342            self._dvalue += self.e_dvalue[e_name]**2
+343
+344        self._dvalue = np.sqrt(self._dvalue)
+345        if self._dvalue == 0.0:
+346            self.ddvalue = 0.0
+347        else:
+348            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+349        return
 
@@ -3287,77 +3277,77 @@ of the autocorrelation function (default True)
-
387    def details(self, ens_content=True):
-388        """Output detailed properties of the Obs.
-389
-390        Parameters
-391        ----------
-392        ens_content : bool
-393            print details about the ensembles and replica if true.
-394        """
-395        if self.tag is not None:
-396            print("Description:", self.tag)
-397        if not hasattr(self, 'e_dvalue'):
-398            print('Result\t %3.8e' % (self.value))
-399        else:
-400            if self.value == 0.0:
-401                percentage = np.nan
-402            else:
-403                percentage = np.abs(self._dvalue / self.value) * 100
-404            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
-405            if len(self.e_names) > 1:
-406                print(' Ensemble errors:')
-407            e_content = self.e_content
-408            for e_name in self.mc_names:
-409                if isinstance(self.idl[e_content[e_name][0]], range):
-410                    gap = self.idl[e_content[e_name][0]].step
-411                else:
-412                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
-413
-414                if len(self.e_names) > 1:
-415                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
-416                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
-417                tau_string += f" in units of {gap} config"
-418                if gap > 1:
-419                    tau_string += "s"
-420                if self.tau_exp[e_name] > 0:
-421                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
-422                else:
-423                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
-424                print(tau_string)
-425            for e_name in self.cov_names:
-426                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
-427        if ens_content is True:
-428            if len(self.e_names) == 1:
-429                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
-430            else:
-431                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
-432            my_string_list = []
-433            for key, value in sorted(self.e_content.items()):
-434                if key not in self.covobs:
-435                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
-436                    if len(value) == 1:
-437                        my_string += f': {self.shape[value[0]]} configurations'
-438                        if isinstance(self.idl[value[0]], range):
-439                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
-440                        else:
-441                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
-442                    else:
-443                        sublist = []
-444                        for v in value:
-445                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
-446                            my_substring += f': {self.shape[v]} configurations'
-447                            if isinstance(self.idl[v], range):
-448                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
-449                            else:
-450                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
-451                            sublist.append(my_substring)
-452
-453                        my_string += '\n' + '\n'.join(sublist)
-454                else:
-455                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
-456                my_string_list.append(my_string)
-457            print('\n'.join(my_string_list))
+            
386    def details(self, ens_content=True):
+387        """Output detailed properties of the Obs.
+388
+389        Parameters
+390        ----------
+391        ens_content : bool
+392            print details about the ensembles and replica if true.
+393        """
+394        if self.tag is not None:
+395            print("Description:", self.tag)
+396        if not hasattr(self, 'e_dvalue'):
+397            print('Result\t %3.8e' % (self.value))
+398        else:
+399            if self.value == 0.0:
+400                percentage = np.nan
+401            else:
+402                percentage = np.abs(self._dvalue / self.value) * 100
+403            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
+404            if len(self.e_names) > 1:
+405                print(' Ensemble errors:')
+406            e_content = self.e_content
+407            for e_name in self.mc_names:
+408                if isinstance(self.idl[e_content[e_name][0]], range):
+409                    gap = self.idl[e_content[e_name][0]].step
+410                else:
+411                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
+412
+413                if len(self.e_names) > 1:
+414                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+415                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
+416                tau_string += f" in units of {gap} config"
+417                if gap > 1:
+418                    tau_string += "s"
+419                if self.tau_exp[e_name] > 0:
+420                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
+421                else:
+422                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
+423                print(tau_string)
+424            for e_name in self.cov_names:
+425                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+426        if ens_content is True:
+427            if len(self.e_names) == 1:
+428                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+429            else:
+430                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+431            my_string_list = []
+432            for key, value in sorted(self.e_content.items()):
+433                if key not in self.covobs:
+434                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
+435                    if len(value) == 1:
+436                        my_string += f': {self.shape[value[0]]} configurations'
+437                        if isinstance(self.idl[value[0]], range):
+438                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
+439                        else:
+440                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
+441                    else:
+442                        sublist = []
+443                        for v in value:
+444                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
+445                            my_substring += f': {self.shape[v]} configurations'
+446                            if isinstance(self.idl[v], range):
+447                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+448                            else:
+449                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+450                            sublist.append(my_substring)
+451
+452                        my_string += '\n' + '\n'.join(sublist)
+453                else:
+454                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
+455                my_string_list.append(my_string)
+456            print('\n'.join(my_string_list))
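
A short sketch of a typical details call, with synthetic data; the seed and the ensemble name are made up:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.default_rng(1).normal(0.5, 0.1, 500)], ['ens_A'])
    obs.gamma_method()   # without it, details only prints the central value
    obs.details()        # result with errors, tau_int and the configuration layout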
 
@@ -3384,20 +3374,20 @@ print details about the ensembles and replica if true.
-
459    def reweight(self, weight):
-460        """Reweight the obs with a given reweighting factor.
-461
-462        Parameters
-463        ----------
-464        weight : Obs
-465            Reweighting factor. An Observable that has to be defined on a superset of the
-466            configurations in obs[i].idl for all i.
-467        all_configs : bool
-468            if True, the reweighted observables are normalized by the average of
-469            the reweighting factor on all configurations in weight.idl and not
-470            on the configurations in obs[i].idl. Default False.
-471        """
-472        return reweight(weight, [self])[0]
+            
458    def reweight(self, weight):
+459        """Reweight the obs with a given reweighting factor.
+460
+461        Parameters
+462        ----------
+463        weight : Obs
+464            Reweighting factor. An Observable that has to be defined on a superset of the
+465            configurations in obs[i].idl for all i.
+466        all_configs : bool
+467            if True, the reweighted observables are normalized by the average of
+468            the reweighting factor on all configurations in weight.idl and not
+469            on the configurations in obs[i].idl. Default False.
+470        """
+471        return reweight(weight, [self])[0]
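
A hedged sketch of the reweighting call; the positive synthetic weight factor and the ensemble name are illustrative assumptions:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    # Weight factor defined on all 1000 configurations of the (made-up) ensemble.
    weight = pe.Obs([np.abs(rng.normal(1.0, 0.05, 1000))], ['ens_A'])
    obs = pe.Obs([rng.normal(0.5, 0.1, 1000)], ['ens_A'])
    obs_rw = obs.reweight(weight)   # forwards to the module-level reweight
    obs_rw.gamma_method()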
 
@@ -3429,17 +3419,17 @@ on the configurations in obs[i].idl. Default False.
-
474    def is_zero_within_error(self, sigma=1):
-475        """Checks whether the observable is zero within 'sigma' standard errors.
-476
-477        Parameters
-478        ----------
-479        sigma : int
-480            Number of standard errors used for the check.
-481
-482        Works properly only after the gamma method has been run.
-483        """
-484        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+            
473    def is_zero_within_error(self, sigma=1):
+474        """Checks whether the observable is zero within 'sigma' standard errors.
+475
+476        Parameters
+477        ----------
+478        sigma : int
+479            Number of standard errors used for the check.
+480
+481        Works properly only after the gamma method has been run.
+482        """
+483        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
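
A small sketch of the check; gamma_method has to be run first so that the standard error _dvalue is set (data and names are synthetic):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.default_rng(3).normal(0.0, 1.0, 500)], ['ens_A'])
    obs.gamma_method()                        # required: sets the standard error
    print(obs.is_zero_within_error(sigma=2))  # True if |value| <= 2 standard errors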
 
@@ -3467,15 +3457,15 @@ Number of standard errors used for the check.
-
486    def is_zero(self, atol=1e-10):
-487        """Checks whether the observable is zero within a given tolerance.
-488
-489        Parameters
-490        ----------
-491        atol : float
-492            Absolute tolerance (for details see numpy documentation).
-493        """
-494        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
+            
485    def is_zero(self, atol=1e-10):
+486        """Checks whether the observable is zero within a given tolerance.
+487
+488        Parameters
+489        ----------
+490        atol : float
+491            Absolute tolerance (for details see numpy documentation).
+492        """
+493        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
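
A sketch of the tolerance check on a trivially vanishing observable; the ensemble name is made up:

    import numpy as np
    import pyerrors as pe

    zero_obs = pe.Obs([np.zeros(100)], ['ens_A'])
    print(zero_obs.is_zero())            # True: value and all deltas vanish
    print((zero_obs + 1e-12).is_zero())  # still True within the default atol=1e-10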
 
@@ -3502,45 +3492,45 @@ Absolute tolerance (for details see numpy documentation).
-
496    def plot_tauint(self, save=None):
-497        """Plot integrated autocorrelation time for each ensemble.
-498
-499        Parameters
-500        ----------
-501        save : str
-502            saves the figure to a file named 'save' if given.
-503        """
-504        if not hasattr(self, 'e_dvalue'):
-505            raise Exception('Run the gamma method first.')
-506
-507        for e, e_name in enumerate(self.mc_names):
-508            fig = plt.figure()
-509            plt.xlabel(r'$W$')
-510            plt.ylabel(r'$\tau_\mathrm{int}$')
-511            length = int(len(self.e_n_tauint[e_name]))
-512            if self.tau_exp[e_name] > 0:
-513                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-514                x_help = np.arange(2 * self.tau_exp[e_name])
-515                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-516                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-517                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-518                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-519                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-520                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-521                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-522            else:
-523                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-524                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-525
-526            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-527            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-528            plt.legend()
-529            plt.xlim(-0.5, xmax)
-530            ylim = plt.ylim()
-531            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-532            plt.draw()
-533            if save:
-534                fig.savefig(save + "_" + str(e))
+            
495    def plot_tauint(self, save=None):
+496        """Plot integrated autocorrelation time for each ensemble.
+497
+498        Parameters
+499        ----------
+500        save : str
+501            saves the figure to a file named 'save' if given.
+502        """
+503        if not hasattr(self, 'e_dvalue'):
+504            raise Exception('Run the gamma method first.')
+505
+506        for e, e_name in enumerate(self.mc_names):
+507            fig = plt.figure()
+508            plt.xlabel(r'$W$')
+509            plt.ylabel(r'$\tau_\mathrm{int}$')
+510            length = int(len(self.e_n_tauint[e_name]))
+511            if self.tau_exp[e_name] > 0:
+512                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+513                x_help = np.arange(2 * self.tau_exp[e_name])
+514                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+515                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+516                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+517                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+518                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+519                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+520                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+521            else:
+522                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+523                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+524
+525            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+526            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+527            plt.legend()
+528            plt.xlim(-0.5, xmax)
+529            ylim = plt.ylim()
+530            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+531            plt.draw()
+532            if save:
+533                fig.savefig(save + "_" + str(e))
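
A usage sketch for the plot, assuming an interactive matplotlib backend; the chain, seed and file name are illustrative:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(4)
    chain = np.zeros(2000)
    for i in range(1, len(chain)):
        chain[i] = 0.8 * chain[i - 1] + rng.normal()
    obs = pe.Obs([chain], ['ens_A'])
    obs.gamma_method(tau_exp=10)     # tau_exp > 0 also draws the attached tail
    obs.plot_tauint(save='tauint')   # one figure per ensemble, suffixed _0, _1, ...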
 
@@ -3567,36 +3557,36 @@ saves the figure to a file named 'save' if.
-
536    def plot_rho(self, save=None):
-537        """Plot the normalized autocorrelation function for each ensemble.
-538
-539        Parameters
-540        ----------
-541        save : str
-542            saves the figure to a file named 'save' if given.
-543        """
-544        if not hasattr(self, 'e_dvalue'):
-545            raise Exception('Run the gamma method first.')
-546        for e, e_name in enumerate(self.mc_names):
-547            fig = plt.figure()
-548            plt.xlabel('W')
-549            plt.ylabel('rho')
-550            length = int(len(self.e_drho[e_name]))
-551            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
-552            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
-553            if self.tau_exp[e_name] > 0:
-554                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
-555                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
-556                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-557                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
-558            else:
-559                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-560                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
-561            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
-562            plt.xlim(-0.5, xmax)
-563            plt.draw()
-564            if save:
-565                fig.savefig(save + "_" + str(e))
+            
535    def plot_rho(self, save=None):
+536        """Plot the normalized autocorrelation function for each ensemble.
+537
+538        Parameters
+539        ----------
+540        save : str
+541            saves the figure to a file named 'save' if given.
+542        """
+543        if not hasattr(self, 'e_dvalue'):
+544            raise Exception('Run the gamma method first.')
+545        for e, e_name in enumerate(self.mc_names):
+546            fig = plt.figure()
+547            plt.xlabel('W')
+548            plt.ylabel('rho')
+549            length = int(len(self.e_drho[e_name]))
+550            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+551            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+552            if self.tau_exp[e_name] > 0:
+553                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+554                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+555                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+556                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+557            else:
+558                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+559                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+560            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+561            plt.xlim(-0.5, xmax)
+562            plt.draw()
+563            if save:
+564                fig.savefig(save + "_" + str(e))
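
Analogously, a sketch for the normalized autocorrelation plot; the mildly correlated synthetic data and names are illustrative:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(5)
    noise = rng.normal(size=800)
    chain = noise + 0.5 * np.roll(noise, 1)   # neighbouring samples are correlated
    obs = pe.Obs([chain], ['ens_A'])
    obs.gamma_method()
    obs.plot_rho()   # rho(W) with the chosen window marked by the dashed line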
 
@@ -3623,27 +3613,27 @@ saves the figure to a file named 'save' if.
-
567    def plot_rep_dist(self):
-568        """Plot replica distribution for each ensemble with more than one replicum."""
-569        if not hasattr(self, 'e_dvalue'):
-570            raise Exception('Run the gamma method first.')
-571        for e, e_name in enumerate(self.mc_names):
-572            if len(self.e_content[e_name]) == 1:
-573                print('No replica distribution for a single replicum (', e_name, ')')
-574                continue
-575            r_length = []
-576            sub_r_mean = 0
-577            for r, r_name in enumerate(self.e_content[e_name]):
-578                r_length.append(len(self.deltas[r_name]))
-579                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
-580            e_N = np.sum(r_length)
-581            sub_r_mean /= e_N
-582            arr = np.zeros(len(self.e_content[e_name]))
-583            for r, r_name in enumerate(self.e_content[e_name]):
-584                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
-585            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-586            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
-587            plt.draw()
+            
566    def plot_rep_dist(self):
+567        """Plot replica distribution for each ensemble with more than one replicum."""
+568        if not hasattr(self, 'e_dvalue'):
+569            raise Exception('Run the gamma method first.')
+570        for e, e_name in enumerate(self.mc_names):
+571            if len(self.e_content[e_name]) == 1:
+572                print('No replica distribution for a single replicum (', e_name, ')')
+573                continue
+574            r_length = []
+575            sub_r_mean = 0
+576            for r, r_name in enumerate(self.e_content[e_name]):
+577                r_length.append(len(self.deltas[r_name]))
+578                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+579            e_N = np.sum(r_length)
+580            sub_r_mean /= e_N
+581            arr = np.zeros(len(self.e_content[e_name]))
+582            for r, r_name in enumerate(self.e_content[e_name]):
+583                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+584            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+585            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+586            plt.draw()
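
A sketch with two replica of one ensemble, using the 'ensemble|replicum' naming convention; all numbers are synthetic:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(6)
    obs = pe.Obs([rng.normal(0.5, 0.1, 400), rng.normal(0.5, 0.1, 400)],
                 ['ens_A|r01', 'ens_A|r02'])
    obs.gamma_method()    # the histogram normalization needs e_dvalue
    obs.plot_rep_dist()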
 
@@ -3663,37 +3653,37 @@ saves the figure to a file named 'save' if.
-
589    def plot_history(self, expand=True):
-590        """Plot derived Monte Carlo history for each ensemble.
-591
-592        Parameters
-593        ----------
-594        expand : bool
-595            show expanded history for irregular Monte Carlo chains (default: True).
-596        """
-597        for e, e_name in enumerate(self.mc_names):
-598            plt.figure()
-599            r_length = []
-600            tmp = []
-601            tmp_expanded = []
-602            for r, r_name in enumerate(self.e_content[e_name]):
-603                tmp.append(self.deltas[r_name] + self.r_values[r_name])
-604                if expand:
-605                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
-606                    r_length.append(len(tmp_expanded[-1]))
-607                else:
-608                    r_length.append(len(tmp[-1]))
-609            e_N = np.sum(r_length)
-610            x = np.arange(e_N)
-611            y_test = np.concatenate(tmp, axis=0)
-612            if expand:
-613                y = np.concatenate(tmp_expanded, axis=0)
-614            else:
-615                y = y_test
-616            plt.errorbar(x, y, fmt='.', markersize=3)
-617            plt.xlim(-0.5, e_N - 0.5)
-618            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
-619            plt.draw()
+            
588    def plot_history(self, expand=True):
+589        """Plot derived Monte Carlo history for each ensemble.
+590
+591        Parameters
+592        ----------
+593        expand : bool
+594            show expanded history for irregular Monte Carlo chains (default: True).
+595        """
+596        for e, e_name in enumerate(self.mc_names):
+597            plt.figure()
+598            r_length = []
+599            tmp = []
+600            tmp_expanded = []
+601            for r, r_name in enumerate(self.e_content[e_name]):
+602                tmp.append(self.deltas[r_name] + self.r_values[r_name])
+603                if expand:
+604                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
+605                    r_length.append(len(tmp_expanded[-1]))
+606                else:
+607                    r_length.append(len(tmp[-1]))
+608            e_N = np.sum(r_length)
+609            x = np.arange(e_N)
+610            y_test = np.concatenate(tmp, axis=0)
+611            if expand:
+612                y = np.concatenate(tmp_expanded, axis=0)
+613            else:
+614                y = y_test
+615            plt.errorbar(x, y, fmt='.', markersize=3)
+616            plt.xlim(-0.5, e_N - 0.5)
+617            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+618            plt.draw()
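
A sketch with an irregular Monte Carlo chain, where expand=True pads the gaps in the history; the idl spacing is an illustrative choice:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    # Only every fourth configuration carries a measurement.
    obs = pe.Obs([rng.normal(0.5, 0.1, 250)], ['ens_A'], idl=[range(0, 1000, 4)])
    obs.plot_history(expand=True)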
 
@@ -3720,29 +3710,29 @@ show expanded history for irregular Monte Carlo chains (default: True).
-
621    def plot_piechart(self, save=None):
-622        """Plot a pie chart which shows the fractional contribution of each
-623        ensemble to the error and return a dictionary containing the fractions.
-624
-625        Parameters
-626        ----------
-627        save : str
-628            saves the figure to a file named 'save' if given.
-629        """
-630        if not hasattr(self, 'e_dvalue'):
-631            raise Exception('Run the gamma method first.')
-632        if np.isclose(0.0, self._dvalue, atol=1e-15):
-633            raise Exception('Error is 0.0')
-634        labels = self.e_names
-635        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
-636        fig1, ax1 = plt.subplots()
-637        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
-638        ax1.axis('equal')
-639        plt.draw()
-640        if save:
-641            fig1.savefig(save)
-642
-643        return dict(zip(self.e_names, sizes))
+            
620    def plot_piechart(self, save=None):
+621        """Plot a pie chart which shows the fractional contribution of each
+622        ensemble to the error and returns a dictionary containing the fractions.
+623
+624        Parameters
+625        ----------
+626        save : str
+627            saves the figure to a file named 'save' if save is not None.
+628        """
+629        if not hasattr(self, 'e_dvalue'):
+630            raise Exception('Run the gamma method first.')
+631        if np.isclose(0.0, self._dvalue, atol=1e-15):
+632            raise Exception('Error is 0.0')
+633        labels = self.e_names
+634        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+635        fig1, ax1 = plt.subplots()
+636        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+637        ax1.axis('equal')
+638        plt.draw()
+639        if save:
+640            fig1.savefig(save)
+641
+642        return dict(zip(self.e_names, sizes))
 
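A usage sketch for plot_piechart (ensemble names and data are illustrative). The gamma method has to be run first, since the method reads e_dvalue; the return value maps ensemble names to their fractional contribution to the squared error:

    import numpy as np
    import pyerrors as pe

    a = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])
    b = pe.Obs([np.random.normal(0.5, 0.2, 500)], ['ens_B'])
    s = a + b
    s.gamma_method()               # required before plot_piechart
    fractions = s.plot_piechart()  # {'ens_A': ..., 'ens_B': ...}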
@@ -3770,34 +3760,34 @@ saves the figure to a file named 'save' if.
-
645    def dump(self, filename, datatype="json.gz", description="", **kwargs):
-646        """Dump the Obs to a file 'name' of chosen format.
-647
-648        Parameters
-649        ----------
-650        filename : str
-651            name of the file to be saved.
-652        datatype : str
-653            Format of the exported file. Supported formats include
-654            "json.gz" and "pickle"
-655        description : str
-656            Description for output file, only relevant for json.gz format.
-657        path : str
-658            specifies a custom path for the file (default '.')
-659        """
-660        if 'path' in kwargs:
-661            file_name = kwargs.get('path') + '/' + filename
-662        else:
-663            file_name = filename
-664
-665        if datatype == "json.gz":
-666            from .input.json import dump_to_json
-667            dump_to_json([self], file_name, description=description)
-668        elif datatype == "pickle":
-669            with open(file_name + '.p', 'wb') as fb:
-670                pickle.dump(self, fb)
-671        else:
-672            raise Exception("Unknown datatype " + str(datatype))
+            
644    def dump(self, filename, datatype="json.gz", description="", **kwargs):
+645        """Dump the Obs to a file 'filename' of chosen format.
+646
+647        Parameters
+648        ----------
+649        filename : str
+650            name of the file to be saved.
+651        datatype : str
+652            Format of the exported file. Supported formats include
+653            "json.gz" and "pickle"
+654        description : str
+655            Description for output file, only relevant for json.gz format.
+656        path : str
+657            specifies a custom path for the file (default '.')
+658        """
+659        if 'path' in kwargs:
+660            file_name = kwargs.get('path') + '/' + filename
+661        else:
+662            file_name = filename
+663
+664        if datatype == "json.gz":
+665            from .input.json import dump_to_json
+666            dump_to_json([self], file_name, description=description)
+667        elif datatype == "pickle":
+668            with open(file_name + '.p', 'wb') as fb:
+669                pickle.dump(self, fb)
+670        else:
+671            raise Exception("Unknown datatype " + str(datatype))
 
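A usage sketch for dump (file and directory names are illustrative). The writers append the appropriate extension, so only the stem is passed:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens'])
    obs.dump('my_obs')                      # writes my_obs.json.gz
    obs.dump('my_obs', datatype='pickle')   # writes my_obs.p
    obs.dump('my_obs', path='results')      # writes results/my_obs.json.gz (directory must exist)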
@@ -3831,31 +3821,31 @@ specifies a custom path for the file (default '.')
-
674    def export_jackknife(self):
-675        """Export jackknife samples from the Obs
-676
-677        Returns
-678        -------
-679        numpy.ndarray
-680            Returns a numpy array of length N + 1 where N is the number of samples
-681            for the given ensemble and replicum. The zeroth entry of the array contains
-682            the mean value of the Obs, entries 1 to N contain the N jackknife samples
-683            derived from the Obs. The current implementation only works for observables
-684            defined on exactly one ensemble and replicum. The derived jackknife samples
-685            should agree with samples from a full jackknife analysis up to O(1/N).
-686        """
-687
-688        if len(self.names) != 1:
-689            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
-690
-691        name = self.names[0]
-692        full_data = self.deltas[name] + self.r_values[name]
-693        n = full_data.size
-694        mean = self.value
-695        tmp_jacks = np.zeros(n + 1)
-696        tmp_jacks[0] = mean
-697        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
-698        return tmp_jacks
+            
673    def export_jackknife(self):
+674        """Export jackknife samples from the Obs
+675
+676        Returns
+677        -------
+678        numpy.ndarray
+679            Returns a numpy array of length N + 1 where N is the number of samples
+680            for the given ensemble and replicum. The zeroth entry of the array contains
+681            the mean value of the Obs, entries 1 to N contain the N jackknife samples
+682            derived from the Obs. The current implementation only works for observables
+683            defined on exactly one ensemble and replicum. The derived jackknife samples
+684            should agree with samples from a full jackknife analysis up to O(1/N).
+685        """
+686
+687        if len(self.names) != 1:
+688            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+689
+690        name = self.names[0]
+691        full_data = self.deltas[name] + self.r_values[name]
+692        n = full_data.size
+693        mean = self.value
+694        tmp_jacks = np.zeros(n + 1)
+695        tmp_jacks[0] = mean
+696        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
+697        return tmp_jacks
 
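A worked check for export_jackknife on a single-ensemble Obs (name and data illustrative). Entry i of the returned array is the leave-one-out average (N*mean - x_i)/(N - 1), so the mean of entries 1 to N reproduces entry 0:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens'])
    jacks = obs.export_jackknife()
    print(jacks[0])           # mean value of the Obs
    print(jacks[1:].mean())   # equals jacks[0] up to floating-point rounding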
@@ -3886,8 +3876,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
826    def sqrt(self):
-827        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
825    def sqrt(self):
+826        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
@@ -3905,8 +3895,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
829    def log(self):
-830        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
828    def log(self):
+829        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3924,8 +3914,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
832    def exp(self):
-833        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
831    def exp(self):
+832        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3943,8 +3933,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
835    def sin(self):
-836        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
834    def sin(self):
+835        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3962,8 +3952,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
838    def cos(self):
-839        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
837    def cos(self):
+838        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3981,8 +3971,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
841    def tan(self):
-842        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
840    def tan(self):
+841        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -4000,8 +3990,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
844    def arcsin(self):
-845        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
843    def arcsin(self):
+844        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -4019,8 +4009,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
847    def arccos(self):
-848        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
846    def arccos(self):
+847        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4038,8 +4028,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
850    def arctan(self):
-851        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
849    def arctan(self):
+850        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4057,8 +4047,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
853    def sinh(self):
-854        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
852    def sinh(self):
+853        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4076,8 +4066,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
856    def cosh(self):
-857        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
855    def cosh(self):
+856        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4095,8 +4085,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
859    def tanh(self):
-860        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
858    def tanh(self):
+859        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4114,8 +4104,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
862    def arcsinh(self):
-863        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
861    def arcsinh(self):
+862        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4133,8 +4123,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
865    def arccosh(self):
-866        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
864    def arccosh(self):
+865        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4152,8 +4142,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
868    def arctanh(self):
-869        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
867    def arctanh(self):
+868        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
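Each of the elementary functions above delegates to derived_observable, either with a hand-coded gradient via man_grad or, for the inverse functions, with autograd performing the differentiation. A sketch for sqrt (data illustrative):

    import numpy as np
    import pyerrors as pe

    a = pe.Obs([np.random.normal(4.0, 0.2, 500)], ['ens'])
    b = a.sqrt()       # propagates the error through d sqrt(x)/dx = 1 / (2 sqrt(x))
    b.gamma_method()
    print(b.value, b.dvalue)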
@@ -4172,115 +4162,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
872class CObs:
-873    """Class for a complex valued observable."""
-874    __slots__ = ['_real', '_imag', 'tag']
-875
-876    def __init__(self, real, imag=0.0):
-877        self._real = real
-878        self._imag = imag
-879        self.tag = None
-880
-881    @property
-882    def real(self):
-883        return self._real
-884
-885    @property
-886    def imag(self):
-887        return self._imag
-888
-889    def gamma_method(self, **kwargs):
-890        """Executes the gamma_method for the real and the imaginary part."""
-891        if isinstance(self.real, Obs):
-892            self.real.gamma_method(**kwargs)
-893        if isinstance(self.imag, Obs):
-894            self.imag.gamma_method(**kwargs)
-895
-896    def is_zero(self):
-897        """Checks whether both real and imaginary part are zero within machine precision."""
-898        return self.real == 0.0 and self.imag == 0.0
-899
-900    def conjugate(self):
-901        return CObs(self.real, -self.imag)
-902
-903    def __add__(self, other):
-904        if isinstance(other, np.ndarray):
-905            return other + self
-906        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-907            return CObs(self.real + other.real,
-908                        self.imag + other.imag)
-909        else:
-910            return CObs(self.real + other, self.imag)
-911
-912    def __radd__(self, y):
-913        return self + y
-914
-915    def __sub__(self, other):
-916        if isinstance(other, np.ndarray):
-917            return -1 * (other - self)
-918        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-919            return CObs(self.real - other.real, self.imag - other.imag)
-920        else:
-921            return CObs(self.real - other, self.imag)
-922
-923    def __rsub__(self, other):
-924        return -1 * (self - other)
-925
-926    def __mul__(self, other):
-927        if isinstance(other, np.ndarray):
-928            return other * self
-929        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-930            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-931                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-932                                               [self.real, other.real, self.imag, other.imag],
-933                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-934                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-935                                               [self.real, other.real, self.imag, other.imag],
-936                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-937            elif getattr(other, 'imag', 0) != 0:
-938                return CObs(self.real * other.real - self.imag * other.imag,
-939                            self.imag * other.real + self.real * other.imag)
-940            else:
-941                return CObs(self.real * other.real, self.imag * other.real)
-942        else:
-943            return CObs(self.real * other, self.imag * other)
-944
-945    def __rmul__(self, other):
-946        return self * other
-947
-948    def __truediv__(self, other):
-949        if isinstance(other, np.ndarray):
-950            return 1 / (other / self)
-951        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-952            r = other.real ** 2 + other.imag ** 2
-953            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-954        else:
-955            return CObs(self.real / other, self.imag / other)
-956
-957    def __rtruediv__(self, other):
-958        r = self.real ** 2 + self.imag ** 2
-959        if hasattr(other, 'real') and hasattr(other, 'imag'):
-960            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-961        else:
-962            return CObs(self.real * other / r, -self.imag * other / r)
-963
-964    def __abs__(self):
-965        return np.sqrt(self.real**2 + self.imag**2)
-966
-967    def __pos__(self):
-968        return self
-969
-970    def __neg__(self):
-971        return -1 * self
-972
-973    def __eq__(self, other):
-974        return self.real == other.real and self.imag == other.imag
-975
-976    def __str__(self):
-977        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-978
-979    def __repr__(self):
-980        return 'CObs[' + str(self) + ']'
+            
871class CObs:
+872    """Class for a complex valued observable."""
+873    __slots__ = ['_real', '_imag', 'tag']
+874
+875    def __init__(self, real, imag=0.0):
+876        self._real = real
+877        self._imag = imag
+878        self.tag = None
+879
+880    @property
+881    def real(self):
+882        return self._real
+883
+884    @property
+885    def imag(self):
+886        return self._imag
+887
+888    def gamma_method(self, **kwargs):
+889        """Executes the gamma_method for the real and the imaginary part."""
+890        if isinstance(self.real, Obs):
+891            self.real.gamma_method(**kwargs)
+892        if isinstance(self.imag, Obs):
+893            self.imag.gamma_method(**kwargs)
+894
+895    def is_zero(self):
+896        """Checks whether both real and imaginary part are zero within machine precision."""
+897        return self.real == 0.0 and self.imag == 0.0
+898
+899    def conjugate(self):
+900        return CObs(self.real, -self.imag)
+901
+902    def __add__(self, other):
+903        if isinstance(other, np.ndarray):
+904            return other + self
+905        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+906            return CObs(self.real + other.real,
+907                        self.imag + other.imag)
+908        else:
+909            return CObs(self.real + other, self.imag)
+910
+911    def __radd__(self, y):
+912        return self + y
+913
+914    def __sub__(self, other):
+915        if isinstance(other, np.ndarray):
+916            return -1 * (other - self)
+917        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+918            return CObs(self.real - other.real, self.imag - other.imag)
+919        else:
+920            return CObs(self.real - other, self.imag)
+921
+922    def __rsub__(self, other):
+923        return -1 * (self - other)
+924
+925    def __mul__(self, other):
+926        if isinstance(other, np.ndarray):
+927            return other * self
+928        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+929            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+930                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+931                                               [self.real, other.real, self.imag, other.imag],
+932                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+933                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+934                                               [self.real, other.real, self.imag, other.imag],
+935                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+936            elif getattr(other, 'imag', 0) != 0:
+937                return CObs(self.real * other.real - self.imag * other.imag,
+938                            self.imag * other.real + self.real * other.imag)
+939            else:
+940                return CObs(self.real * other.real, self.imag * other.real)
+941        else:
+942            return CObs(self.real * other, self.imag * other)
+943
+944    def __rmul__(self, other):
+945        return self * other
+946
+947    def __truediv__(self, other):
+948        if isinstance(other, np.ndarray):
+949            return 1 / (other / self)
+950        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+951            r = other.real ** 2 + other.imag ** 2
+952            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+953        else:
+954            return CObs(self.real / other, self.imag / other)
+955
+956    def __rtruediv__(self, other):
+957        r = self.real ** 2 + self.imag ** 2
+958        if hasattr(other, 'real') and hasattr(other, 'imag'):
+959            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+960        else:
+961            return CObs(self.real * other / r, -self.imag * other / r)
+962
+963    def __abs__(self):
+964        return np.sqrt(self.real**2 + self.imag**2)
+965
+966    def __pos__(self):
+967        return self
+968
+969    def __neg__(self):
+970        return -1 * self
+971
+972    def __eq__(self, other):
+973        return self.real == other.real and self.imag == other.imag
+974
+975    def __str__(self):
+976        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+977
+978    def __repr__(self):
+979        return 'CObs[' + str(self) + ']'
 
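A usage sketch for CObs (names and data illustrative). Real and imaginary parts can themselves be Obs, in which case __mul__ propagates the errors through derived_observable as shown above:

    import numpy as np
    import pyerrors as pe

    re = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens'])
    im = pe.Obs([np.random.normal(0.5, 0.1, 500)], ['ens'])
    c = pe.CObs(re, im)
    mod2 = c * c.conjugate()   # |c|^2; the imaginary part vanishes
    mod2.gamma_method()
    print(mod2.real)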
@@ -4298,10 +4288,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
876    def __init__(self, real, imag=0.0):
-877        self._real = real
-878        self._imag = imag
-879        self.tag = None
+            
875    def __init__(self, real, imag=0.0):
+876        self._real = real
+877        self._imag = imag
+878        self.tag = None
 
@@ -4319,12 +4309,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
889    def gamma_method(self, **kwargs):
-890        """Executes the gamma_method for the real and the imaginary part."""
-891        if isinstance(self.real, Obs):
-892            self.real.gamma_method(**kwargs)
-893        if isinstance(self.imag, Obs):
-894            self.imag.gamma_method(**kwargs)
+            
888    def gamma_method(self, **kwargs):
+889        """Executes the gamma_method for the real and the imaginary part."""
+890        if isinstance(self.real, Obs):
+891            self.real.gamma_method(**kwargs)
+892        if isinstance(self.imag, Obs):
+893            self.imag.gamma_method(**kwargs)
 
@@ -4344,9 +4334,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
896    def is_zero(self):
-897        """Checks whether both real and imaginary part are zero within machine precision."""
-898        return self.real == 0.0 and self.imag == 0.0
+            
895    def is_zero(self):
+896        """Checks whether both real and imaginary part are zero within machine precision."""
+897        return self.real == 0.0 and self.imag == 0.0
 
@@ -4366,8 +4356,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
900    def conjugate(self):
-901        return CObs(self.real, -self.imag)
+            
899    def conjugate(self):
+900        return CObs(self.real, -self.imag)
 
@@ -4386,178 +4376,174 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
1105def derived_observable(func, data, array_mode=False, **kwargs):
-1106    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1107
-1108    Parameters
-1109    ----------
-1110    func : object
-1111        arbitrary function of the form func(data, **kwargs). For the
-1112        automatic differentiation to work, all numpy functions have to have
-1113        the autograd wrapper (use 'import autograd.numpy as anp').
-1114    data : list
-1115        list of Obs, e.g. [obs1, obs2, obs3].
-1116    num_grad : bool
-1117        if True, numerical derivatives are used instead of autograd
-1118        (default False). To control the numerical differentiation the
-1119        kwargs of numdifftools.step_generators.MaxStepGenerator
-1120        can be used.
-1121    man_grad : list
-1122        manually supply a list or an array which contains the jacobian
-1123        of func. Use cautiously, supplying the wrong derivative will
-1124        not be intercepted.
-1125
-1126    Notes
-1127    -----
-1128    For simple mathematical operations it can be practical to use anonymous
-1129    functions. For the ratio of two observables one can e.g. use
-1130
-1131    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1132    """
-1133
-1134    data = np.asarray(data)
-1135    raveled_data = data.ravel()
-1136
-1137    # Workaround for matrix operations containing non Obs data
-1138    if not all(isinstance(x, Obs) for x in raveled_data):
-1139        for i in range(len(raveled_data)):
-1140            if isinstance(raveled_data[i], (int, float)):
-1141                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1142
-1143    allcov = {}
-1144    for o in raveled_data:
-1145        for name in o.cov_names:
-1146            if name in allcov:
-1147                if not np.allclose(allcov[name], o.covobs[name].cov):
-1148                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1149            else:
-1150                allcov[name] = o.covobs[name].cov
-1151
-1152    n_obs = len(raveled_data)
-1153    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1154    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1155    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1156
-1157    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
-1158    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-1159
-1160    if data.ndim == 1:
-1161        values = np.array([o.value for o in data])
-1162    else:
-1163        values = np.vectorize(lambda x: x.value)(data)
+            
1104def derived_observable(func, data, array_mode=False, **kwargs):
+1105    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1106
+1107    Parameters
+1108    ----------
+1109    func : object
+1110        arbitrary function of the form func(data, **kwargs). For the
+1111        automatic differentiation to work, all numpy functions have to have
+1112        the autograd wrapper (use 'import autograd.numpy as anp').
+1113    data : list
+1114        list of Obs, e.g. [obs1, obs2, obs3].
+1115    num_grad : bool
+1116        if True, numerical derivatives are used instead of autograd
+1117        (default False). To control the numerical differentiation the
+1118        kwargs of numdifftools.step_generators.MaxStepGenerator
+1119        can be used.
+1120    man_grad : list
+1121        manually supply a list or an array which contains the jacobian
+1122        of func. Use cautiously, supplying the wrong derivative will
+1123        not be intercepted.
+1124
+1125    Notes
+1126    -----
+1127    For simple mathematical operations it can be practical to use anonymous
+1128    functions. For the ratio of two observables one can e.g. use
+1129
+1130    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1131    """
+1132
+1133    data = np.asarray(data)
+1134    raveled_data = data.ravel()
+1135
+1136    # Workaround for matrix operations containing non Obs data
+1137    if not all(isinstance(x, Obs) for x in raveled_data):
+1138        for i in range(len(raveled_data)):
+1139            if isinstance(raveled_data[i], (int, float)):
+1140                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1141
+1142    allcov = {}
+1143    for o in raveled_data:
+1144        for name in o.cov_names:
+1145            if name in allcov:
+1146                if not np.allclose(allcov[name], o.covobs[name].cov):
+1147                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1148            else:
+1149                allcov[name] = o.covobs[name].cov
+1150
+1151    n_obs = len(raveled_data)
+1152    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1153    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1154    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1155
+1156    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1157
+1158    if data.ndim == 1:
+1159        values = np.array([o.value for o in data])
+1160    else:
+1161        values = np.vectorize(lambda x: x.value)(data)
+1162
+1163    new_values = func(values, **kwargs)
 1164
-1165    new_values = func(values, **kwargs)
+1165    multi = int(isinstance(new_values, np.ndarray))
 1166
-1167    multi = int(isinstance(new_values, np.ndarray))
-1168
-1169    new_r_values = {}
-1170    new_idl_d = {}
-1171    for name in new_sample_names:
-1172        idl = []
-1173        tmp_values = np.zeros(n_obs)
-1174        for i, item in enumerate(raveled_data):
-1175            tmp_values[i] = item.r_values.get(name, item.value)
-1176            tmp_idl = item.idl.get(name)
-1177            if tmp_idl is not None:
-1178                idl.append(tmp_idl)
-1179        if multi > 0:
-1180            tmp_values = np.array(tmp_values).reshape(data.shape)
-1181        new_r_values[name] = func(tmp_values, **kwargs)
-1182        new_idl_d[name] = _merge_idx(idl)
-1183        if not is_merged[name]:
-1184            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
-1185
-1186    if 'man_grad' in kwargs:
-1187        deriv = np.asarray(kwargs.get('man_grad'))
-1188        if new_values.shape + data.shape != deriv.shape:
-1189            raise Exception('Manual derivative does not have correct shape.')
-1190    elif kwargs.get('num_grad') is True:
-1191        if multi > 0:
-1192            raise Exception('Multi mode currently not supported for numerical derivative')
-1193        options = {
-1194            'base_step': 0.1,
-1195            'step_ratio': 2.5}
-1196        for key in options.keys():
-1197            kwarg = kwargs.get(key)
-1198            if kwarg is not None:
-1199                options[key] = kwarg
-1200        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1201        if tmp_df.size == 1:
-1202            deriv = np.array([tmp_df.real])
-1203        else:
-1204            deriv = tmp_df.real
-1205    else:
-1206        deriv = jacobian(func)(values, **kwargs)
+1167    new_r_values = {}
+1168    new_idl_d = {}
+1169    for name in new_sample_names:
+1170        idl = []
+1171        tmp_values = np.zeros(n_obs)
+1172        for i, item in enumerate(raveled_data):
+1173            tmp_values[i] = item.r_values.get(name, item.value)
+1174            tmp_idl = item.idl.get(name)
+1175            if tmp_idl is not None:
+1176                idl.append(tmp_idl)
+1177        if multi > 0:
+1178            tmp_values = np.array(tmp_values).reshape(data.shape)
+1179        new_r_values[name] = func(tmp_values, **kwargs)
+1180        new_idl_d[name] = _merge_idx(idl)
+1181
+1182    if 'man_grad' in kwargs:
+1183        deriv = np.asarray(kwargs.get('man_grad'))
+1184        if new_values.shape + data.shape != deriv.shape:
+1185            raise Exception('Manual derivative does not have correct shape.')
+1186    elif kwargs.get('num_grad') is True:
+1187        if multi > 0:
+1188            raise Exception('Multi mode currently not supported for numerical derivative')
+1189        options = {
+1190            'base_step': 0.1,
+1191            'step_ratio': 2.5}
+1192        for key in options.keys():
+1193            kwarg = kwargs.get(key)
+1194            if kwarg is not None:
+1195                options[key] = kwarg
+1196        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1197        if tmp_df.size == 1:
+1198            deriv = np.array([tmp_df.real])
+1199        else:
+1200            deriv = tmp_df.real
+1201    else:
+1202        deriv = jacobian(func)(values, **kwargs)
+1203
+1204    final_result = np.zeros(new_values.shape, dtype=object)
+1205
+1206    if array_mode is True:
 1207
-1208    final_result = np.zeros(new_values.shape, dtype=object)
-1209
-1210    if array_mode is True:
+1208        class _Zero_grad():
+1209            def __init__(self, N):
+1210                self.grad = np.zeros((N, 1))
 1211
-1212        class _Zero_grad():
-1213            def __init__(self, N):
-1214                self.grad = np.zeros((N, 1))
-1215
-1216        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1217        d_extracted = {}
-1218        g_extracted = {}
-1219        for name in new_sample_names:
-1220            d_extracted[name] = []
-1221            ens_length = len(new_idl_d[name])
-1222            for i_dat, dat in enumerate(data):
-1223                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1224        for name in new_cov_names:
-1225            g_extracted[name] = []
-1226            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1227            for i_dat, dat in enumerate(data):
-1228                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1229
-1230    for i_val, new_val in np.ndenumerate(new_values):
-1231        new_deltas = {}
-1232        new_grad = {}
-1233        if array_mode is True:
-1234            for name in new_sample_names:
-1235                ens_length = d_extracted[name][0].shape[-1]
-1236                new_deltas[name] = np.zeros(ens_length)
-1237                for i_dat, dat in enumerate(d_extracted[name]):
-1238                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1239            for name in new_cov_names:
-1240                new_grad[name] = 0
-1241                for i_dat, dat in enumerate(g_extracted[name]):
-1242                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1243        else:
-1244            for j_obs, obs in np.ndenumerate(data):
-1245                for name in obs.names:
-1246                    if name in obs.cov_names:
-1247                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1248                    else:
-1249                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1250
-1251        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1252
-1253        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1254            raise Exception('The same name has been used for deltas and covobs!')
-1255        new_samples = []
-1256        new_means = []
-1257        new_idl = []
-1258        new_names_obs = []
-1259        for name in new_names:
-1260            if name not in new_covobs:
-1261                new_samples.append(new_deltas[name])
-1262                new_idl.append(new_idl_d[name])
-1263                new_means.append(new_r_values[name][i_val])
-1264                new_names_obs.append(name)
-1265        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1266        for name in new_covobs:
-1267            final_result[i_val].names.append(name)
-1268        final_result[i_val]._covobs = new_covobs
-1269        final_result[i_val]._value = new_val
-1270        final_result[i_val].is_merged = is_merged
-1271        final_result[i_val].reweighted = reweighted
-1272
-1273    if multi == 0:
-1274        final_result = final_result.item()
-1275
-1276    return final_result
+1212        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1213        d_extracted = {}
+1214        g_extracted = {}
+1215        for name in new_sample_names:
+1216            d_extracted[name] = []
+1217            ens_length = len(new_idl_d[name])
+1218            for i_dat, dat in enumerate(data):
+1219                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1220        for name in new_cov_names:
+1221            g_extracted[name] = []
+1222            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1223            for i_dat, dat in enumerate(data):
+1224                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1225
+1226    for i_val, new_val in np.ndenumerate(new_values):
+1227        new_deltas = {}
+1228        new_grad = {}
+1229        if array_mode is True:
+1230            for name in new_sample_names:
+1231                ens_length = d_extracted[name][0].shape[-1]
+1232                new_deltas[name] = np.zeros(ens_length)
+1233                for i_dat, dat in enumerate(d_extracted[name]):
+1234                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1235            for name in new_cov_names:
+1236                new_grad[name] = 0
+1237                for i_dat, dat in enumerate(g_extracted[name]):
+1238                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1239        else:
+1240            for j_obs, obs in np.ndenumerate(data):
+1241                for name in obs.names:
+1242                    if name in obs.cov_names:
+1243                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1244                    else:
+1245                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1246
+1247        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1248
+1249        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1250            raise Exception('The same name has been used for deltas and covobs!')
+1251        new_samples = []
+1252        new_means = []
+1253        new_idl = []
+1254        new_names_obs = []
+1255        for name in new_names:
+1256            if name not in new_covobs:
+1257                new_samples.append(new_deltas[name])
+1258                new_idl.append(new_idl_d[name])
+1259                new_means.append(new_r_values[name][i_val])
+1260                new_names_obs.append(name)
+1261        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1262        for name in new_covobs:
+1263            final_result[i_val].names.append(name)
+1264        final_result[i_val]._covobs = new_covobs
+1265        final_result[i_val]._value = new_val
+1266        final_result[i_val].reweighted = reweighted
+1267
+1268    if multi == 0:
+1269        final_result = final_result.item()
+1270
+1271    return final_result
 
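A sketch expanding on the docstring example (derived_observable lives in pyerrors.obs; data illustrative). Any function built from autograd-wrapped numpy calls can be propagated:

    import numpy as np
    import autograd.numpy as anp
    import pyerrors as pe
    from pyerrors.obs import derived_observable

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens'])
    obs2 = pe.Obs([np.random.normal(2.0, 0.1, 500)], ['ens'])
    ratio = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
    logsum = derived_observable(lambda x: anp.log(x[0] + x[1]), [obs1, obs2])
    logsum.gamma_method()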
@@ -4604,47 +4590,46 @@ functions. For the ratio of two observables one can e.g. use

-
1313def reweight(weight, obs, **kwargs):
-1314    """Reweight a list of observables.
-1315
-1316    Parameters
-1317    ----------
-1318    weight : Obs
-1319        Reweighting factor. An Observable that has to be defined on a superset of the
-1320        configurations in obs[i].idl for all i.
-1321    obs : list
-1322        list of Obs, e.g. [obs1, obs2, obs3].
-1323    all_configs : bool
-1324        if True, the reweighted observables are normalized by the average of
-1325        the reweighting factor on all configurations in weight.idl and not
-1326        on the configurations in obs[i].idl. Default False.
-1327    """
-1328    result = []
-1329    for i in range(len(obs)):
-1330        if len(obs[i].cov_names):
-1331            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1332        if not set(obs[i].names).issubset(weight.names):
-1333            raise Exception('Error: Ensembles do not fit')
-1334        for name in obs[i].names:
-1335            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1336                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1337        new_samples = []
-1338        w_deltas = {}
-1339        for name in sorted(obs[i].names):
-1340            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1341            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1342        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+            
1308def reweight(weight, obs, **kwargs):
+1309    """Reweight a list of observables.
+1310
+1311    Parameters
+1312    ----------
+1313    weight : Obs
+1314        Reweighting factor. An Observable that has to be defined on a superset of the
+1315        configurations in obs[i].idl for all i.
+1316    obs : list
+1317        list of Obs, e.g. [obs1, obs2, obs3].
+1318    all_configs : bool
+1319        if True, the reweighted observables are normalized by the average of
+1320        the reweighting factor on all configurations in weight.idl and not
+1321        on the configurations in obs[i].idl. Default False.
+1322    """
+1323    result = []
+1324    for i in range(len(obs)):
+1325        if len(obs[i].cov_names):
+1326            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1327        if not set(obs[i].names).issubset(weight.names):
+1328            raise Exception('Error: Ensembles do not fit')
+1329        for name in obs[i].names:
+1330            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1331                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1332        new_samples = []
+1333        w_deltas = {}
+1334        for name in sorted(obs[i].names):
+1335            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1336            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1337        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1338
+1339        if kwargs.get('all_configs'):
+1340            new_weight = weight
+1341        else:
+1342            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
 1343
-1344        if kwargs.get('all_configs'):
-1345            new_weight = weight
-1346        else:
-1347            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1348
-1349        result.append(tmp_obs / new_weight)
-1350        result[-1].reweighted = True
-1351        result[-1].is_merged = obs[i].is_merged
-1352
-1353    return result
+1344        result.append(tmp_obs / new_weight)
+1345        result[-1].reweighted = True
+1346
+1347    return result
 
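A usage sketch for reweight (names and data illustrative). The weight has to be defined on a superset of the configurations of every entry in obs:

    import numpy as np
    import pyerrors as pe

    w = pe.Obs([np.random.normal(1.0, 0.05, 500)], ['ens'])  # reweighting factor
    o = pe.Obs([np.random.normal(0.7, 0.2, 500)], ['ens'])
    [o_rw] = pe.reweight(w, [o])
    o_rw.gamma_method()
    print(o_rw.reweighted)   # True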
@@ -4678,48 +4663,47 @@ on the configurations in obs[i].idl. Default False.
-
1356def correlate(obs_a, obs_b):
-1357    """Correlate two observables.
-1358
-1359    Parameters
-1360    ----------
-1361    obs_a : Obs
-1362        First observable
-1363    obs_b : Obs
-1364        Second observable
-1365
-1366    Notes
-1367    -----
-1368    Keep in mind to only correlate primary observables which have not been reweighted
-1369    yet. The reweighting has to be applied after correlating the observables.
-1370    Currently only works if ensembles are identical (this is not strictly necessary).
-1371    """
-1372
-1373    if sorted(obs_a.names) != sorted(obs_b.names):
-1374        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
-1375    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1376        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1377    for name in obs_a.names:
-1378        if obs_a.shape[name] != obs_b.shape[name]:
-1379            raise Exception('Shapes of ensemble', name, 'do not fit')
-1380        if obs_a.idl[name] != obs_b.idl[name]:
-1381            raise Exception('idl of ensemble', name, 'do not fit')
-1382
-1383    if obs_a.reweighted is True:
-1384        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1385    if obs_b.reweighted is True:
-1386        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+            
1350def correlate(obs_a, obs_b):
+1351    """Correlate two observables.
+1352
+1353    Parameters
+1354    ----------
+1355    obs_a : Obs
+1356        First observable
+1357    obs_b : Obs
+1358        Second observable
+1359
+1360    Notes
+1361    -----
+1362    Keep in mind to only correlate primary observables which have not been reweighted
+1363    yet. The reweighting has to be applied after correlating the observables.
+1364    Currently only works if ensembles are identical (this is not strictly necessary).
+1365    """
+1366
+1367    if sorted(obs_a.names) != sorted(obs_b.names):
+1368        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+1369    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1370        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1371    for name in obs_a.names:
+1372        if obs_a.shape[name] != obs_b.shape[name]:
+1373            raise Exception('Shapes of ensemble', name, 'do not fit')
+1374        if obs_a.idl[name] != obs_b.idl[name]:
+1375            raise Exception('idl of ensemble', name, 'do not fit')
+1376
+1377    if obs_a.reweighted is True:
+1378        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1379    if obs_b.reweighted is True:
+1380        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1381
+1382    new_samples = []
+1383    new_idl = []
+1384    for name in sorted(obs_a.names):
+1385        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1386        new_idl.append(obs_a.idl[name])
 1387
-1388    new_samples = []
-1389    new_idl = []
-1390    for name in sorted(obs_a.names):
-1391        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1392        new_idl.append(obs_a.idl[name])
-1393
-1394    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1395    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
-1396    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1397    return o
+1388    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1389    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1390    return o
 
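A usage sketch for correlate (names and data illustrative). Both observables have to live on identical ensembles with identical idl:

    import numpy as np
    import pyerrors as pe

    a = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens'])
    b = pe.Obs([np.random.normal(0.5, 0.1, 500)], ['ens'])
    ab = pe.correlate(a, b)   # samples of a*b, configuration by configuration
    ab.gamma_method()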
@@ -4754,74 +4738,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-
1400def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1401    r'''Calculates the error covariance matrix of a set of observables.
-1402
-1403    WARNING: This function should be used with care, especially for observables with support on multiple
-1404             ensembles with differing autocorrelations. See the notes below for details.
-1405
-1406    The gamma method has to be applied first to all observables.
-1407
-1408    Parameters
-1409    ----------
-1410    obs : list or numpy.ndarray
-1411        List or one dimensional array of Obs
-1412    visualize : bool
-1413        If True plots the corresponding normalized correlation matrix (default False).
-1414    correlation : bool
-1415        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1416    smooth : None or int
-1417        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
-1418        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1419        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1420        small ones.
-1421
-1422    Notes
-1423    -----
-1424    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1425    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1426    in the absence of autocorrelation.
-1427    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1428    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1429    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1430    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1431    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1432    '''
-1433
-1434    length = len(obs)
-1435
-1436    max_samples = np.max([o.N for o in obs])
-1437    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1438        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
-1439
-1440    cov = np.zeros((length, length))
-1441    for i in range(length):
-1442        for j in range(i, length):
-1443            cov[i, j] = _covariance_element(obs[i], obs[j])
-1444    cov = cov + cov.T - np.diag(np.diag(cov))
-1445
-1446    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1447
-1448    if isinstance(smooth, int):
-1449        corr = _smooth_eigenvalues(corr, smooth)
-1450
-1451    if visualize:
-1452        plt.matshow(corr, vmin=-1, vmax=1)
-1453        plt.set_cmap('RdBu')
-1454        plt.colorbar()
-1455        plt.draw()
-1456
-1457    if correlation is True:
-1458        return corr
+            
1393def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1394    r'''Calculates the error covariance matrix of a set of observables.
+1395
+1396    WARNING: This function should be used with care, especially for observables with support on multiple
+1397             ensembles with differing autocorrelations. See the notes below for details.
+1398
+1399    The gamma method has to be applied first to all observables.
+1400
+1401    Parameters
+1402    ----------
+1403    obs : list or numpy.ndarray
+1404        List or one dimensional array of Obs
+1405    visualize : bool
+1406        If True plots the corresponding normalized correlation matrix (default False).
+1407    correlation : bool
+1408        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1409    smooth : None or int
+1410        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
+1411        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
+1412        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
+1413        small ones.
+1414
+1415    Notes
+1416    -----
+1417    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1418    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1419    in the absence of autocorrelation.
+1420    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1421    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1422    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1423    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1424    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1425    '''
+1426
+1427    length = len(obs)
+1428
+1429    max_samples = np.max([o.N for o in obs])
+1430    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1431        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1432
+1433    cov = np.zeros((length, length))
+1434    for i in range(length):
+1435        for j in range(i, length):
+1436            cov[i, j] = _covariance_element(obs[i], obs[j])
+1437    cov = cov + cov.T - np.diag(np.diag(cov))
+1438
+1439    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1440
+1441    if isinstance(smooth, int):
+1442        corr = _smooth_eigenvalues(corr, smooth)
+1443
+1444    if visualize:
+1445        plt.matshow(corr, vmin=-1, vmax=1)
+1446        plt.set_cmap('RdBu')
+1447        plt.colorbar()
+1448        plt.draw()
+1449
+1450    if correlation is True:
+1451        return corr
+1452
+1453    errors = [o.dvalue for o in obs]
+1454    cov = np.diag(errors) @ corr @ np.diag(errors)
+1455
+1456    eigenvalues = np.linalg.eigh(cov)[0]
+1457    if not np.all(eigenvalues >= 0):
+1458        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
 1459
-1460    errors = [o.dvalue for o in obs]
-1461    cov = np.diag(errors) @ corr @ np.diag(errors)
-1462
-1463    eigenvalues = np.linalg.eigh(cov)[0]
-1464    if not np.all(eigenvalues >= 0):
-1465        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1466
-1467    return cov
+1460    return cov
 
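A usage sketch for covariance (names and data illustrative). The gamma method has to be run on every entry first:

    import numpy as np
    import pyerrors as pe

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens'])
    obs2 = pe.Obs([np.random.normal(0.5, 0.2, 500)], ['ens'])
    for o in (obs1, obs2):
        o.gamma_method()
    cov = pe.covariance([obs1, obs2])                      # 2x2 error covariance matrix
    corr = pe.covariance([obs1, obs2], correlation=True)   # normalized, unit diagonal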
@@ -4873,24 +4857,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-
1547def import_jackknife(jacks, name, idl=None):
-1548    """Imports jackknife samples and returns an Obs
-1549
-1550    Parameters
-1551    ----------
-1552    jacks : numpy.ndarray
-1553        numpy array containing the mean value as zeroth entry and
-1554        the N jackknife samples as first to Nth entry.
-1555    name : str
-1556        name of the ensemble the samples are defined on.
-1557    """
-1558    length = len(jacks) - 1
-1559    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1560    samples = jacks[1:] @ prj
-1561    mean = np.mean(samples)
-1562    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1563    new_obs._value = jacks[0]
-1564    return new_obs
+            
1540def import_jackknife(jacks, name, idl=None):
+1541    """Imports jackknife samples and returns an Obs
+1542
+1543    Parameters
+1544    ----------
+1545    jacks : numpy.ndarray
+1546        numpy array containing the mean value as zeroth entry and
+1547        the N jackknife samples as first to Nth entry.
+1548    name : str
+1549        name of the ensemble the samples are defined on.
+1550    """
+1551    length = len(jacks) - 1
+1552    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1553    samples = jacks[1:] @ prj
+1554    mean = np.mean(samples)
+1555    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1556    new_obs._value = jacks[0]
+1557    return new_obs
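A round-trip sketch for import_jackknife, assuming that Obs.export_jackknife produces the complementary layout (mean value as zeroth entry, followed by the N jackknife samples); note that the jackknife representation does not preserve autocorrelation information:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.default_rng(1).normal(size=100)], ['ensemble1'])
    obs.gamma_method()

    jacks = obs.export_jackknife()            # [mean, jack_1, ..., jack_N]
    reconstructed = pe.import_jackknife(jacks, 'ensemble1')
    reconstructed.gamma_method()
    print(obs.value, reconstructed.value)     # central values agree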
 
@@ -4920,35 +4904,34 @@ name of the ensemble the samples are defined on.
-1567def merge_obs(list_of_obs):
-1568    """Combine all observables in list_of_obs into one new observable
-1569
-1570    Parameters
-1571    ----------
-1572    list_of_obs : list
-1573        list of the Obs object to be combined
-1574
-1575    Notes
-1576    -----
-1577    It is not possible to combine obs which are based on the same replicum
-1578    """
-1579    replist = [item for obs in list_of_obs for item in obs.names]
-1580    if (len(replist) == len(set(replist))) is False:
-1581        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1582    if any([len(o.cov_names) for o in list_of_obs]):
-1583        raise Exception('Not possible to merge data that contains covobs!')
-1584    new_dict = {}
-1585    idl_dict = {}
-1586    for o in list_of_obs:
-1587        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1588                        for key in set(o.deltas) | set(o.r_values)})
-1589        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1590
-1591    names = sorted(new_dict.keys())
-1592    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1593    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
-1594    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1595    return o
+1560def merge_obs(list_of_obs):
+1561    """Combine all observables in list_of_obs into one new observable
+1562
+1563    Parameters
+1564    ----------
+1565    list_of_obs : list
+1566        list of the Obs objects to be combined
+1567
+1568    Notes
+1569    -----
+1570    It is not possible to combine obs which are based on the same replicum
+1571    """
+1572    replist = [item for obs in list_of_obs for item in obs.names]
+1573    if len(replist) != len(set(replist)):
+1574        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1575    if any([len(o.cov_names) for o in list_of_obs]):
+1576        raise Exception('Not possible to merge data that contains covobs!')
+1577    new_dict = {}
+1578    idl_dict = {}
+1579    for o in list_of_obs:
+1580        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1581                        for key in set(o.deltas) | set(o.r_values)})
+1582        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1583
+1584    names = sorted(new_dict.keys())
+1585    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1586    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1587    return o
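A sketch of merge_obs combining measurements from two disjoint replica, assuming pyerrors' 'ensemble|replicum' naming convention; passing the same replicum twice would trigger the duplicate-replica exception shown above:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    obs_r1 = pe.Obs([rng.normal(size=100)], ['ensemble1|r01'])
    obs_r2 = pe.Obs([rng.normal(size=100)], ['ensemble1|r02'])

    merged = pe.merge_obs([obs_r1, obs_r2])   # one Obs carrying both replica
    merged.gamma_method()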
 
@@ -4979,47 +4962,47 @@ list of the Obs object to be combined
-1598def cov_Obs(means, cov, name, grad=None):
-1599    """Create an Obs based on mean(s) and a covariance matrix
-1600
-1601    Parameters
-1602    ----------
-1603    mean : list of floats or float
-1604        N mean value(s) of the new Obs
-1605    cov : list or array
-1606        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1607    name : str
-1608        identifier for the covariance matrix
-1609    grad : list or array
-1610        Gradient of the Covobs wrt. the means belonging to cov.
-1611    """
-1612
-1613    def covobs_to_obs(co):
-1614        """Make an Obs out of a Covobs
-1615
-1616        Parameters
-1617        ----------
-1618        co : Covobs
-1619            Covobs to be embedded into the Obs
-1620        """
-1621        o = Obs([], [], means=[])
-1622        o._value = co.value
-1623        o.names.append(co.name)
-1624        o._covobs[co.name] = co
-1625        o._dvalue = np.sqrt(co.errsq())
-1626        return o
-1627
-1628    ol = []
-1629    if isinstance(means, (float, int)):
-1630        means = [means]
-1631
-1632    for i in range(len(means)):
-1633        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1634    if ol[0].covobs[name].N != len(means):
-1635        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1636    if len(ol) == 1:
-1637        return ol[0]
-1638    return ol
+1590def cov_Obs(means, cov, name, grad=None):
+1591    """Create an Obs based on mean(s) and a covariance matrix
+1592
+1593    Parameters
+1594    ----------
+1595    means : list of floats or float
+1596        N mean value(s) of the new Obs
+1597    cov : list or array
+1598        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1599    name : str
+1600        identifier for the covariance matrix
+1601    grad : list or array
+1602        Gradient of the Covobs w.r.t. the means belonging to cov.
+1603    """
+1604
+1605    def covobs_to_obs(co):
+1606        """Make an Obs out of a Covobs
+1607
+1608        Parameters
+1609        ----------
+1610        co : Covobs
+1611            Covobs to be embedded into the Obs
+1612        """
+1613        o = Obs([], [], means=[])
+1614        o._value = co.value
+1615        o.names.append(co.name)
+1616        o._covobs[co.name] = co
+1617        o._dvalue = np.sqrt(co.errsq())
+1618        return o
+1619
+1620    ol = []
+1621    if isinstance(means, (float, int)):
+1622        means = [means]
+1623
+1624    for i in range(len(means)):
+1625        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1626    if ol[0].covobs[name].N != len(means):
+1627        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
+1628    if len(ol) == 1:
+1629        return ol[0]
+1630    return ol
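A sketch of cov_Obs covering the 0d and 2d cases described in the docstring; all numbers are invented for illustration, and no gamma_method call is needed because the error is set directly from the covariance (see the errsq call above):

    import pyerrors as pe

    # Scalar external input with a plain variance (0d covariance)
    renorm = pe.cov_Obs(0.751, 0.005 ** 2, 'renormalization constant')
    print(renorm)      # prints value with error, e.g. 0.7510(50)

    # Two correlated inputs sharing a 2x2 covariance matrix; a list of Obs is returned
    in1, in2 = pe.cov_Obs([1.23, 4.56], [[0.01, 0.002], [0.002, 0.02]], 'external input')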