[Fix] Further type fixes in fits and sfcf

Fabian Joswig 2025-01-05 16:54:22 +01:00
parent 9c960ae24c
commit 5376a8ad44
2 changed files with 41 additions and 44 deletions

pyerrors/fits.py

@@ -55,12 +55,12 @@ class Fit_result(Sequence):
def __getitem__(self, idx: int) -> Obs:
if self.fit_parameters is None:
raise ValueError('No fit parameters available.')
raise TypeError('No fit parameters available.')
return self.fit_parameters[idx]
def __len__(self) -> int:
if self.fit_parameters is None:
raise ValueError('No fit parameters available.')
raise TypeError('No fit parameters available.')
return len(self.fit_parameters)
def gamma_method(self, **kwargs):
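Note on the hunk above: raising inside the None check lets a type checker narrow fit_parameters from Optional to a concrete list before it is indexed or measured. A minimal, self-contained sketch of the same pattern (MiniResult and the float payload are illustrative, not part of pyerrors):

from collections.abc import Sequence
from typing import Optional


class MiniResult(Sequence):
    # Illustrative stand-in for Fit_result: the attribute always exists,
    # but is still None if no fit has been performed yet.
    def __init__(self, fit_parameters: Optional[list[float]] = None) -> None:
        self.fit_parameters = fit_parameters

    def __getitem__(self, idx: int) -> float:
        if self.fit_parameters is None:
            # Raising here narrows the type, so the return line below is
            # checked against list[float] instead of Optional[list[float]].
            raise TypeError('No fit parameters available.')
        return self.fit_parameters[idx]

    def __len__(self) -> int:
        if self.fit_parameters is None:
            raise TypeError('No fit parameters available.')
        return len(self.fit_parameters)


print(len(MiniResult([0.5, 1.5])))  # 2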
@@ -71,19 +71,19 @@ class Fit_result(Sequence):
def __str__(self) -> str:
my_str = 'Goodness of fit:\n'
if hasattr(self, 'chisquare_by_dof'):
if self.chisquare_by_dof is not None:
my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
elif hasattr(self, 'residual_variance'):
elif self.residual_variance is not None:
my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
if hasattr(self, 'chisquare_by_expected_chisquare'):
if self.chisquare_by_expected_chisquare is not None:
my_str += '\u03C7\u00b2/\u03C7\u00b2exp = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
if hasattr(self, 'p_value'):
if self.p_value is not None:
my_str += 'p-value = ' + f'{self.p_value:2.4f}' + '\n'
if hasattr(self, 't2_p_value'):
if self.t2_p_value is not None:
my_str += 't\u00B2p-value = ' + f'{self.t2_p_value:2.4f}' + '\n'
my_str += 'Fit parameters:\n'
if self.fit_parameters is None:
raise ValueError('No fit parameters available.')
raise TypeError('No fit parameters available.')
for i_par, par in enumerate(self.fit_parameters):
my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
return my_str
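Note on the hunk above: once these attributes are always created (for example initialised to None), hasattr() is always True and can no longer distinguish "not computed" from "computed"; comparing against None does, and it also narrows Optional[float] for the format specifier. A small illustrative example (Report is not a pyerrors class):

from typing import Optional


class Report:
    def __init__(self, p_value: Optional[float] = None) -> None:
        # The attribute always exists, so hasattr(self, 'p_value') is
        # always True; only the None check reveals whether it was set.
        self.p_value = p_value

    def __str__(self) -> str:
        my_str = ''
        if self.p_value is not None:
            my_str += 'p-value = ' + f'{self.p_value:2.4f}' + '\n'
        return my_str


print(str(Report()), end='')      # prints nothing
print(str(Report(0.31)), end='')  # p-value = 0.3100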

pyerrors/input/sfcf.py

@@ -7,7 +7,7 @@ from ..obs import Obs
from .utils import sort_names, check_idl
import itertools
from numpy import ndarray
from typing import Union, Optional
from typing import Any, Union, Optional
sep = "/"
@@ -164,10 +164,9 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
else:
compact = False
appended = False
ls = []
if "replica" in kwargs:
ls = kwargs.get("replica")
else:
ls = kwargs.get("replica")
if ls is None:
ls = []
for (dirpath, dirnames, filenames) in os.walk(path):
if not appended:
ls.extend(dirnames)
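Note on the hunk above: dict.get returns None for a missing key, so a single get followed by a None check replaces the 'in'/'get' pair, and the None branch keeps ownership of the directory walk. A stripped-down sketch of that pattern (the helper name is made up):

import os
from typing import Any


def _collect_replica_dirs(path: str, **kwargs: Any) -> list[str]:
    ls = kwargs.get("replica")
    if ls is None:
        # No explicit replica list given: fall back to scanning the path.
        ls = []
        for (dirpath, dirnames, filenames) in os.walk(path):
            ls.extend(dirnames)
            break  # top level only, purely for illustration
    return ls


print(_collect_replica_dirs(".", replica=["data_r0", "data_r1"]))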
@@ -192,13 +191,12 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
if not silent:
print('Read', part, 'part of', name_list, 'from', prefix[:-1], ',', replica, 'replica')
if 'names' in kwargs:
new_names = kwargs.get('names')
new_names = kwargs.get('names')
if new_names is not None:
if len(new_names) != len(set(new_names)):
raise Exception("names are not unique!")
if len(new_names) != replica:
raise Exception('names should have the length', replica)
else:
ens_name = kwargs.get("ens_name")
if not appended:
@@ -207,14 +205,14 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name)
new_names = sort_names(new_names)
idl = []
idl: list[list[int]] = []
noffset_list = [str(x) for x in noffset_list]
wf_list = [str(x) for x in wf_list]
wf2_list = [str(x) for x in wf2_list]
noffset_strings: list[str] = [str(x) for x in noffset_list]
wf_strings: list[str] = [str(x) for x in wf_list]
wf2_strings: list[str] = [str(x) for x in wf2_list]
# setup dict structures
intern = {}
intern: dict[str, Any] = {}
for name, corr_type in zip(name_list, corr_type_list):
intern[name] = {}
b2b, single = _extract_corr_type(corr_type)
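Note on the hunk above: two recurring mypy-driven changes appear here. An empty container gets an explicit annotation (the checker cannot infer the element type of []), and a converted list gets a new name instead of re-binding the original, so no variable changes its type mid-function. A compact sketch (normalise and its argument are illustrative only):

from typing import Any


def normalise(noffset_list: list[int]) -> dict[str, Any]:
    # A fresh, annotated name instead of re-binding noffset_list to list[str].
    noffset_strings: list[str] = [str(x) for x in noffset_list]

    # Empty containers need annotations so the checker knows what they hold.
    idl: list[list[int]] = []
    intern: dict[str, Any] = {}
    for off in noffset_strings:
        intern[off] = {}
    intern["idl"] = idl
    return intern


print(normalise([0, 1, 2]))  # {'0': {}, '1': {}, '2': {}, 'idl': []}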
@@ -223,26 +221,26 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
intern[name]["spec"] = {}
for quarks in quarks_list:
intern[name]["spec"][quarks] = {}
for off in noffset_list:
for off in noffset_strings:
intern[name]["spec"][quarks][off] = {}
for w in wf_list:
for w in wf_strings:
intern[name]["spec"][quarks][off][w] = {}
if b2b:
for w2 in wf2_list:
for w2 in wf2_strings:
intern[name]["spec"][quarks][off][w][w2] = {}
intern[name]["spec"][quarks][off][w][w2]["pattern"] = _make_pattern(version, name, off, w, w2, intern[name]['b2b'], quarks)
else:
intern[name]["spec"][quarks][off][w]["0"] = {}
intern[name]["spec"][quarks][off][w]["0"]["pattern"] = _make_pattern(version, name, off, w, 0, intern[name]['b2b'], quarks)
internal_ret_dict = {}
internal_ret_dict: dict[str, list] = {}
needed_keys = []
for name, corr_type in zip(name_list, corr_type_list):
b2b, single = _extract_corr_type(corr_type)
if b2b:
needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, wf2_list))
needed_keys.extend(_lists2key([name], quarks_list, noffset_strings, wf_strings, wf2_strings))
else:
needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, ["0"]))
needed_keys.extend(_lists2key([name], quarks_list, noffset_strings, wf_strings, ["0"]))
for key in needed_keys:
internal_ret_dict[key] = []
@@ -288,9 +286,9 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
if version == "0.0" or not compact:
file = path + '/' + item + '/' + sub_ls[0] + '/' + name
if corr_type_list[name_index] == 'bi':
name_keys = _lists2key(quarks_list, noffset_list, wf_list, ["0"])
name_keys = _lists2key(quarks_list, noffset_strings, wf_strings, ["0"])
else:
name_keys = _lists2key(quarks_list, noffset_list, wf_list, wf2_list)
name_keys = _lists2key(quarks_list, noffset_strings, wf_strings, wf2_strings)
for key in name_keys:
specs = _key2specs(key)
quarks = specs[0]
@@ -306,7 +304,7 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
intern[name]["T"] = T
# preparing the datastructure
# the correlators get parsed into...
deltas = []
deltas: list[list] = []
for j in range(intern[name]["T"]):
deltas.append([])
internal_ret_dict[sep.join([name, key])] = deltas
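Note on the hunk above: the in-code comment hints at the layout being prepared here; for every correlator key there is one list per timeslice, which is later filled with the per-replicum and per-configuration values. A toy illustration of that shape (the key and T are made up):

sep = "/"
T = 3
key = sep.join(["f_A", "lquark lquark", "0", "1", "0"])  # hypothetical key

internal_ret_dict: dict[str, list] = {}
deltas: list[list] = []
for t in range(T):
    deltas.append([])          # one empty slot per timeslice
internal_ret_dict[key] = deltas

print(internal_ret_dict)
# {'f_A/lquark lquark/0/1/0': [[], [], []]}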
@@ -327,8 +325,8 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
rep_data.append(file_data)
for t in range(intern[name]["T"]):
internal_ret_dict[key][t].append([])
for cfg in range(no_cfg):
internal_ret_dict[key][t][i].append(rep_data[cfg][key][t])
for cfg_number in range(no_cfg):
internal_ret_dict[key][t][i].append(rep_data[cfg_number][key][t])
else:
for key in needed_keys:
specs = _key2specs(key)
@@ -337,10 +335,9 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
off = specs[2]
w = specs[3]
w2 = specs[4]
if "files" in kwargs:
if isinstance(kwargs.get("files"), list) and all(isinstance(f, str) for f in kwargs.get("files")):
name_ls = kwargs.get("files")
else:
name_ls = kwargs.get("files")
if name_ls is not None:
if not (isinstance(name_ls, list) and all(isinstance(f, str) for f in name_ls)):
raise TypeError("In append mode, files has to be of type list[str]!")
else:
name_ls = ls
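Note on the hunk above: the same get-then-check idea is used for the files keyword, with an explicit isinstance check so both the type checker and the runtime agree that name_ls is a list of strings before it is used. A sketch of that validation (the helper is hypothetical):

from typing import Any


def _resolve_file_list(ls: list[str], **kwargs: Any) -> list[str]:
    name_ls = kwargs.get("files")
    if name_ls is not None:
        # Validate the user-supplied value instead of silently ignoring it.
        if not (isinstance(name_ls, list) and all(isinstance(f, str) for f in name_ls)):
            raise TypeError("In append mode, files has to be of type list[str]!")
    else:
        name_ls = ls
    return name_ls


print(_resolve_file_list(["data_r0"], files=["data_r1", "data_r2"]))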
@@ -377,7 +374,7 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
if not silent:
print("Done")
result_dict = {}
result_dict: dict[str, Any] = {}
if keyed_out:
for key in needed_keys:
name = _key2specs(key)[0]
@@ -390,12 +387,12 @@ def read_sfcf_multi(path: str, prefix: str, name_list: list[str], quarks_list: l
result_dict[name] = {}
for quarks in quarks_list:
result_dict[name][quarks] = {}
for off in noffset_list:
for off in noffset_strings:
result_dict[name][quarks][off] = {}
for w in wf_list:
for w in wf_strings:
result_dict[name][quarks][off][w] = {}
if corr_type != 'bi':
for w2 in wf2_list:
for w2 in wf2_strings:
key = _specs2key(name, quarks, off, w, w2)
result = []
for t in range(intern[name]["T"]):
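Note on the hunk above: the nesting built here is name -> quarks -> offset -> wf, with an extra wf2 level for correlators that are not of type 'bi'. A minimal mock of that layout (all labels are invented):

from typing import Any

result_dict: dict[str, Any] = {}
for name in ["f_A"]:
    result_dict[name] = {}
    for quarks in ["lquark lquark"]:
        result_dict[name][quarks] = {}
        for off in ["0"]:
            result_dict[name][quarks][off] = {}
            for w in ["1"]:
                # 'bi' correlators stop here; others add one more [w2] level.
                result_dict[name][quarks][off][w] = ["one Obs per timeslice"]

print(result_dict["f_A"]["lquark lquark"]["0"]["1"])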
@@ -642,13 +639,13 @@ def _read_append_rep(filename: str, pattern: str, b2b: bool, cfg_separator: str,
rep_idl.append(idl)
rep_data.append(data)
data = []
final_data: list[list[float]] = []
for t in range(T):
data.append([])
final_data.append([])
for c in range(len(rep_data)):
data[t].append(rep_data[c][t])
return T, rep_idl, data
final_data[t].append(rep_data[c][t])
return T, rep_idl, final_data
def _get_rep_names(ls: list[str], ens_name: None=None) -> list[str]:
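Note on the last hunk: reusing the name data for the transposed result would give one variable two different shapes, which the new final_data name and its annotation avoid; the loop regroups rep_data from per-configuration rows into per-timeslice rows. A tiny standalone version of that step (the numbers are invented):

# rep_data[c][t]: value at timeslice t in configuration c (2 cfgs, T = 3).
rep_data: list[list[float]] = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
T = 3

final_data: list[list[float]] = []
for t in range(T):
    final_data.append([])
    for c in range(len(rep_data)):
        final_data[t].append(rep_data[c][t])

print(final_data)  # [[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]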