# corrlib/corrlib/input/sfcf.py
import pyerrors as pe
import datalad.api as dl
import json
import os
from typing import Any
bi_corrs: list = ["f_P", "fP", "f_p",
"g_P", "gP", "g_p",
"fA0", "f_A", "f_a",
"gA0", "g_A", "g_a",
"k1V1", "k1_V1", "k_v11",
"l1V1", "l1_V1", "l_v11",
"k2V2", "k2_V2", "k_v22",
"l2V2", "l2_V2", "l_v22",
"k3V3", "k3_V3", "k_v33",
"l3V3", "l3_V3", "l_v33",
"kVk", "k_V", "k_v",
"lVk", "l_V", "l_v",
"k1T01", "k1_T01",
"l1T01", "l1_T01",
"k2T02", "k2_T02",
"l2T02", "l2_T02",
"k3T03", "k3_T03",
"l3T03", "l3_T03",
"kT0k", "k_T", "k_t",
"lT0k", "l_T", "l_t",
"fAk", "f_Ak", "f_ak",
"gAk", "g_Ak", "g_ak",
"kV0", "k_V0", "k_v0",
"lV0", "l_V0", "l_v0",
"k1A2", "k1_A2", "f_av21",
"l1A2", "l1_A2", "g_av21",
"k2A3", "k2_A3", "f_av32",
"l2A3", "l2_A3", "g_av32",
"k3A1", "k3_A1", "f_av13",
"l3A1", "l3_A1", "g_av13",
"k1A3", "k1_A3", "f_av31",
"l1A3", "l1_A3", "g_av31",
"k2A1", "k2_A1", "f_av12",
"l2A1", "l2_A1", "g_av12",
"k3A2", "k3_A2", "f_av23",
"l3A2", "l3_A2", "g_av23",
]
bb_corrs: list = [
'F1',
'F_1',
'f_1',
'F1ll',
'k_1',
'F_AA_a',
'F_AA_d',
'F_AdP_a',
'F_AdP_d',
'F_dPA_a',
'F_dPA_d',
'F_dPdP_a',
'F_dPdP_d',
'F_sPA_a',
'F_sPA_d',
'F_sPdP_a',
'F_sPdP_d',
]
bib_corrs: list = [
'F_V0',
]
corr_types: dict[str, str] = {}
for c in bi_corrs:
corr_types[c] = 'bi'
for c in bb_corrs:
corr_types[c] = 'bb'
for c in bib_corrs:
corr_types[c] = 'bib'
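
# A quick illustration of the mapping built above:
#
#     corr_types["f_A"]   # -> 'bi'
#     corr_types["F_1"]   # -> 'bb'
#     corr_types["F_V0"]  # -> 'bib'
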
def read_param(path: str, project: str, file_in_project: str) -> dict[str, Any]:
"""
Read the parameters from the sfcf file.
Parameters
----------
file : str
The path to the sfcf file.
Returns
-------
dict
The parameters in the sfcf file.
"""
file = path + "/projects/" + project + '/' + file_in_project
dl.get(file, dataset=path)
with open(file, 'r') as f:
lines = f.readlines()
params: dict[str, Any] = {}
params['wf_offsets'] = []
params['wf_basis'] = []
params['wf_coeff'] = []
params['qr'] = {}
params['mrr'] = []
params['crr'] = []
params['qs'] = {}
params['mrs'] = []
params['crs'] = []
for line in lines:
if line.startswith('#'):
continue
if line.startswith('\n'):
continue
if line.startswith('wf_offsets'):
num_wf_offsets = line.split()[1]
for i in range(int(num_wf_offsets)):
params['wf_offsets'].append([float(x) for x in lines[lines.index(line) + i + 1].split("#")[0].split()])
if line.startswith('wf_basis'):
num_wf_basis = line.split()[1]
for i in range(int(num_wf_basis)):
params['wf_basis'].append([float(x) for x in lines[lines.index(line) + i + 1].split("#")[0].split()])
if line.startswith('wf_coeff'):
num_wf_coeff = line.split()[1]
for i in range(int(num_wf_coeff)):
params['wf_coeff'].append([float(x) for x in lines[lines.index(line) + i + 1].split("#")[0].split()])
if line.startswith('qr'):
num_qr = line.split()[1]
for i in range(int(num_qr)):
dat = lines[lines.index(line) + i + 1].split("#")[0].strip().split()[:-1]
params['qr'][dat[0]] = {}
params['qr'][dat[0]]['mass'] = float(dat[1])
params['qr'][dat[0]]['thetas'] = [float(x) for x in dat[2:5]]
if line.startswith('mrr'):
num_mrr = line.split()[1]
for i in range(int(num_mrr)):
mrr = lines[lines.index(line) + i + 1]
mrr = mrr.split("#")[0]
mrr = mrr.strip()
mrr = mrr.replace("\t", " ")
params['mrr'].append(mrr)
if line.startswith('crr'):
num_crr = line.split()[1]
for i in range(int(num_crr)):
params['crr'].append(lines[lines.index(line) + i + 1].split("#")[0].strip())
if line.startswith('qs'):
num_qs = line.split()[1]
for i in range(int(num_qs)):
dat = lines[lines.index(line) + i + 1].split("#")[0].strip().split()[:-1]
params['qs'][dat[0]] = {}
params['qs'][dat[0]]['mass'] = float(dat[1])
params['qs'][dat[0]]['thetas'] = [float(x) for x in dat[2:5]]
if line.startswith('mrs'):
num_mrs = line.split()[1]
for i in range(int(num_mrs)):
params['mrs'].append(lines[lines.index(line) + i + 1].split("#")[0].strip())
        if line.startswith('crs'):
            num_crs = line.split()[1]
            for i in range(int(num_crs)):
                params['crs'].append(lines[lines.index(line) + i + 1].split("#")[0].strip())
# catch standard cases
if params['wf_offsets'] == []:
params['wf_offsets'] = [[0, 0, 0]]
if params['wf_basis'] == []:
params['wf_basis'] = [[0, -1]]
if params['wf_coeff'] == []:
params['wf_coeff'] = [[1]]
return params
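
# A minimal usage sketch for read_param; the backlogger path, project UUID and
# file name below are hypothetical:
#
#     params = read_param("/data/backlog", "8f2a-project-uuid", "input/sfcf_f1.in")
#     params["qr"]          # quark definitions, e.g. {'q1': {'mass': ..., 'thetas': [...]}}
#     params["wf_offsets"]  # falls back to [[0, 0, 0]] if the input file defines none
#     params["crr"]         # names of the correlators listed in the input file
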
def _map_params(params: dict, spec_list: list) -> dict[str, Any]:
"""
Map the extracted parameters to the extracted data.
Parameters
----------
param: dict
The parameters extracted from the parameter (input) file. in the dict form given by read_param.
spec_list: list
The list of specifications that belongs to the dorrelator in question.
Return
------
new_specs: dict
Dict with keys replaced by parameters.
"""
# quarks/offset/wf/wf2
new_specs = {}
# quarks
quarks = spec_list[0].split(" ")
new_specs['quarks'] = [params['qr'][quarks[0]], params['qr'][quarks[1]]]
# offset
new_specs['offset'] = (params['wf_offsets'][int(spec_list[1])])
# wf1
contribs = []
for i, coeff in enumerate(params['wf_coeff'][int(spec_list[2])]):
if not coeff == 0:
contrib = [coeff, params['wf_basis'][i]]
contribs.append(contrib)
new_specs['wf1'] = contribs
if len(spec_list) == 4:
# wf2
contribs = []
for i, coeff in enumerate(params['wf_coeff'][int(spec_list[3])]):
if not coeff == 0:
contrib = [coeff, params['wf_basis'][i]]
contribs.append(contrib)
new_specs['wf2'] = contribs
return new_specs
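
# Sketch of what _map_params resolves; the spec values are hypothetical:
#
#     specs = _map_params(params, ["q1 q2", "0", "1", "1"])
#     specs["quarks"]  # [params['qr']['q1'], params['qr']['q2']]
#     specs["offset"]  # params['wf_offsets'][0]
#     specs["wf1"]     # [[coeff, basis], ...] for every non-zero wf_coeff entry
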
def get_specs(key: str, parameters: dict, sep: str = '/') -> str:
    """
    Resolve the specification part of a data key into the corresponding
    parameters and return them as a JSON string.
    """
    key_parts = key.split(sep)
    if corr_types[key_parts[0]] == 'bi':
        param = _map_params(parameters, key_parts[1:-1])
    else:
        param = _map_params(parameters, key_parts[1:])
    return json.dumps(param)
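
# Usage sketch for get_specs; the key layout is inferred from the splitting
# logic above and the concrete key is hypothetical:
#
#     key = "/".join(["f_A", "q1 q2", "0", "1", "1"])
#     get_specs(key, params)  # -> JSON string of the resolved specifications
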
def read_data(path, project, dir_in_project, prefix, param, version='1.0c', cfg_seperator='n', sep='/', **kwargs) -> dict:
"""
Extract the data from the sfcf file.
Parameters
----------
path: str
The path of the backlogger.
project: str
The Projects uuid.
dir_in_project:s str
The output directory in the project.
param: dict
The parameter dictionary, as given by read_param.
version: str
Version of sfcf.
cfg_seperator: str
Separator of the configuration number. Needed for reading. default: "n"
sep: str
Seperator for the key in return dict. (default: "/)
Returns
-------
sorted_data: dict
The data from the sfcf file.
"""
names = kwargs.get('names', None)
directory = os.path.join(path, "projects", project, dir_in_project)
appended = (version[-1] == "a")
ls = []
files_to_get = []
for (dirpath, dirnames, filenames) in os.walk(directory):
if not appended:
ls.extend(dirnames)
else:
ls.extend(filenames)
break
if not appended:
compact = (version[-1] == "c")
for i, item in enumerate(ls):
rep_path = directory + '/' + item
sub_ls = pe.input.sfcf._find_files(rep_path, prefix, compact, [])
files_to_get.extend([rep_path + "/" + filename for filename in sub_ls])
print("Getting data, this might take a while...")
    if files_to_get:
        dl.get(files_to_get, dataset=os.path.join(path, "projects", project), jobs=4)
    else:
        dl.get(directory, dataset=os.path.join(path, "projects", project), jobs=4)
print("... done downloading.")
corr_type_list = []
for corr_name in param['crr']:
        if corr_name not in corr_types:
            raise ValueError(f"Correlator type of '{corr_name}' not known.")
corr_type_list.append(corr_types[corr_name])
data = {}
    if param['crr']:
if names is not None:
data_crr = pe.input.sfcf.read_sfcf_multi(directory, prefix, param['crr'], param['mrr'], corr_type_list, range(len(param['wf_offsets'])),
range(len(param['wf_basis'])), range(len(param['wf_basis'])), version, cfg_seperator, keyed_out=True, names=names)
else:
data_crr = pe.input.sfcf.read_sfcf_multi(directory, prefix, param['crr'], param['mrr'], corr_type_list, range(len(param['wf_offsets'])),
range(len(param['wf_basis'])), range(len(param['wf_basis'])), version, cfg_seperator, keyed_out=True)
for key in data_crr.keys():
data[key] = data_crr[key]
# print("Read data:", data_crr)
# print(f"Read data: pe.input.sfcf.read_sfcf_multi({directory}, {prefix}, {param['crr']}, {param['mrr']}, {corr_type_list}, {range(len(param['wf_offsets']))}, {range(len(param['wf_basis']))}, {range(len(param['wf_basis']))}, {version}, {cfg_seperator}, keyed_out=True, names={names})")
    if param['crs']:
        # The singlet correlators are read in analogy to the crr branch above; this
        # assumes param['crs'] and param['mrs'] play the same roles as param['crr'] and param['mrr'].
        corr_type_list_s = [corr_types[corr_name] for corr_name in param['crs']]
        data_crs = pe.input.sfcf.read_sfcf_multi(directory, prefix, param['crs'], param['mrs'], corr_type_list_s, range(len(param['wf_offsets'])),
                                                 range(len(param['wf_basis'])), range(len(param['wf_basis'])), version, cfg_seperator, keyed_out=True)
        for key in data_crs.keys():
            data[key] = data_crs[key]
# sort data by correlator
sorted_data: dict[str, dict[str, Any]] = {}
for key in data.keys():
key_parts = key.split(sep)
corr = key_parts[0]
if corr not in sorted_data:
sorted_data[corr] = {}
sorted_data[corr][sep.join(key_parts[1:])] = data[key]
return sorted_data
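
# A minimal end-to-end sketch; paths, UUID and prefix are hypothetical and the
# datalad dataset has to be available locally:
#
#     if __name__ == "__main__":
#         backlog = "/data/backlog"
#         project = "8f2a-project-uuid"
#         params = read_param(backlog, project, "input/sfcf_f1.in")
#         data = read_data(backlog, project, "out", "sfcf_f1", params, version="1.0c")
#         for corr, specs in data.items():
#             print(corr, list(specs)[:3])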