Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-05-16 04:23:41 +02:00
implemented read_sfcf for append-mode output, bug fixes
This commit is contained in: parent 31c2ada963, commit ebdc17aa66
1 changed file with 249 additions and 152 deletions
@@ -8,7 +8,7 @@ import numpy as np  # Thinly-wrapped numpy
 from ..obs import Obs
 from . import utils
 
-def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version = "1.0", **kwargs):
+def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version = "1.0c", **kwargs):
     """Read sfcf c format from given folder structure.
 
     Parameters
@@ -65,48 +65,61 @@ def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version =
 
     #due to higher usage in current projects, compact file format is default
     compact = True
+    appended = False
     #get version string
     known_versions = ["0.0","1.0","2.0","1.0c","2.0c","1.0a","2.0a"]
-    if "version" in kwargs:
-        version = kwargs.get("version")
     if not version in known_versions:
         raise Exception("This version is not known!")
     #if the letter c is appended to the version, the compact fileformat is used (former read_sfcf_c)
     if(version[-1] == "c"):
+        appended = False
         compact = True
         version = version[:-1]
+    elif(version[-1] == "a"):
+        appended = True
+        compact = False
+        version = version[:-1]
     else:
         compact = False
+        appended = False
     read = 0
     T = 0
     start = 0
     ls = []
+    if "replica" in kwargs:
+        ls = reps
+    else:
         for (dirpath, dirnames, filenames) in os.walk(path):
+            if not appended:
                 ls.extend(dirnames)
+            else:
+                ls.extend(filenames)
             break
         if not ls:
             raise Exception('Error, directory not found')
         # Exclude folders with different names
-    if "replica" in kwargs:
-        ls = reps
-    else:
         for exc in ls:
             if not fnmatch.fnmatch(exc, prefix + '*'):
                 ls = list(set(ls) - set([exc]))
         if len(ls) > 1:
             ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))  # New version, to cope with ids, etc.
+    if not appended:
         replica = len(ls)
+    else:
+        replica = len([l.split(".")[-1] for l in ls])//len(set([l.split(".")[-1] for l in ls]))
     print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')
-    idl = []
     if 'names' in kwargs:
         new_names = kwargs.get('names')
         if len(new_names)!=len(set(new_names)):
-            raise Exception("names are nor unique!")
+            raise Exception("names are not unique!")
         if len(new_names) != replica:
             raise Exception('Names does not have the required length', replica)
     else:
         # Adjust replica names to new bookmarking system
 
         new_names = []
+        if not appended:
             for entry in ls:
                 try:
                     idx = entry.index('r')
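Note: the hunk above introduces a version-string convention in which the trailing letter of `version` selects the on-disk layout. A minimal sketch of that dispatch, as a hypothetical standalone helper (`parse_version` is not part of pyerrors; the semantics are distilled from the diff):

```python
# Hypothetical helper: the trailing letter of the version string selects the
# file layout ("c" = compact, "a" = appended, bare number = original layout).
def parse_version(version):
    known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]
    if version not in known_versions:
        raise Exception("This version is not known!")
    compact = version.endswith("c")
    appended = version.endswith("a")
    if compact or appended:
        version = version[:-1]  # strip the format letter, keep the bare number
    return version, compact, appended

print(parse_version("2.0a"))  # -> ('2.0', False, True)
```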
@@ -117,6 +130,26 @@ def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version =
                     new_names.append(kwargs.get('ens_name') + '|' + entry[idx:])
                 else:
                     new_names.append(entry[:idx] + '|' + entry[idx:])
+        else:
+
+            for exc in ls:
+                if not fnmatch.fnmatch(exc, prefix + '*.'+name):
+                    ls = list(set(ls) - set([exc]))
+            ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
+            for entry in ls:
+                myentry = entry.removesuffix("."+name)
+                try:
+                    idx = myentry.index('r')
+                except:
+                    raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
+
+                if 'ens_name' in kwargs:
+                    new_names.append(kwargs.get('ens_name') + '|' + myentry[idx:])
+                else:
+                    new_names.append(myentry[:idx] + '|' + myentry[idx:])
+    #print(new_names)
+    idl = []
+    if not appended:
         for i, item in enumerate(ls):
             sub_ls = []
             if "files" in kwargs:
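Note: the appended-format branch above derives replica names from file names rather than directory names. A self-contained sketch of that bookmarking step; the file-name layout `<prefix>r<N>.<name>` is inferred from the diff and `make_replica_name` is a hypothetical helper. `str.removesuffix` requires Python 3.9 or newer.

```python
# Hypothetical standalone version of the append-mode bookmarking above.
# Assumed file-name layout: "<prefix>r<N>.<name>", e.g. "data_r0.f_A".
def make_replica_name(filename, corr_name, ens_name=None):
    myentry = filename.removesuffix("." + corr_name)  # Python >= 3.9
    try:
        idx = myentry.index('r')
    except ValueError:
        raise Exception("Automatic recognition of replicum failed, "
                        "please enter the key word 'names'.")
    if ens_name is not None:
        return ens_name + '|' + myentry[idx:]
    return myentry[:idx] + '|' + myentry[idx:]

print(make_replica_name("data_r0.f_A", "f_A"))  # -> data_|r0
```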
@@ -158,7 +191,6 @@ def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version =
             #here we have found all the files we need to look into.
             if i == 0:
                 #here, we want to find the place within the file, where the correlator we need is stored.
-
                 if compact:
                     #to do so, the pattern needed is put together from the input values
                     pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
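Note: the pattern assembled here is a literal multi-line block that is later matched against each file's correlator header with `re.search` (`quarks` defaults to the regex `'.*'`). A small sketch with assumed example values (not taken from the diff):

```python
# Sketch of the header pattern built above: the correlator is located by
# matching this block against the key/value header lines of each data file.
name, quarks, noffset, wf, wf2, b2b = "f_A", "lquark lquark", 0, 0, 0, False

pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
if b2b:
    pattern += '\nwf_2 ' + str(wf2)  # boundary-to-boundary correlators carry a second wavefunction

print(pattern)
# name f_A
# quarks lquark lquark
# offset 0
# wf 0
```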
@@ -212,9 +244,9 @@ def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version =
                 for j in range(T):
                     deltas.append([])
 
-                sublength = no_cfg
-                for j in range(T):
-                    deltas[j].append(np.zeros(sublength))
+            for t in range(T):
+                deltas[t].append(np.zeros(no_cfg))
             #... the actual parsing can start. we iterate through all measurement files in the path given...
             if compact:
                 for cfg in range(no_cfg):
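Note: the loops in this hunk set up the container the parser fills: `deltas[t]` is a per-timeslice list holding one `np.zeros(no_cfg)` array per replica, so a sample is addressed as `deltas[t][rep][cnfg]`. An illustration with assumed toy dimensions:

```python
# Toy illustration (dimensions assumed) of the nested structure built above:
# deltas[t][rep] is a numpy array with one entry per configuration.
import numpy as np

T, n_replica, no_cfg = 4, 2, 3
deltas = [[] for _ in range(T)]          # one sub-list per timeslice
for rep in range(n_replica):
    for t in range(T):
        deltas[t].append(np.zeros(no_cfg))

deltas[1][0][2] = 0.5                    # timeslice 1, replica 0, config 2
print(len(deltas), len(deltas[0]), deltas[0][0].shape)  # -> 4 2 (3,)
```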
@@ -245,6 +277,71 @@ def read_sfcf(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, version =
                     else:
                         deltas[k - start][i][cnfg] = floats[1 + im - single]
 
+    else:
+        for exc in ls:
+            if not fnmatch.fnmatch(exc, prefix + '*.'+name):
+                ls = list(set(ls) - set([exc]))
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
+        #print(ls)
+        pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
+        if b2b:
+            pattern += '\nwf_2 ' + str(wf2)
+        for rep,file in enumerate(ls):
+            rep_idl = []
+            with open(path + '/' + file, 'r') as fp:
+                content = fp.readlines()
+                data_starts = []
+                for l,line in enumerate(content):
+                    if "[run]" in line:
+                        data_starts.append(l)
+                if len(set([data_starts[i]-data_starts[i-1] for i in range(1,len(data_starts))])) > 1:
+                    raise Exception ("Irregularities in file structure found, not all runs have the same output length")
+                #print(data_starts)
+                #first chunk of data
+                chunk = content[:data_starts[1]]
+                for l,line in enumerate(chunk):
+                    if line.startswith("gauge_name"):
+                        gauge_line = l
+                        #meta_data["gauge_name"] = (line.strip()).split("/")[-1]
+                    elif line.startswith("[correlator]"):
+                        corr_line = l
+                        found_pat = ""
+                        for li in chunk[corr_line+1:corr_line+6+b2b]:
+                            found_pat += li
+                        if re.search(pattern,found_pat):
+                            start_read = corr_line+7+b2b
+                            T=len(chunk)-1-start_read
+                if rep == 0:
+                    deltas = []
+                    for t in range(T):
+                        deltas.append([])
+                for t in range(T):
+                    deltas[t].append(np.zeros(len(data_starts)))
+                #all other chunks should follow the same structure
+                for cnfg in range(len(data_starts)):
+                    start = data_starts[cnfg]
+                    stop = start+data_starts[1]
+                    chunk = content[start:stop]
+                    #meta_data = {}
+
+                    try:
+                        rep_idl.append(int(chunk[gauge_line].split("n")[-1]))
+                    except:
+                        raise Exception("Couldn't parse idl from directroy, problem with chunk around line "+gauge_line)
+
+                    found_pat = ""
+                    for li in chunk[corr_line+1:corr_line+6+b2b]:
+                        found_pat += li
+                    if re.search(pattern,found_pat):
+                        #print("found pattern")
+                        for t,line in enumerate(chunk[start_read:start_read+T]):
+                            floats = list(map(float, line.split()))
+                            deltas[t][rep][cnfg] = floats[-2:][im]
+            idl.append(rep_idl)
+
+    #print(new_names)
+    #print(deltas)
+    #print(idl)
     if "check_configs" in kwargs:
         print("Checking for missing configs...")
         che = kwargs.get("check_configs")
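Note: taken together, a call using the new append mode might look as follows. This is a hedged usage sketch; the module path and argument values are assumptions for illustration, not taken from this diff.

```python
# Usage sketch: version strings ending in "a" ("1.0a", "2.0a") select the
# append-mode reader added in this commit; "c" keeps the compact format,
# which remains the default ("1.0c").
from pyerrors.input.sfcf import read_sfcf

corr = read_sfcf("./data", "data_", "f_A",
                 quarks="lquark lquark", noffset=0, wf=0,
                 version="2.0a")
```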