Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-05-14 11:33:42 +02:00
docs: typos fixed, print statements removed
parent 3c990b2e1f
commit b8b3d6191f
3 changed files with 3 additions and 26 deletions
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-# coding: utf-8
-
 import ctypes
 import hashlib
 import autograd.numpy as np  # Thinly-wrapped numpy
@@ -573,7 +573,6 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):
     found = []
     files = []
     for (dirpath, dirnames, filenames) in os.walk(path + "/"):
-        # print(filenames)
         found.extend(filenames)
         break
     for f in found:
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-# coding: utf-8
-
 import os
 import fnmatch
 import re
@@ -39,7 +36,7 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
         replaces the name of the ensemble
     version: str
         version of SFCF, with which the measurement was done.
-        if the compact output option (-c) was spectified,
+        if the compact output option (-c) was specified,
         append a "c" to the version (e.g. "1.0c")
         if the append output option (-a) was specified,
         append an "a" to the version
@@ -47,7 +44,7 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
         list of replica to be read, default is all
     files: list
         list of files to be read per replica, default is all.
-        for non-conpact ouztput format, hand the folders to be read here.
+        for non-compact output format, hand the folders to be read here.
     check_configs:
        list of list of supposed configs, eg. [range(1,1000)]
        for one replicum with 1000 configs
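The docstring lines above describe how read_sfcf is meant to be called. As a rough usage sketch under those conventions (not part of this commit; the directory, prefix and correlator name are placeholders, and the import path assumes the module layout pyerrors/input/sfcf.py):

# Illustration only: placeholder path/prefix/name; keyword names follow the
# docstring above ("c" suffix = compact output, "a" suffix = appended output).
from pyerrors.input.sfcf import read_sfcf  # import path assumed from the repository layout

data = read_sfcf(
    path="./data",                    # placeholder measurement directory
    prefix="my_ensemble",             # placeholder run prefix
    name="f_A",                       # placeholder correlator name
    version="2.0c",                   # SFCF 2.0 run written with the compact option (-c)
    check_configs=[range(1, 1000)],   # expected configurations for one replicum
)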
@@ -69,17 +66,12 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
     else:
         b2b = False
         single = False
-    # due to higher usage in current projects,
-    # compact file format is default
     compact = True
     appended = False
-    # get version string
     known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]
 
     if version not in known_versions:
         raise Exception("This version is not known!")
-    # if the letter c is appended to the version,
-    # the compact fileformat is used (former read_sfcf_c)
     if(version[-1] == "c"):
         appended = False
         compact = True
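The comments removed in the hunk above explained the version-suffix convention that the surrounding checks implement. A minimal illustration of that convention (not code from the repository):

# Illustration only: how a version string such as "2.0c" maps onto the flags
# used above; mirrors the check if(version[-1] == "c") shown in the hunk.
version = "2.0c"               # hypothetical input
compact = version[-1] == "c"   # compact output option (-c) was used
appended = version[-1] == "a"  # append output option (-a) was used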
@@ -127,8 +119,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
         if len(new_names) != replica:
             raise Exception('Names does not have the required length', replica)
     else:
-        # Adjust replica names to new bookmarking system
-
         new_names = []
     if not appended:
         for entry in ls:
@@ -149,7 +139,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
         ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
         for entry in ls:
             myentry = entry[:-len(name) - 1]
-            # print(myentry)
             try:
                 idx = myentry.index('r')
             except Exception:
@@ -159,7 +148,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
                 new_names.append(kwargs.get('ens_name') + '|' + myentry[idx:])
             else:
                 new_names.append(myentry[:idx] + '|' + myentry[idx:])
-    # print(new_names)
     idl = []
     if not appended:
         for i, item in enumerate(ls):
@@ -174,8 +162,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
                 else:
                     sub_ls.extend(dirnames)
                 break
-
-            # print(sub_ls)
             if compact:
                 for exc in sub_ls:
                     if not fnmatch.fnmatch(exc, prefix + '*'):
@@ -186,7 +172,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
                     if not fnmatch.fnmatch(exc, 'cfg*'):
                         sub_ls = list(set(sub_ls) - set([exc]))
                 sub_ls.sort(key=lambda x: int(x[3:]))
-                # print(sub_ls)
             rep_idl = []
             no_cfg = len(sub_ls)
             for cfg in sub_ls:
@@ -201,7 +186,7 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
             # maybe there is a better way to print the idls
             print(item, ':', no_cfg, ' configurations')
             idl.append(rep_idl)
             # here we have found all the files we need to look into.
             if i == 0:
                 # here, we want to find the place within the file,
                 # where the correlator we need is stored.
@@ -255,8 +240,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
 
                 for t in range(T):
                     deltas[t].append(np.zeros(no_cfg))
-            # ...the actual parsing can start.
-            # we iterate through all measurement files in the path given...
             if compact:
                 for cfg in range(no_cfg):
                     with open(path + '/' + item + '/' + sub_ls[cfg]) as fp:
@@ -312,7 +295,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
                         data_starts.append(linenumber)
                 if len(set([data_starts[i] - data_starts[i - 1] for i in range(1, len(data_starts))])) > 1:
                     raise Exception("Irregularities in file structure found, not all runs have the same output length")
-                # first chunk of data
                 chunk = content[:data_starts[1]]
                 for linenumber, line in enumerate(chunk):
                     if line.startswith("gauge_name"):
@@ -340,7 +322,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
                     start = data_starts[cnfg]
                     stop = start + data_starts[1]
                     chunk = content[start:stop]
-                    # meta_data = {}
                     try:
                         rep_idl.append(int(chunk[gauge_line].split("n")[-1]))
                     except Exception: