Impr/fix refactor sfcf read (#164)

* refactor read_sfcf

* adding tests for find_corr and read_compact_file

* add necessary broken data for tests

* fixed appended mode reading

* factored out sort_names and find_files

* now also using sort_files in sfcf.py

* edited tests to fit with new structure

* added find_files function

* shifted helper functions to bottom of file

* removed some debug lines

* linting

* Fixed requested changes, added silent mode

* added Exception if correlator is not found by read_append_rep

* use tmp_path fixture

* linting silent keyword

* try to fix testing for a_bb

* tests: Exception testing in test_find_corr made more explicit.

---------

Co-authored-by: Fabian Joswig <fabian.joswig@ed.ac.uk>
Justus Kuhlmann 2023-03-15 18:46:12 +01:00 committed by GitHub
parent 991199a680
commit 41fec09816
7 changed files with 880 additions and 315 deletions
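For orientation, a minimal usage sketch of the refactored reader with the new silent keyword; the path, prefix and correlator name below are placeholders, not data from this commit:

from pyerrors.input.sfcf import read_sfcf

# silent=True suppresses the progress printing made optional in this commit
corr = read_sfcf("./data", "sfcf_run", "f_A", quarks="lquark lquark", version="2.0c", silent=True)
print(len(corr))  # one Obs per timeslice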

pyerrors/input/openQCD.py

@@ -1,6 +1,5 @@
import os
import fnmatch
import re
import struct
import warnings
import numpy as np # Thinly-wrapped numpy
@@ -10,75 +9,7 @@ from ..obs import Obs
from ..fits import fit_lin
from ..obs import CObs
from ..correlators import Corr
def _find_files(path, prefix, postfix, ext, known_files=[]):
found = []
files = []
if postfix != "":
if postfix[-1] != ".":
postfix = postfix + "."
if postfix[0] != ".":
postfix = "." + postfix
if ext[0] == ".":
ext = ext[1:]
pattern = prefix + "*" + postfix + ext
for (dirpath, dirnames, filenames) in os.walk(path + "/"):
found.extend(filenames)
break
if known_files != []:
for kf in known_files:
if kf not in found:
raise FileNotFoundError("Given file " + kf + " does not exist!")
return known_files
if not found:
raise FileNotFoundError(f"Error, directory '{path}' not found")
for f in found:
if fnmatch.fnmatch(f, pattern):
files.append(f)
if files == []:
raise Exception("No files found after pattern filter!")
files = _sort_names(files)
return files
def _sort_names(ll):
r_pattern = r'r(\d+)'
id_pattern = r'id(\d+)'
# sort list by id first
if all([re.search(id_pattern, entry) for entry in ll]):
ll.sort(key=lambda x: int(re.findall(id_pattern, x)[0]))
# then by replikum
if all([re.search(r_pattern, entry) for entry in ll]):
ll.sort(key=lambda x: int(re.findall(r_pattern, x)[0]))
# as the rearrangements by one key let the other key untouched, the list is sorted now
else:
# fallback
sames = ''
if len(ll) > 1:
for i in range(len(ll[0])):
checking = ll[0][i]
for rn in ll[1:]:
is_same = (rn[i] == checking)
if is_same:
sames += checking
else:
break
print(ll[0][len(sames):])
ll.sort(key=lambda x: int(re.findall(r'\d+', x[len(sames):])[0]))
return ll
from .utils import sort_names
def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
@@ -171,7 +102,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
else:
rep_names = names
rep_names = _sort_names(rep_names)
rep_names = sort_names(rep_names)
print_err = 0
if 'print_err' in kwargs:
@@ -561,6 +492,46 @@ def _parse_array_openQCD2(d, n, size, wa, quadrupel=False):
return arr
def _find_files(path, prefix, postfix, ext, known_files=[]):
found = []
files = []
if postfix != "":
if postfix[-1] != ".":
postfix = postfix + "."
if postfix[0] != ".":
postfix = "." + postfix
if ext[0] == ".":
ext = ext[1:]
pattern = prefix + "*" + postfix + ext
for (dirpath, dirnames, filenames) in os.walk(path + "/"):
found.extend(filenames)
break
if known_files != []:
for kf in known_files:
if kf not in found:
raise FileNotFoundError("Given file " + kf + " does not exist!")
return known_files
if not found:
raise FileNotFoundError(f"Error, directory '{path}' not found")
for f in found:
if fnmatch.fnmatch(f, pattern):
files.append(f)
if files == []:
raise Exception("No files found after pattern filter!")
files = sort_names(files)
return files
def _read_array_openQCD2(fp):
t = fp.read(4)
d = struct.unpack('i', t)[0]
@@ -974,7 +945,7 @@ def _read_flow_obs(path, prefix, c, dtr_cnfg=1, version="openQCD", obspos=0, sum
deltas.append(Q_top)
rep_names = _sort_names(rep_names)
rep_names = sort_names(rep_names)
idl = [range(int(configlist[rep][r_start_index[rep]]), int(configlist[rep][r_stop_index[rep]]) + 1, 1) for rep in range(len(deltas))]
deltas = [deltas[nrep][r_start_index[nrep]:r_stop_index[nrep] + 1] for nrep in range(len(deltas))]

pyerrors/input/sfcf.py

@@ -3,11 +3,11 @@ import fnmatch
import re
import numpy as np # Thinly-wrapped numpy
from ..obs import Obs
from . import utils
from .utils import sort_names, check_idl
def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", **kwargs):
"""Read sfcf c format from given folder structure.
def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
"""Read sfcf files from given folder structure.
Parameters
----------
@@ -71,8 +71,7 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
else:
im = 0
part = 'real'
if "replica" in kwargs:
reps = kwargs.get("replica")
if corr_type == 'bb':
b2b = True
single = True
@@ -82,8 +81,7 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
else:
b2b = False
single = False
compact = True
appended = False
known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]
if version not in known_versions:
@@ -99,12 +97,9 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
else:
compact = False
appended = False
read = 0
T = 0
start = 0
ls = []
if "replica" in kwargs:
ls = reps
ls = kwargs.get("replica")
else:
for (dirpath, dirnames, filenames) in os.walk(path):
if not appended:
@@ -120,74 +115,38 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
ls = list(set(ls) - set([exc]))
if not appended:
if len(ls) > 1:
# New version, to cope with ids, etc.
ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
ls = sort_names(ls)
replica = len(ls)
else:
replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
print('Read', part, 'part of', name, 'from', prefix[:-1],
',', replica, 'replica')
if not silent:
print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')
if 'names' in kwargs:
new_names = kwargs.get('names')
if len(new_names) != len(set(new_names)):
raise Exception("names are not unique!")
if len(new_names) != replica:
raise Exception('Names does not have the required length', replica)
raise Exception('names should have the length', replica)
else:
new_names = []
ens_name = kwargs.get("ens_name")
if not appended:
for entry in ls:
try:
idx = entry.index('r')
except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if 'ens_name' in kwargs:
new_names.append(kwargs.get('ens_name') + '|' + entry[idx:])
else:
new_names.append(entry[:idx] + '|' + entry[idx:])
new_names = _get_rep_names(ls, ens_name)
else:
new_names = _get_appended_rep_names(ls, prefix, name, ens_name)
new_names = sort_names(new_names)
for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name):
ls = list(set(ls) - set([exc]))
ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
for entry in ls:
myentry = entry[:-len(name) - 1]
try:
idx = myentry.index('r')
except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if 'ens_name' in kwargs:
new_names.append(kwargs.get('ens_name') + '|' + myentry[idx:])
else:
new_names.append(myentry[:idx] + '|' + myentry[idx:])
idl = []
if not appended:
for i, item in enumerate(ls):
sub_ls = []
rep_path = path + '/' + item
if "files" in kwargs:
sub_ls = kwargs.get("files")
sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
files = kwargs.get("files")
else:
for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
if compact:
sub_ls.extend(filenames)
else:
sub_ls.extend(dirnames)
break
if compact:
for exc in sub_ls:
if not fnmatch.fnmatch(exc, prefix + '*'):
sub_ls = list(set(sub_ls) - set([exc]))
sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
else:
for exc in sub_ls:
if not fnmatch.fnmatch(exc, 'cfg*'):
sub_ls = list(set(sub_ls) - set([exc]))
sub_ls.sort(key=lambda x: int(x[3:]))
files = []
sub_ls = _find_files(rep_path, prefix, compact, files)
rep_idl = []
no_cfg = len(sub_ls)
for cfg in sub_ls:
@@ -200,7 +159,8 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
rep_idl.sort()
# maybe there is a better way to print the idls
print(item, ':', no_cfg, ' configurations')
if not silent:
print(item, ':', no_cfg, ' configurations')
idl.append(rep_idl)
# here we have found all the files we need to look into.
if i == 0:
@@ -209,88 +169,37 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
# to do so, the pattern needed is put together
# from the input values
if version == "0.0":
pattern = "# " + name + " : offset " + str(noffset) + ", wf " + str(wf)
# if b2b, a second wf is needed
if b2b:
pattern += ", wf_2 " + str(wf2)
qs = quarks.split(" ")
pattern += " : " + qs[0] + " - " + qs[1]
file = open(path + '/' + item + '/' + sub_ls[0] + '/' + name, "r")
for k, line in enumerate(file):
if read == 1 and not line.strip() and k > start + 1:
break
if read == 1 and k >= start:
T += 1
if pattern in line:
read = 1
start = k + 1
print(str(T) + " entries found.")
file.close()
file = path + '/' + item + '/' + sub_ls[0] + '/' + name
else:
pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
if b2b:
pattern += '\nwf_2 ' + str(wf2)
# and the file is parsed through to find the pattern
if compact:
file = open(path + '/' + item + '/' + sub_ls[0], "r")
file = path + '/' + item + '/' + sub_ls[0]
else:
# for non-compactified versions of the files
file = open(path + '/' + item + '/' + sub_ls[0] + '/' + name, "r")
file = path + '/' + item + '/' + sub_ls[0] + '/' + name
content = file.read()
match = re.search(pattern, content)
if match:
start_read = content.count('\n', 0, match.start()) + 5 + b2b
end_match = re.search(r'\n\s*\n', content[match.start():])
T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
assert T > 0
print(T, 'entries, starting to read in line', start_read)
file.close()
else:
file.close()
raise Exception('Correlator with pattern\n' + pattern + '\nnot found.')
pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
start_read, T = _find_correlator(file, version, pattern, b2b, silent=silent)
# we found where the correlator
# that is to be read is in the files
# after preparing the datastructure
# preparing the datastructure
# the correlators get parsed into...
deltas = []
for j in range(T):
deltas.append([])
for t in range(T):
deltas[t].append(np.zeros(no_cfg))
if compact:
for cfg in range(no_cfg):
with open(path + '/' + item + '/' + sub_ls[cfg]) as fp:
lines = fp.readlines()
# check, if the correlator is in fact
# printed completely
if (start_read + T > len(lines)):
raise Exception("EOF before end of correlator data! Maybe " + path + '/' + item + '/' + sub_ls[cfg] + " is corrupted?")
# and start to read the correlator.
# the range here is chosen like this,
# since this allows for implementing
# a security check for every read correlator later...
for k in range(start_read - 6, start_read + T):
if k == start_read - 5 - b2b:
if lines[k].strip() != 'name ' + name:
raise Exception('Wrong format', sub_ls[cfg])
if (k >= start_read and k < start_read + T):
floats = list(map(float, lines[k].split()))
deltas[k - start_read][i][cfg] = floats[-2:][im]
rep_deltas = _read_compact_rep(path, item, sub_ls, start_read, T, b2b, name, im)
for t in range(T):
deltas[t].append(rep_deltas[t])
else:
for t in range(T):
deltas[t].append(np.zeros(no_cfg))
for cnfg, subitem in enumerate(sub_ls):
with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
# since the non-compactified files
# are typically not so long,
# we can iterate over the whole file.
# here one can also implement the check from above.
for k, line in enumerate(fp):
if (k >= start_read and k < start_read + T):
floats = list(map(float, line.split()))
if version == "0.0":
deltas[k - start][i][cnfg] = floats[im - single]
deltas[k - start_read][i][cnfg] = floats[im - single]
else:
deltas[k - start_read][i][cnfg] = floats[1 + im - single]
@@ -301,71 +210,237 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0,
for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name):
ls = list(set(ls) - set([exc]))
ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
if b2b:
pattern += '\nwf_2 ' + str(wf2)
ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
deltas = []
for rep, file in enumerate(ls):
rep_idl = []
with open(path + '/' + file, 'r') as fp:
content = fp.readlines()
data_starts = []
for linenumber, line in enumerate(content):
if "[run]" in line:
data_starts.append(linenumber)
if len(set([data_starts[i] - data_starts[i - 1] for i in range(1, len(data_starts))])) > 1:
raise Exception("Irregularities in file structure found, not all runs have the same output length")
chunk = content[:data_starts[1]]
for linenumber, line in enumerate(chunk):
if line.startswith("gauge_name"):
gauge_line = linenumber
elif line.startswith("[correlator]"):
corr_line = linenumber
found_pat = ""
for li in chunk[corr_line + 1: corr_line + 6 + b2b]:
found_pat += li
if re.search(pattern, found_pat):
start_read = corr_line + 7 + b2b
break
endline = corr_line + 6 + b2b
while not chunk[endline] == "\n":
endline += 1
T = endline - start_read
if rep == 0:
deltas = []
for t in range(T):
deltas.append([])
filename = path + '/' + file
T, rep_idl, rep_data = _read_append_rep(filename, pattern, b2b, cfg_separator, im, single)
if rep == 0:
for t in range(T):
deltas[t].append(np.zeros(len(data_starts)))
# all other chunks should follow the same structure
for cnfg in range(len(data_starts)):
start = data_starts[cnfg]
stop = start + data_starts[1]
chunk = content[start:stop]
try:
rep_idl.append(int(chunk[gauge_line].split(cfg_separator)[-1]))
except Exception:
raise Exception("Couldn't parse idl from directory, problem with chunk around line ", gauge_line)
found_pat = ""
for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
found_pat += li
if re.search(pattern, found_pat):
for t, line in enumerate(chunk[start_read:start_read + T]):
floats = list(map(float, line.split()))
deltas[t][rep][cnfg] = floats[im + 1 - single]
deltas.append([])
for t in range(T):
deltas[t].append(rep_data[t])
idl.append(rep_idl)
if "check_configs" in kwargs:
print("Checking for missing configs...")
if not silent:
print("Checking for missing configs...")
che = kwargs.get("check_configs")
if not (len(che) == len(idl)):
raise Exception("check_configs has to be the same length as replica!")
for r in range(len(idl)):
print("checking " + new_names[r])
utils.check_idl(idl[r], che[r])
print("Done")
if not silent:
print("checking " + new_names[r])
check_idl(idl[r], che[r])
if not silent:
print("Done")
result = []
for t in range(T):
result.append(Obs(deltas[t], new_names, idl=idl))
return result
def _find_files(rep_path, prefix, compact, files=[]):
sub_ls = []
if not files == []:
files.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
else:
for (dirpath, dirnames, filenames) in os.walk(rep_path):
if compact:
sub_ls.extend(filenames)
else:
sub_ls.extend(dirnames)
break
if compact:
for exc in sub_ls:
if not fnmatch.fnmatch(exc, prefix + '*'):
sub_ls = list(set(sub_ls) - set([exc]))
sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
else:
for exc in sub_ls:
if not fnmatch.fnmatch(exc, 'cfg*'):
sub_ls = list(set(sub_ls) - set([exc]))
sub_ls.sort(key=lambda x: int(x[3:]))
files = sub_ls
if len(files) == 0:
raise FileNotFoundError("Did not find files in", rep_path, "with prefix", prefix, "and the given structure.")
return files
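A hedged sketch of calling the factored-out helper on a compact replica directory; the directory and prefix names are hypothetical:

# compact format: one file per configuration inside the replica directory
sub_ls = _find_files("data/sfcf_run_r0", "sfcf_run", compact=True)
# files come back sorted by their trailing configuration number,
# e.g. ['sfcf_run_n1', 'sfcf_run_n2', 'sfcf_run_n10']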
def _make_pattern(version, name, noffset, wf, wf2, b2b, quarks):
if version == "0.0":
pattern = "# " + name + " : offset " + str(noffset) + ", wf " + str(wf)
if b2b:
pattern += ", wf_2 " + str(wf2)
qs = quarks.split(" ")
pattern += " : " + qs[0] + " - " + qs[1]
else:
pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
if b2b:
pattern += '\nwf_2 ' + str(wf2)
return pattern
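A sketch of the header pattern the helper assembles; the correlator name and quark content are hypothetical:

pattern = _make_pattern("2.0c", "f_A", 0, 0, 0, False, "lquark lquark")
# -> 'name f_A\nquarks lquark lquark\noffset 0\nwf 0'
# with b2b truthy, an additional '\nwf_2 <wf2>' line is appended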
def _find_correlator(file_name, version, pattern, b2b, silent=False):
T = 0
file = open(file_name, "r")
content = file.read()
match = re.search(pattern, content)
if match:
if version == "0.0":
start_read = content.count('\n', 0, match.start()) + 1
T = content.count('\n', start_read)
else:
start_read = content.count('\n', 0, match.start()) + 5 + b2b
end_match = re.search(r'\n\s*\n', content[match.start():])
T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
if not T > 0:
raise ValueError("Correlator with pattern\n" + pattern + "\nis empty!")
if not silent:
print(T, 'entries, starting to read in line', start_read)
else:
file.close()
raise ValueError('Correlator with pattern\n' + pattern + '\nnot found.')
file.close()
return start_read, T
def _read_compact_file(rep_path, config_file, start_read, T, b2b, name, im):
with open(rep_path + config_file) as fp:
lines = fp.readlines()
# check, if the correlator is in fact
# printed completely
if (start_read + T + 1 > len(lines)):
raise Exception("EOF before end of correlator data! Maybe " + rep_path + config_file + " is corrupted?")
corr_lines = lines[start_read - 6: start_read + T]
del lines
t_vals = []
if corr_lines[1 - b2b].strip() != 'name ' + name:
raise Exception('Wrong format in file', config_file)
for k in range(6, T + 6):
floats = list(map(float, corr_lines[k].split()))
t_vals.append(floats[-2:][im])
return t_vals
def _read_compact_rep(path, rep, sub_ls, start_read, T, b2b, name, im):
rep_path = path + '/' + rep + '/'
no_cfg = len(sub_ls)
deltas = []
for t in range(T):
deltas.append(np.zeros(no_cfg))
for cfg in range(no_cfg):
cfg_file = sub_ls[cfg]
cfg_data = _read_compact_file(rep_path, cfg_file, start_read, T, b2b, name, im)
for t in range(T):
deltas[t][cfg] = cfg_data[t]
return deltas
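The helper returns the replicum data as a T-by-no_cfg structure; a minimal sketch, assuming two hypothetical configuration files:

rep_deltas = _read_compact_rep("data", "sfcf_run_r0", ["sfcf_run_n1", "sfcf_run_n2"], start_read=8, T=3, b2b=0, name="f_A", im=0)
# rep_deltas[t][cfg] holds the value of timeslice t on configuration cfg
assert len(rep_deltas) == 3 and len(rep_deltas[0]) == 2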
def _read_chunk(chunk, gauge_line, cfg_sep, start_read, T, corr_line, b2b, pattern, im, single):
try:
idl = int(chunk[gauge_line].split(cfg_sep)[-1])
except Exception:
raise Exception("Couldn't parse idl from directory, problem with chunk around line ", gauge_line)
found_pat = ""
data = []
for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
found_pat += li
if re.search(pattern, found_pat):
for t, line in enumerate(chunk[start_read:start_read + T]):
floats = list(map(float, line.split()))
data.append(floats[im + 1 - single])
return idl, data
def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single):
with open(filename, 'r') as fp:
content = fp.readlines()
data_starts = []
for linenumber, line in enumerate(content):
if "[run]" in line:
data_starts.append(linenumber)
if len(set([data_starts[i] - data_starts[i - 1] for i in range(1, len(data_starts))])) > 1:
raise Exception("Irregularities in file structure found, not all runs have the same output length")
chunk = content[:data_starts[1]]
for linenumber, line in enumerate(chunk):
if line.startswith("gauge_name"):
gauge_line = linenumber
elif line.startswith("[correlator]"):
corr_line = linenumber
found_pat = ""
for li in chunk[corr_line + 1: corr_line + 6 + b2b]:
found_pat += li
if re.search(pattern, found_pat):
start_read = corr_line + 7 + b2b
break
else:
raise ValueError("Did not find pattern\n", pattern, "\nin\n", filename)
endline = corr_line + 6 + b2b
while not chunk[endline] == "\n":
endline += 1
T = endline - start_read
# all other chunks should follow the same structure
rep_idl = []
rep_data = []
for cnfg in range(len(data_starts)):
start = data_starts[cnfg]
stop = start + data_starts[1]
chunk = content[start:stop]
idl, data = _read_chunk(chunk, gauge_line, cfg_separator, start_read, T, corr_line, b2b, pattern, im, single)
rep_idl.append(idl)
rep_data.append(data)
data = []
for t in range(T):
data.append([])
for c in range(len(rep_data)):
data[t].append(rep_data[c][t])
return T, rep_idl, data
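A sketch of the appended-mode entry point, which now raises if the pattern is missing; the file name is hypothetical:

pattern = _make_pattern("2.0c", "f_A", 0, 0, 0, False, "lquark lquark")
T, rep_idl, rep_data = _read_append_rep("data/sfcf_run_r0.f_A", pattern, False, "n", im=0, single=False)
# T timeslices, one idl per [run] chunk, and rep_data[t][cnfg] laid out
# to match what the compact reader produces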
def _get_rep_names(ls, ens_name=None):
new_names = []
for entry in ls:
try:
idx = entry.index('r')
except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if ens_name:
new_names.append(ens_name + '|' + entry[idx:])
else:
new_names.append(entry[:idx] + '|' + entry[idx:])
return new_names
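A sketch of the resulting pyerrors replica names, assuming the ens_name fix above; the directory names are hypothetical:

_get_rep_names(['data_r0', 'data_r1'])  # -> ['data_|r0', 'data_|r1']
_get_rep_names(['data_r0'], ens_name='myens')  # -> ['myens|r0']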
def _get_appended_rep_names(ls, prefix, name, ens_name=None):
new_names = []
for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name):
ls = list(set(ls) - set([exc]))
ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
for entry in ls:
myentry = entry[:-len(name) - 1]
try:
idx = myentry.index('r')
except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if ens_name:
new_names.append(ens_name + '|' + myentry[idx:])
else:
new_names.append(myentry[:idx] + '|' + myentry[idx:])
return new_names

pyerrors/input/utils.py

@@ -1,6 +1,51 @@
import re
"""Utilities for the input"""
def sort_names(ll):
"""Sorts a list of names of replika with searches for `r` and `id` in the replikum string.
If this search fails, a fallback method is used,
where the strings are simply compared and the first diffeing numeral is used for differentiation.
Parameters
----------
ll: list
list to sort
Returns
-------
ll: list
sorted list
"""
if len(ll) > 1:
r_pattern = r'r(\d+)'
id_pattern = r'id(\d+)'
# sort list by id first
if all([re.search(id_pattern, entry) for entry in ll]):
ll.sort(key=lambda x: int(re.findall(id_pattern, x)[0]))
# then by replikum
if all([re.search(r_pattern, entry) for entry in ll]):
ll.sort(key=lambda x: int(re.findall(r_pattern, x)[0]))
# as the rearrangement by one key leaves the other key untouched, the list is now sorted
else:
# fallback
sames = ''
if len(ll) > 1:
for i in range(len(ll[0])):
checking = ll[0][i]
for rn in ll[1:]:
is_same = (rn[i] == checking)
if is_same:
sames += checking
else:
break
print("Using prefix:", ll[0][len(sames):])
ll.sort(key=lambda x: int(re.findall(r'\d+', x[len(sames):])[0]))
return ll
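A sketch of the two sorting paths; the names are hypothetical:

sort_names(['data_r1id3', 'data_r0id1', 'data_r0id2'])
# id key first, then replikum key -> ['data_r0id1', 'data_r0id2', 'data_r1id3']
sort_names(['run_2', 'run_10', 'run_1'])
# fallback: the shared prefix 'run_' is stripped, the first numeral decides
# -> ['run_1', 'run_2', 'run_10']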
def check_idl(idl, che):
"""Checks if list of configurations is contained in an idl