pyerrors.input.sfcf
View Source
import os
import fnmatch
import re
import numpy as np  # Thinly-wrapped numpy
from ..obs import Obs
from . import utils


def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", **kwargs):
    """Read sfcf c format from given folder structure.

    Parameters
    ----------
    path : str
        Path to the parent directory containing the replica folders
        (or, for appended output, the replica files).
    prefix : str
        Ensemble prefix; only folders/files matching ``prefix*`` are read.
    name : str
        Name of the correlator to be read.
    quarks: str
        Label of the quarks used in the sfcf input file. e.g. "quark quark"
        for version 0.0 this does NOT need to be given with the typical " - "
        that is present in the output file,
        this is done automatically for this version
    noffset: int
        Offset of the source (only relevant when wavefunctions are used)
    wf: int
        ID of wave function
    wf2: int
        ID of the second wavefunction
        (only relevant for boundary-to-boundary correlation functions)
    im: bool
        if True, read imaginary instead of real part
        of the correlation function.
    corr_type : str
        change between bi (boundary - inner) (default), bib (boundary - inner - boundary) and bb (boundary - boundary)
        correlator types
    names : list
        Alternative labeling for replicas/ensembles.
        Has to have the appropriate length
    ens_name : str
        replaces the name of the ensemble
    version: str
        version of SFCF, with which the measurement was done.
        if the compact output option (-c) was specified,
        append a "c" to the version (e.g. "1.0c")
        if the append output option (-a) was specified,
        append an "a" to the version
    cfg_separator : str
        String that separates the ensemble identifier from the configuration number (default 'n').
    replica: list
        list of replica to be read, default is all
    files: list
        list of files to be read per replica, default is all.
        for non-compact output format, hand the folders to be read here.
    check_configs:
        list of lists of supposed configs, e.g. [range(1, 1000)]
        for one replicum with 1000 configs

    Returns
    -------
    result : list of Obs
        One Obs per timeslice of the requested correlator.
    """
    # Select real (column offset 0) or imaginary (offset 1) part.
    if kwargs.get('im'):
        im = 1
        part = 'imaginary'
    else:
        im = 0
        part = 'real'
    if "replica" in kwargs:
        reps = kwargs.get("replica")
    # b2b: correlator touches both boundaries; single: the file carries one
    # data column fewer per timeslice (boundary-to-boundary case).
    if corr_type == 'bb':
        b2b = True
        single = True
    elif corr_type == 'bib':
        b2b = True
        single = False
    else:
        b2b = False
        single = False
    compact = True
    appended = False
    known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]

    if version not in known_versions:
        raise Exception("This version is not known!")
    # A trailing "c"/"a" in the version string selects the output layout;
    # the suffix is stripped so only the numeric version remains below.
    if(version[-1] == "c"):
        appended = False
        compact = True
        version = version[:-1]
    elif(version[-1] == "a"):
        appended = True
        compact = False
        version = version[:-1]
    else:
        compact = False
        appended = False
    read = 0
    T = 0  # number of timeslices, determined from the first file read
    start = 0
    ls = []
    if "replica" in kwargs:
        ls = reps
    else:
        # Top level of the folder structure: replica directories
        # (or replica files for appended output).
        for (dirpath, dirnames, filenames) in os.walk(path):
            if not appended:
                ls.extend(dirnames)
            else:
                ls.extend(filenames)
            break  # only the top level is needed
        if not ls:
            raise Exception('Error, directory not found')
        # Exclude folders with different names
        for exc in ls:
            if not fnmatch.fnmatch(exc, prefix + '*'):
                ls = list(set(ls) - set([exc]))

    if not appended:
        if len(ls) > 1:
            # New version, to cope with ids, etc.
            ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
        replica = len(ls)
    else:
        # One file per replicum and correlator name: total file count divided
        # by the number of distinct name suffixes gives the replica count.
        replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
    print('Read', part, 'part of', name, 'from', prefix[:-1],
          ',', replica, 'replica')
    if 'names' in kwargs:
        new_names = kwargs.get('names')
        if len(new_names) != len(set(new_names)):
            raise Exception("names are not unique!")
        if len(new_names) != replica:
            raise Exception('Names does not have the required length', replica)
    else:
        new_names = []
        if not appended:
            # Derive "ensemble|replicum" labels from the folder names; the
            # first 'r' in a name is taken as the replicum separator.
            for entry in ls:
                try:
                    idx = entry.index('r')
                except Exception:
                    raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

                if 'ens_name' in kwargs:
                    new_names.append(kwargs.get('ens_name') + '|' + entry[idx:])
                else:
                    new_names.append(entry[:idx] + '|' + entry[idx:])
        else:
            # Appended output: keep only files ending in ".<name>".
            for exc in ls:
                if not fnmatch.fnmatch(exc, prefix + '*.' + name):
                    ls = list(set(ls) - set([exc]))
            ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
            for entry in ls:
                myentry = entry[:-len(name) - 1]  # strip the ".<name>" suffix
                try:
                    idx = myentry.index('r')
                except Exception:
                    raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

                if 'ens_name' in kwargs:
                    new_names.append(kwargs.get('ens_name') + '|' + myentry[idx:])
                else:
                    new_names.append(myentry[:idx] + '|' + myentry[idx:])
    idl = []  # per-replica lists of configuration numbers
    if not appended:
        for i, item in enumerate(ls):
            sub_ls = []
            if "files" in kwargs:
                sub_ls = kwargs.get("files")
                sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
            else:
                # Collect the per-configuration files (compact) or the
                # cfg* directories (non-compact) of this replicum.
                for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
                    if compact:
                        sub_ls.extend(filenames)
                    else:
                        sub_ls.extend(dirnames)
                    break
                if compact:
                    for exc in sub_ls:
                        if not fnmatch.fnmatch(exc, prefix + '*'):
                            sub_ls = list(set(sub_ls) - set([exc]))
                    sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
                else:
                    for exc in sub_ls:
                        if not fnmatch.fnmatch(exc, 'cfg*'):
                            sub_ls = list(set(sub_ls) - set([exc]))
                    sub_ls.sort(key=lambda x: int(x[3:]))
            rep_idl = []
            no_cfg = len(sub_ls)
            for cfg in sub_ls:
                try:
                    if compact:
                        rep_idl.append(int(cfg.split(cfg_separator)[-1]))
                    else:
                        rep_idl.append(int(cfg[3:]))  # strip leading "cfg"
                except Exception:
                    raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
            rep_idl.sort()
            # maybe there is a better way to print the idls
            print(item, ':', no_cfg, ' configurations')
            idl.append(rep_idl)
            # here we have found all the files we need to look into.
            if i == 0:
                # here, we want to find the place within the file,
                # where the correlator we need is stored.
                # to do so, the pattern needed is put together
                # from the input values
                if version == "0.0":
                    pattern = "# " + name + " : offset " + str(noffset) + ", wf " + str(wf)
                    # if b2b, a second wf is needed
                    if b2b:
                        pattern += ", wf_2 " + str(wf2)
                    qs = quarks.split(" ")
                    pattern += " : " + qs[0] + " - " + qs[1]
                    file = open(path + '/' + item + '/' + sub_ls[0] + '/' + name, "r")
                    # count the data lines (= T) following the header pattern,
                    # up to the next blank line
                    for k, line in enumerate(file):
                        if read == 1 and not line.strip() and k > start + 1:
                            break
                        if read == 1 and k >= start:
                            T += 1
                        if pattern in line:
                            read = 1
                            start = k + 1
                    print(str(T) + " entries found.")
                    file.close()
                else:
                    pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
                    if b2b:
                        pattern += '\nwf_2 ' + str(wf2)
                    # and the file is parsed through to find the pattern
                    if compact:
                        file = open(path + '/' + item + '/' + sub_ls[0], "r")
                    else:
                        # for non-compactified versions of the files
                        file = open(path + '/' + item + '/' + sub_ls[0] + '/' + name, "r")

                    content = file.read()
                    match = re.search(pattern, content)
                    if match:
                        # data begins 5 (+1 for b2b) lines after the header
                        # match; T counts lines up to the next blank line
                        start_read = content.count('\n', 0, match.start()) + 5 + b2b
                        end_match = re.search(r'\n\s*\n', content[match.start():])
                        T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
                        assert T > 0
                        print(T, 'entries, starting to read in line', start_read)
                        file.close()
                    else:
                        file.close()
                        raise Exception('Correlator with pattern\n' + pattern + '\nnot found.')

                # we found where the correlator
                # that is to be read is in the files
                # after preparing the datastructure
                # the correlators get parsed into...
                deltas = []
                for j in range(T):
                    deltas.append([])

            # one array of length no_cfg per timeslice for this replicum
            for t in range(T):
                deltas[t].append(np.zeros(no_cfg))
            if compact:
                for cfg in range(no_cfg):
                    with open(path + '/' + item + '/' + sub_ls[cfg]) as fp:
                        lines = fp.readlines()
                        # check, if the correlator is in fact
                        # printed completely
                        if(start_read + T > len(lines)):
                            raise Exception("EOF before end of correlator data! Maybe " + path + '/' + item + '/' + sub_ls[cfg] + " is corrupted?")
                        # and start to read the correlator.
                        # the range here is chosen like this,
                        # since this allows for implementing
                        # a security check for every read correlator later...
                        for k in range(start_read - 6, start_read + T):
                            if k == start_read - 5 - b2b:
                                # sanity check: this correlator block's header
                                # must carry the requested name
                                if lines[k].strip() != 'name ' + name:
                                    raise Exception('Wrong format', sub_ls[cfg])
                            if(k >= start_read and k < start_read + T):
                                floats = list(map(float, lines[k].split()))
                                # the last two columns are selected; im picks
                                # which of the two is kept (real vs imaginary)
                                deltas[k - start_read][i][cfg] = floats[-2:][im]
            else:
                for cnfg, subitem in enumerate(sub_ls):
                    with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
                        # since the non-compactified files
                        # are typically not so long,
                        # we can iterate over the whole file.
                        # here one can also implement the check from above.
                        # NOTE(review): for version "0.0" this branch uses
                        # start_read, which is only assigned in the non-0.0
                        # pattern search above — verify the 0.0 non-compact
                        # path is actually exercised/correct.
                        for k, line in enumerate(fp):
                            if(k >= start_read and k < start_read + T):
                                floats = list(map(float, line.split()))
                                if version == "0.0":
                                    deltas[k - start][i][cnfg] = floats[im - single]
                                else:
                                    deltas[k - start_read][i][cnfg] = floats[1 + im - single]

    else:
        # appended output: all configurations of a replicum live in a single
        # file, separated by "[run]" headers.
        if "files" in kwargs:
            ls = kwargs.get("files")
        else:
            for exc in ls:
                if not fnmatch.fnmatch(exc, prefix + '*.' + name):
                    ls = list(set(ls) - set([exc]))
            ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
        pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
        if b2b:
            pattern += '\nwf_2 ' + str(wf2)
        for rep, file in enumerate(ls):
            rep_idl = []
            with open(path + '/' + file, 'r') as fp:
                content = fp.readlines()
                data_starts = []
                for linenumber, line in enumerate(content):
                    if "[run]" in line:
                        data_starts.append(linenumber)
                # all chunks must have the same length, otherwise the offsets
                # computed from the first chunk would be wrong for the rest
                if len(set([data_starts[i] - data_starts[i - 1] for i in range(1, len(data_starts))])) > 1:
                    raise Exception("Irregularities in file structure found, not all runs have the same output length")
                # locate the requested correlator within the first chunk; the
                # line offsets found here are reused for all later chunks
                chunk = content[:data_starts[1]]
                for linenumber, line in enumerate(chunk):
                    if line.startswith("gauge_name"):
                        gauge_line = linenumber
                    elif line.startswith("[correlator]"):
                        corr_line = linenumber
                        found_pat = ""
                        for li in chunk[corr_line + 1: corr_line + 6 + b2b]:
                            found_pat += li
                        if re.search(pattern, found_pat):
                            start_read = corr_line + 7 + b2b
                            break
                # the data block runs until the first blank line
                endline = corr_line + 6 + b2b
                while not chunk[endline] == "\n":
                    endline += 1
                T = endline - start_read
                if rep == 0:
                    deltas = []
                    for t in range(T):
                        deltas.append([])
                for t in range(T):
                    deltas[t].append(np.zeros(len(data_starts)))
                # all other chunks should follow the same structure
                for cnfg in range(len(data_starts)):
                    start = data_starts[cnfg]
                    stop = start + data_starts[1]
                    chunk = content[start:stop]
                    try:
                        rep_idl.append(int(chunk[gauge_line].split(cfg_separator)[-1]))
                    except Exception:
                        raise Exception("Couldn't parse idl from directory, problem with chunk around line ", gauge_line)

                    found_pat = ""
                    for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
                        found_pat += li
                    if re.search(pattern, found_pat):
                        for t, line in enumerate(chunk[start_read:start_read + T]):
                            floats = list(map(float, line.split()))
                            deltas[t][rep][cnfg] = floats[im + 1 - single]
            idl.append(rep_idl)

    if "check_configs" in kwargs:
        print("Checking for missing configs...")
        che = kwargs.get("check_configs")
        if not (len(che) == len(idl)):
            raise Exception("check_configs has to be the same length as replica!")
        for r in range(len(idl)):
            print("checking " + new_names[r])
            utils.check_idl(idl[r], che[r])
        print("Done")
    # one Obs per timeslice, spanning all replicas
    result = []
    for t in range(T):
        result.append(Obs(deltas[t], new_names, idl=idl))
    return result
#  
def
read_sfcf(
path,
prefix,
name,
quarks='.*',
corr_type='bi',
noffset=0,
wf=0,
wf2=0,
version='1.0c',
cfg_separator='n',
**kwargs
):
View Source
def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", **kwargs):
    """Read sfcf c format from given folder structure.

    Parameters
    ----------
    path : str
        Path to the parent directory containing the replica folders
        (or, for appended output, the replica files).
    prefix : str
        Ensemble prefix; only folders/files matching ``prefix*`` are read.
    name : str
        Name of the correlator to be read.
    quarks: str
        Label of the quarks used in the sfcf input file. e.g. "quark quark"
        for version 0.0 this does NOT need to be given with the typical " - "
        that is present in the output file,
        this is done automatically for this version
    noffset: int
        Offset of the source (only relevant when wavefunctions are used)
    wf: int
        ID of wave function
    wf2: int
        ID of the second wavefunction
        (only relevant for boundary-to-boundary correlation functions)
    im: bool
        if True, read imaginary instead of real part
        of the correlation function.
    corr_type : str
        change between bi (boundary - inner) (default), bib (boundary - inner - boundary) and bb (boundary - boundary)
        correlator types
    names : list
        Alternative labeling for replicas/ensembles.
        Has to have the appropriate length
    ens_name : str
        replaces the name of the ensemble
    version: str
        version of SFCF, with which the measurement was done.
        if the compact output option (-c) was specified,
        append a "c" to the version (e.g. "1.0c")
        if the append output option (-a) was specified,
        append an "a" to the version
    cfg_separator : str
        String that separates the ensemble identifier from the configuration number (default 'n').
    replica: list
        list of replica to be read, default is all
    files: list
        list of files to be read per replica, default is all.
        for non-compact output format, hand the folders to be read here.
    check_configs:
        list of lists of supposed configs, e.g. [range(1, 1000)]
        for one replicum with 1000 configs

    Returns
    -------
    result : list of Obs
        One Obs per timeslice of the requested correlator.
    """
    # Select real (column offset 0) or imaginary (offset 1) part.
    if kwargs.get('im'):
        im = 1
        part = 'imaginary'
    else:
        im = 0
        part = 'real'
    if "replica" in kwargs:
        reps = kwargs.get("replica")
    # b2b: correlator touches both boundaries; single: the file carries one
    # data column fewer per timeslice (boundary-to-boundary case).
    if corr_type == 'bb':
        b2b = True
        single = True
    elif corr_type == 'bib':
        b2b = True
        single = False
    else:
        b2b = False
        single = False
    compact = True
    appended = False
    known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]

    if version not in known_versions:
        raise Exception("This version is not known!")
    # A trailing "c"/"a" in the version string selects the output layout;
    # the suffix is stripped so only the numeric version remains below.
    if(version[-1] == "c"):
        appended = False
        compact = True
        version = version[:-1]
    elif(version[-1] == "a"):
        appended = True
        compact = False
        version = version[:-1]
    else:
        compact = False
        appended = False
    read = 0
    T = 0  # number of timeslices, determined from the first file read
    start = 0
    ls = []
    if "replica" in kwargs:
        ls = reps
    else:
        # Top level of the folder structure: replica directories
        # (or replica files for appended output).
        for (dirpath, dirnames, filenames) in os.walk(path):
            if not appended:
                ls.extend(dirnames)
            else:
                ls.extend(filenames)
            break  # only the top level is needed
        if not ls:
            raise Exception('Error, directory not found')
        # Exclude folders with different names
        for exc in ls:
            if not fnmatch.fnmatch(exc, prefix + '*'):
                ls = list(set(ls) - set([exc]))

    if not appended:
        if len(ls) > 1:
            # New version, to cope with ids, etc.
            ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
        replica = len(ls)
    else:
        # One file per replicum and correlator name: total file count divided
        # by the number of distinct name suffixes gives the replica count.
        replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
    print('Read', part, 'part of', name, 'from', prefix[:-1],
          ',', replica, 'replica')
    if 'names' in kwargs:
        new_names = kwargs.get('names')
        if len(new_names) != len(set(new_names)):
            raise Exception("names are not unique!")
        if len(new_names) != replica:
            raise Exception('Names does not have the required length', replica)
    else:
        new_names = []
        if not appended:
            # Derive "ensemble|replicum" labels from the folder names; the
            # first 'r' in a name is taken as the replicum separator.
            for entry in ls:
                try:
                    idx = entry.index('r')
                except Exception:
                    raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

                if 'ens_name' in kwargs:
                    new_names.append(kwargs.get('ens_name') + '|' + entry[idx:])
                else:
                    new_names.append(entry[:idx] + '|' + entry[idx:])
        else:
            # Appended output: keep only files ending in ".<name>".
            for exc in ls:
                if not fnmatch.fnmatch(exc, prefix + '*.' + name):
                    ls = list(set(ls) - set([exc]))
            ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
            for entry in ls:
                myentry = entry[:-len(name) - 1]  # strip the ".<name>" suffix
                try:
                    idx = myentry.index('r')
                except Exception:
                    raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

                if 'ens_name' in kwargs:
                    new_names.append(kwargs.get('ens_name') + '|' + myentry[idx:])
                else:
                    new_names.append(myentry[:idx] + '|' + myentry[idx:])
    idl = []  # per-replica lists of configuration numbers
    if not appended:
        for i, item in enumerate(ls):
            sub_ls = []
            if "files" in kwargs:
                sub_ls = kwargs.get("files")
                sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
            else:
                # Collect the per-configuration files (compact) or the
                # cfg* directories (non-compact) of this replicum.
                for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
                    if compact:
                        sub_ls.extend(filenames)
                    else:
                        sub_ls.extend(dirnames)
                    break
                if compact:
                    for exc in sub_ls:
                        if not fnmatch.fnmatch(exc, prefix + '*'):
                            sub_ls = list(set(sub_ls) - set([exc]))
                    sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
                else:
                    for exc in sub_ls:
                        if not fnmatch.fnmatch(exc, 'cfg*'):
                            sub_ls = list(set(sub_ls) - set([exc]))
                    sub_ls.sort(key=lambda x: int(x[3:]))
            rep_idl = []
            no_cfg = len(sub_ls)
            for cfg in sub_ls:
                try:
                    if compact:
                        rep_idl.append(int(cfg.split(cfg_separator)[-1]))
                    else:
                        rep_idl.append(int(cfg[3:]))  # strip leading "cfg"
                except Exception:
                    raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
            rep_idl.sort()
            # maybe there is a better way to print the idls
            print(item, ':', no_cfg, ' configurations')
            idl.append(rep_idl)
            # here we have found all the files we need to look into.
            if i == 0:
                # here, we want to find the place within the file,
                # where the correlator we need is stored.
                # to do so, the pattern needed is put together
                # from the input values
                if version == "0.0":
                    pattern = "# " + name + " : offset " + str(noffset) + ", wf " + str(wf)
                    # if b2b, a second wf is needed
                    if b2b:
                        pattern += ", wf_2 " + str(wf2)
                    qs = quarks.split(" ")
                    pattern += " : " + qs[0] + " - " + qs[1]
                    file = open(path + '/' + item + '/' + sub_ls[0] + '/' + name, "r")
                    # count the data lines (= T) following the header pattern,
                    # up to the next blank line
                    for k, line in enumerate(file):
                        if read == 1 and not line.strip() and k > start + 1:
                            break
                        if read == 1 and k >= start:
                            T += 1
                        if pattern in line:
                            read = 1
                            start = k + 1
                    print(str(T) + " entries found.")
                    file.close()
                else:
                    pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
                    if b2b:
                        pattern += '\nwf_2 ' + str(wf2)
                    # and the file is parsed through to find the pattern
                    if compact:
                        file = open(path + '/' + item + '/' + sub_ls[0], "r")
                    else:
                        # for non-compactified versions of the files
                        file = open(path + '/' + item + '/' + sub_ls[0] + '/' + name, "r")

                    content = file.read()
                    match = re.search(pattern, content)
                    if match:
                        # data begins 5 (+1 for b2b) lines after the header
                        # match; T counts lines up to the next blank line
                        start_read = content.count('\n', 0, match.start()) + 5 + b2b
                        end_match = re.search(r'\n\s*\n', content[match.start():])
                        T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
                        assert T > 0
                        print(T, 'entries, starting to read in line', start_read)
                        file.close()
                    else:
                        file.close()
                        raise Exception('Correlator with pattern\n' + pattern + '\nnot found.')

                # we found where the correlator
                # that is to be read is in the files
                # after preparing the datastructure
                # the correlators get parsed into...
                deltas = []
                for j in range(T):
                    deltas.append([])

            # one array of length no_cfg per timeslice for this replicum
            for t in range(T):
                deltas[t].append(np.zeros(no_cfg))
            if compact:
                for cfg in range(no_cfg):
                    with open(path + '/' + item + '/' + sub_ls[cfg]) as fp:
                        lines = fp.readlines()
                        # check, if the correlator is in fact
                        # printed completely
                        if(start_read + T > len(lines)):
                            raise Exception("EOF before end of correlator data! Maybe " + path + '/' + item + '/' + sub_ls[cfg] + " is corrupted?")
                        # and start to read the correlator.
                        # the range here is chosen like this,
                        # since this allows for implementing
                        # a security check for every read correlator later...
                        for k in range(start_read - 6, start_read + T):
                            if k == start_read - 5 - b2b:
                                # sanity check: this correlator block's header
                                # must carry the requested name
                                if lines[k].strip() != 'name ' + name:
                                    raise Exception('Wrong format', sub_ls[cfg])
                            if(k >= start_read and k < start_read + T):
                                floats = list(map(float, lines[k].split()))
                                # the last two columns are selected; im picks
                                # which of the two is kept (real vs imaginary)
                                deltas[k - start_read][i][cfg] = floats[-2:][im]
            else:
                for cnfg, subitem in enumerate(sub_ls):
                    with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
                        # since the non-compactified files
                        # are typically not so long,
                        # we can iterate over the whole file.
                        # here one can also implement the check from above.
                        # NOTE(review): for version "0.0" this branch uses
                        # start_read, which is only assigned in the non-0.0
                        # pattern search above — verify the 0.0 non-compact
                        # path is actually exercised/correct.
                        for k, line in enumerate(fp):
                            if(k >= start_read and k < start_read + T):
                                floats = list(map(float, line.split()))
                                if version == "0.0":
                                    deltas[k - start][i][cnfg] = floats[im - single]
                                else:
                                    deltas[k - start_read][i][cnfg] = floats[1 + im - single]

    else:
        # appended output: all configurations of a replicum live in a single
        # file, separated by "[run]" headers.
        if "files" in kwargs:
            ls = kwargs.get("files")
        else:
            for exc in ls:
                if not fnmatch.fnmatch(exc, prefix + '*.' + name):
                    ls = list(set(ls) - set([exc]))
            ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
        pattern = 'name ' + name + '\nquarks ' + quarks + '\noffset ' + str(noffset) + '\nwf ' + str(wf)
        if b2b:
            pattern += '\nwf_2 ' + str(wf2)
        for rep, file in enumerate(ls):
            rep_idl = []
            with open(path + '/' + file, 'r') as fp:
                content = fp.readlines()
                data_starts = []
                for linenumber, line in enumerate(content):
                    if "[run]" in line:
                        data_starts.append(linenumber)
                # all chunks must have the same length, otherwise the offsets
                # computed from the first chunk would be wrong for the rest
                if len(set([data_starts[i] - data_starts[i - 1] for i in range(1, len(data_starts))])) > 1:
                    raise Exception("Irregularities in file structure found, not all runs have the same output length")
                # locate the requested correlator within the first chunk; the
                # line offsets found here are reused for all later chunks
                chunk = content[:data_starts[1]]
                for linenumber, line in enumerate(chunk):
                    if line.startswith("gauge_name"):
                        gauge_line = linenumber
                    elif line.startswith("[correlator]"):
                        corr_line = linenumber
                        found_pat = ""
                        for li in chunk[corr_line + 1: corr_line + 6 + b2b]:
                            found_pat += li
                        if re.search(pattern, found_pat):
                            start_read = corr_line + 7 + b2b
                            break
                # the data block runs until the first blank line
                endline = corr_line + 6 + b2b
                while not chunk[endline] == "\n":
                    endline += 1
                T = endline - start_read
                if rep == 0:
                    deltas = []
                    for t in range(T):
                        deltas.append([])
                for t in range(T):
                    deltas[t].append(np.zeros(len(data_starts)))
                # all other chunks should follow the same structure
                for cnfg in range(len(data_starts)):
                    start = data_starts[cnfg]
                    stop = start + data_starts[1]
                    chunk = content[start:stop]
                    try:
                        rep_idl.append(int(chunk[gauge_line].split(cfg_separator)[-1]))
                    except Exception:
                        raise Exception("Couldn't parse idl from directory, problem with chunk around line ", gauge_line)

                    found_pat = ""
                    for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
                        found_pat += li
                    if re.search(pattern, found_pat):
                        for t, line in enumerate(chunk[start_read:start_read + T]):
                            floats = list(map(float, line.split()))
                            deltas[t][rep][cnfg] = floats[im + 1 - single]
            idl.append(rep_idl)

    if "check_configs" in kwargs:
        print("Checking for missing configs...")
        che = kwargs.get("check_configs")
        if not (len(che) == len(idl)):
            raise Exception("check_configs has to be the same length as replica!")
        for r in range(len(idl)):
            print("checking " + new_names[r])
            utils.check_idl(idl[r], che[r])
        print("Done")
    # one Obs per timeslice, spanning all replicas
    result = []
    for t in range(T):
        result.append(Obs(deltas[t], new_names, idl=idl))
    return result
Read sfcf c format from given folder structure.
Parameters
- quarks (str): Label of the quarks used in the sfcf input file. e.g. "quark quark" for version 0.0 this does NOT need to be given with the typical " - " that is present in the output file, this is done automatically for this version
- noffset (int): Offset of the source (only relevant when wavefunctions are used)
- wf (int): ID of wave function
- wf2 (int): ID of the second wavefunction (only relevant for boundary-to-boundary correlation functions)
- im (bool): if True, read imaginary instead of real part of the correlation function.
- corr_type (str): change between bi (boundary - inner, the default), bib (boundary - inner - boundary), and bb (boundary - boundary) correlator types
- names (list): Alternative labeling for replicas/ensembles. Has to have the appropriate length
- ens_name (str): replaces the name of the ensemble
- version (str): version of SFCF, with which the measurement was done. if the compact output option (-c) was specified, append a "c" to the version (e.g. "1.0c") if the append output option (-a) was specified, append an "a" to the version
- cfg_separator (str): String that separates the ensemble identifier from the configuration number (default 'n').
- replica (list): list of replica to be read, default is all
- files (list): list of files to be read per replica, default is all. for non-compact output format, hand the folders to be read here.
- check_configs (list): list of lists of expected configs, e.g. [range(1, 1000)] for one replicum with 1000 configs