merge with develop
commit 71fe86b8ba
59 changed files with 5367 additions and 1798 deletions
@@ -161,7 +161,7 @@ Passing arguments to the `gamma_method` still dominates over the dictionaries.

## Irregular Monte Carlo chains

Irregular Monte Carlo chains can be initialized with the parameter `idl`.
`Obs` objects defined on irregular Monte Carlo chains can be initialized with the parameter `idl`.

```python
# Observable defined on configurations 20 to 519
@@ -187,12 +187,64 @@ obs3.details()
```

`Obs` objects defined on regular and irregular histories of the same ensemble can be combined with each other; the correct error propagation and estimation is automatically taken care of.

**Warning:** Irregular Monte Carlo chains can result in odd patterns in the autocorrelation functions.
Make sure to check the autocorrelation time with e.g. `pyerrors.obs.Obs.plot_rho` or `pyerrors.obs.Obs.plot_tauint`.

For the full API see `pyerrors.obs.Obs`.

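A minimal sketch of how regular and irregular chains interact (the ensemble name and the random data are placeholders):

```python
import numpy as np
import pyerrors as pe

# observable on a regular chain, configurations 1 to 500
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
# observable on every other configuration of the same ensemble
obs2 = pe.Obs([np.random.normal(1.0, 0.1, 250)], ['ens1'], idl=[range(1, 500, 2)])

diff = obs1 - obs2  # overlapping configurations are matched automatically
diff.gamma_method()
diff.details()
```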
# Correlators
When one is not interested in single observables but correlation functions, `pyerrors` offers the `Corr` class which simplifies the corresponding error propagation and provides the user with a set of standard methods. In order to initialize a `Corr` object one needs to arrange the data as a list of `Obs`:
```python
my_corr = pe.Corr([obs_0, obs_1, obs_2, obs_3])
print(my_corr)
> x0/a Corr(x0/a)
> ------------------
> 0 0.7957(80)
> 1 0.5156(51)
> 2 0.3227(33)
> 3 0.2041(21)
```
In case the correlation functions are not defined on the outermost timeslices, for example because of fixed boundary conditions, a padding can be introduced.
```python
my_corr = pe.Corr([obs_0, obs_1, obs_2, obs_3], padding=[1, 1])
print(my_corr)
> x0/a Corr(x0/a)
> ------------------
> 0
> 1 0.7957(80)
> 2 0.5156(51)
> 3 0.3227(33)
> 4 0.2041(21)
> 5
```
The individual entries of a correlator can be accessed via slicing
```python
print(my_corr[3])
> 0.3227(33)
```
Error propagation with the `Corr` class works very similarly to `Obs` objects. Mathematical operations are overloaded and `Corr` objects can be computed together with other `Corr` objects, `Obs` objects or real numbers and integers.
```python
my_new_corr = 0.3 * my_corr[2] * my_corr * my_corr + 12 / my_corr
```

`pyerrors` provides the user with a set of regularly used methods for the manipulation of correlator objects (a short sketch follows the list):
- `Corr.gamma_method` applies the gamma method to all entries of the correlator.
- `Corr.m_eff` to construct effective masses. Variants for periodic and fixed temporal boundary conditions are available.
- `Corr.deriv` returns the first derivative of the correlator as `Corr`. Different discretizations of the numerical derivative are available.
- `Corr.second_deriv` returns the second derivative of the correlator as `Corr`. Different discretizations of the numerical derivative are available.
- `Corr.symmetric` symmetrizes parity even correlation functions, assuming periodic boundary conditions.
- `Corr.anti_symmetric` anti-symmetrizes parity odd correlation functions, assuming periodic boundary conditions.
- `Corr.T_symmetry` averages a correlator with its time symmetry partner, assuming fixed boundary conditions.
- `Corr.plateau` extracts a plateau value from the correlator in a given range.
- `Corr.roll` periodically shifts the correlator.
- `Corr.reverse` reverses the time ordering of the correlator.
- `Corr.correlate` constructs a disconnected correlation function from the correlator and another `Corr` or `Obs` object.
- `Corr.reweight` reweights the correlator.

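A minimal sketch of a few of these methods in use (`my_corr` as defined above):

```python
my_corr.gamma_method()                     # error estimation on all timeslices
m_eff = my_corr.m_eff(variant='log')       # effective mass as a new Corr
first_deriv = my_corr.deriv(variant='symmetric')
shifted = my_corr.roll(2)                  # periodic shift by two timeslices
```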
`pyerrors` can also handle matrices of correlation functions and extract energy states from these matrices via a generalized eigenvalue problem (see `pyerrors.correlators.Corr.GEVP`).

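Assuming `matrix_corr` is a `Corr` built from an NxN array of correlators, the ground state eigenvector could be extracted and projected as follows (the choice of `t0` and `ts` is purely illustrative):

```python
vec = matrix_corr.GEVP(t0=3, ts=6, state=0)
ground_state_corr = matrix_corr.projected(vec)
```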
For the full API see `pyerrors.correlators.Corr`.

# Complex observables

@@ -223,17 +275,26 @@ print(my_derived_cobs)
`pyerrors.roots`

# Matrix operations
`pyerrors.linalg`
`pyerrors` provides wrappers for `Obs`-valued matrix operations based on `numpy.linalg`. The supported functions include (a short sketch follows the list):
- `inv` for the matrix inverse.
- `cholesky` for the Cholesky decomposition.
- `det` for the matrix determinant.
- `eigh` for eigenvalues and eigenvectors of hermitian matrices.
- `eig` for eigenvalues of general matrices.
- `pinv` for the Moore-Penrose pseudoinverse.
- `svd` for the singular value decomposition.

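A minimal sketch (the entries `obs_11`, `obs_12` and `obs_22` are placeholder `Obs` objects):

```python
import numpy as np

matrix = np.asarray([[obs_11, obs_12], [obs_12, obs_22]])
inverse = pe.linalg.inv(matrix)                     # Obs-valued matrix inverse
eigenvalues, eigenvectors = pe.linalg.eigh(matrix)  # eigendecomposition
```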
For the full API see `pyerrors.linalg`.

# Export data
The preferred exported file format within `pyerrors` is
The preferred exported file format within `pyerrors` is json.gz

## Jackknife samples
For comparison with other analysis workflows `pyerrors` can generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.

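A minimal sketch of the jackknife interface (`my_obs` is a placeholder `Obs` defined on the single ensemble 'ens1'):

```python
jacks = my_obs.export_jackknife()                 # numpy array of jackknife samples
new_obs = pe.obs.import_jackknife(jacks, 'ens1')  # reconstruct an Obs from them
```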
# Input
`pyerrors.input`
`pyerrors` includes an `input` submodule in which input routines and parsers for the output of various numerical programs are contained. For details see `pyerrors.input`.
'''
from .obs import *
from .correlators import *

pyerrors/correlators.py

@@ -3,8 +3,8 @@ import numpy as np
import autograd.numpy as anp
import matplotlib.pyplot as plt
import scipy.linalg
from .obs import Obs, reweight, correlate
from .misc import dump_object
from .obs import Obs, reweight, correlate, CObs
from .misc import dump_object, _assert_equal_properties
from .fits import least_squares
from .linalg import eigh, inv, cholesky
from .roots import find_root
@@ -19,60 +19,99 @@ class Corr:
    to iterate over all timeslices for every operation. This is especially true, when dealing with smearing matrices.

    The correlator can have two types of content: An Obs at every timeslice OR a GEVP
    smearing matrix at every timeslice. Other dependency (eg. spacial) are not supported.
    smearing matrix at every timeslice. Other dependencies (eg. spatial) are not supported.

    """

    def __init__(self, data_input, padding_front=0, padding_back=0, prange=None):
        # All data_input should be a list of things at different timeslices. This needs to be verified
    def __init__(self, data_input, padding=[0, 0], prange=None):
        """ Initialize a Corr object.

        if not isinstance(data_input, list):
            raise TypeError('Corr__init__ expects a list of timeslices.')
        # data_input can have multiple shapes. The simplest one is a list of Obs.
        # We check, if this is the case
        if all([isinstance(item, Obs) for item in data_input]):
            self.content = [np.asarray([item]) for item in data_input]
            # Wrapping the Obs in an array ensures that the data structure is consistent with smearing matrices.
            self.N = 1  # number of smearings
        Parameters
        ----------
        data_input : list or array
            list of Obs or list of arrays of Obs or array of Corrs
        padding : list, optional
            List with two entries where the first labels the padding
            at the front of the correlator and the second the padding
            at the back.
        prange : list, optional
            List containing the first and last timeslice of the plateau
            region identified for this correlator.
        """

        # data_input in the form [np.array(Obs,NxN)]
        elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
            self.content = data_input
        if isinstance(data_input, np.ndarray):  # Input is an array of Corrs

            noNull = [a for a in self.content if not (a is None)]  # To check if the matrices are correct for all undefined elements
            self.N = noNull[0].shape[0]
            # The checks are now identical to the case above
            if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
                raise Exception("Smearing matrices are not NxN")
            if (not all([item.shape == noNull[0].shape for item in noNull])):
                raise Exception("Items in data_input are not of identical shape." + str(noNull))
        else:  # In case its a list of something else.
            raise Exception("data_input contains item of wrong type")
            # This only works, if the array fulfills the conditions below
            if not len(data_input.shape) == 2 and data_input.shape[0] == data_input.shape[1]:
                raise Exception("Incompatible array shape")
            if not all([isinstance(item, Corr) for item in data_input.flatten()]):
                raise Exception("If the input is an array, its elements must be of type pe.Corr")
            if not all([item.N == 1 for item in data_input.flatten()]):
                raise Exception("Can only construct matrix correlator from single valued correlators")
            if not len(set([item.T for item in data_input.flatten()])) == 1:
                raise Exception("All input Correlators must be defined over the same timeslices.")

            T = data_input[0, 0].T
            N = data_input.shape[0]
            input_as_list = []
            for t in range(T):
                if any([(item.content[t][0] is None) for item in data_input.flatten()]):
                    if not all([(item.content[t][0] is None) for item in data_input.flatten()]):
                        warnings.warn("Input ill-defined at different timeslices. Conversion leads to data loss!", RuntimeWarning)
                    input_as_list.append(None)
                else:
                    array_at_timeslace = np.empty([N, N], dtype="object")
                    for i in range(N):
                        for j in range(N):
                            array_at_timeslace[i, j] = data_input[i, j][t]
                    input_as_list.append(array_at_timeslace)
            data_input = input_as_list

        if isinstance(data_input, list):

            if all([(isinstance(item, Obs) or isinstance(item, CObs)) for item in data_input]):
                _assert_equal_properties(data_input)
                self.content = [np.asarray([item]) for item in data_input]
            if all([(isinstance(item, Obs) or isinstance(item, CObs)) or item is None for item in data_input]):
                _assert_equal_properties([o for o in data_input if o is not None])
                self.content = [np.asarray([item]) if item is not None else None for item in data_input]
                self.N = 1

            elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
                self.content = data_input
                noNull = [a for a in self.content if not (a is None)]  # To check if the matrices are correct for all undefined elements
                self.N = noNull[0].shape[0]
                if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
                    raise Exception("Smearing matrices are not NxN")
                if (not all([item.shape == noNull[0].shape for item in noNull])):
                    raise Exception("Items in data_input are not of identical shape." + str(noNull))
            else:
                raise Exception("data_input contains item of wrong type")
        else:
            raise Exception("Data input was not given as list or correct array")

        self.tag = None

        # We now apply some padding to our list. In case that our list represents a correlator of length T but is not defined at every value.
        # An undefined timeslice is represented by the None object
        self.content = [None] * padding_front + self.content + [None] * padding_back
        self.T = len(self.content)  # for convenience: will be used a lot
        self.content = [None] * padding[0] + self.content + [None] * padding[1]
        self.T = len(self.content)

        # The attribute "range" [start,end] marks a range of two timeslices.
        # This is useful for keeping track of plateaus and fitranges.
        # The range can be inherited from other Corrs, if the operation should not alter a chosen range eg. multiplication with a constant.
        self.prange = prange

        self.gamma_method()

    def __getitem__(self, idx):
        """Return the content of timeslice idx"""
        if len(self.content[idx]) == 1:
        if self.content[idx] is None:
            return None
        elif len(self.content[idx]) == 1:
            return self.content[idx][0]
        else:
            return self.content[idx]

    @property
    def reweighted(self):
        bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in self.content])
        bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in [x for x in self.content if x is not None]])
        if np.all(bool_array == 1):
            return True
        elif np.all(bool_array == 0):
@@ -91,15 +130,15 @@ class Corr:
            for j in range(self.N):
                item[i, j].gamma_method(**kwargs)

    # We need to project the Correlator with a Vector to get a single value at each timeslice.
    # The method can use one or two vectors.
    # If two are specified it returns v1@G@v2 (the order might be very important.)
    # By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
    def projected(self, vector_l=None, vector_r=None):
    def projected(self, vector_l=None, vector_r=None, normalize=False):
        """We need to project the Correlator with a Vector to get a single value at each timeslice.

        The method can use one or two vectors.
        If two are specified it returns v1@G@v2 (the order might be very important.)
        By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
        """
        if self.N == 1:
            raise Exception("Trying to project a Corr, that already has N=1.")
            # This Exception is in no way necessary. One could just return self
            # But there is no scenario, where a user would want that to happen and the error message might be more informative.

        self.gamma_method()
@@ -107,31 +146,49 @@ class Corr:
            vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
        elif(vector_r is None):
            vector_r = vector_l
        if isinstance(vector_l, list) and not isinstance(vector_r, list):
            if len(vector_l) != self.T:
                raise Exception("Length of vector list must be equal to T")
            vector_r = [vector_r] * self.T
        if isinstance(vector_r, list) and not isinstance(vector_l, list):
            if len(vector_r) != self.T:
                raise Exception("Length of vector list must be equal to T")
            vector_l = [vector_l] * self.T

        if not vector_l.shape == vector_r.shape == (self.N,):
            raise Exception("Vectors are of wrong shape!")
        if not isinstance(vector_l, list):
            if not vector_l.shape == vector_r.shape == (self.N,):
                raise Exception("Vectors are of wrong shape!")
            if normalize:
                vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
            # if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
            #     print("Vectors are normalized before projection!")

        # We always normalize before projecting! But we only raise a warning, when it is clear, they where not meant to be normalized.
        if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
            print("Vectors are normalized before projection!")
            newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]

        vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
        else:
            # There are no checks here yet. There are so many possible scenarios, where this can go wrong.
            if normalize:
                for t in range(self.T):
                    vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])

        newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
        newcontent = [None if (self.content[t] is None or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
        return Corr(newcontent)

    def sum(self):
        return np.sqrt(self.N) * self.projected(np.ones(self.N))

    # For purposes of debugging and verification, one might want to see a single smearing level. smearing will return a Corr at the specified i,j. where both are integers 0<=i,j<N.
    def smearing(self, i, j):
        """Picks the element [i,j] from every matrix and returns a correlator containing one Obs per timeslice.

        Parameters
        ----------
        i : int
            First index to be picked.
        j : int
            Second index to be picked.
        """
        if self.N == 1:
            raise Exception("Trying to pick smearing from projected Corr")
        newcontent = [None if(item is None) else item[i, j] for item in self.content]
        return Corr(newcontent)

    # Obs and Matplotlib do not play nicely
    # We often want to retrieve x,y,y_err as lists to pass them to something like pyplot.errorbar
    def plottable(self):
        """Outputs the correlator in a plotable format.

@@ -139,16 +196,13 @@ class Corr:
        timeslice and the error on each timeslice.
        """
        if self.N != 1:
            raise Exception("Can only make Corr[N=1] plottable")  # We could also autoproject to the groundstate or expect vectors, but this is supposed to be a super simple function.
            raise Exception("Can only make Corr[N=1] plottable")
        x_list = [x for x in range(self.T) if not self.content[x] is None]
        y_list = [y[0].value for y in self.content if y is not None]
        y_err_list = [y[0].dvalue for y in self.content if y is not None]

        return x_list, y_list, y_err_list

    # symmetric returns a Corr, that has been symmetrized.
    # A symmetry checker is still to be implemented
    # The method will not delete any redundant timeslices (Bad for memory, Great for convenience)
    def symmetric(self):
        """ Symmetrize the correlator around x0=0."""
        if self.T % 2 != 0:
@@ -185,28 +239,72 @@ class Corr:
            raise Exception("Corr could not be symmetrized: No redundant values")
        return Corr(newcontent, prange=self.prange)

    # This method will symmetrice the matrices and therefore make them positive definit.
    def smearing_symmetric(self):
        """Symmetrizes the matrices and therefore make them positive definite."""
        if self.N > 1:
            transposed = [None if (G is None) else G.T for G in self.content]
            return 0.5 * (Corr(transposed) + self)
        if self.N == 1:
            raise Exception("Trying to symmetrize a smearing matrix, that already has N=1.")

    # We also include a simple GEVP method based on Scipy.linalg
    def GEVP(self, t0, ts, state=1):
        if (self.content[t0] is None) or (self.content[ts] is None):
            raise Exception("Corr not defined at t0/ts")
        G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
        for i in range(self.N):
            for j in range(self.N):
                G0[i, j] = self.content[t0][i, j].value
                Gt[i, j] = self.content[ts][i, j].value
    def GEVP(self, t0, ts=None, state=0, sorted_list=None):
        """Solve the general eigenvalue problem on the current correlator

        sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
        sp_vec = sp_vec[:, np.argsort(sp_val)[-state]]  # We only want the eigenvector belonging to the selected state
        sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
        return sp_vec
        Parameters
        ----------
        t0 : int
            The time t0 for G(t)v= lambda G(t_0)v
        ts : int
            fixed time G(t_s)v= lambda G(t_0)v if return_list=False
            If return_list=True and sorting=Eigenvector it gives a reference point for the sorting method.
        state : int
            The state one is interested in ordered by energy. The lowest state is zero.
        sorted_list : string
            if this argument is set, a list of vectors (len=self.T) is returned. If it is left as None, only one vector is returned.
            "Eigenvalue" - The eigenvector is chosen according to which eigenvalue it belongs individually on every timeslice.
            "Eigenvector" - Use the method described in arXiv:2004.10472 [hep-lat] to find the set of v(t) belonging to the state.
            The reference state is identified by its eigenvalue at t=ts
        """
        if sorted_list is None:
            if (ts is None):
                raise Exception("ts is required if sorted_list=None")
            if (self.content[t0] is None) or (self.content[ts] is None):
                raise Exception("Corr not defined at t0/ts")
            G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
            for i in range(self.N):
                for j in range(self.N):
                    G0[i, j] = self.content[t0][i, j].value
                    Gt[i, j] = self.content[ts][i, j].value

            sp_vecs = _GEVP_solver(Gt, G0)
            sp_vec = sp_vecs[state]
            return sp_vec
        else:

            all_vecs = []
            for t in range(self.T):
                try:
                    G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
                    for i in range(self.N):
                        for j in range(self.N):
                            G0[i, j] = self.content[t0][i, j].value
                            Gt[i, j] = self.content[t][i, j].value

                    sp_vecs = _GEVP_solver(Gt, G0)
                    if sorted_list == "Eigenvalue":
                        sp_vec = sp_vecs[state]
                        all_vecs.append(sp_vec)
                    else:
                        all_vecs.append(sp_vecs)
                except Exception:
                    all_vecs.append(None)
            if sorted_list == "Eigenvector":
                if (ts is None):
                    raise Exception("ts is required for the Eigenvector sorting method.")
                all_vecs = _sort_vectors(all_vecs, ts)
                all_vecs = [a[state] for a in all_vecs]

            return all_vecs

    def Eigenvalue(self, t0, state=1):
        G = self.smearing_symmetric()
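A hedged usage sketch of the new `sorted_list` interface (`matrix_corr` and the timeslice choices are illustrative):

```python
vec = matrix_corr.GEVP(t0=3, ts=8, state=0)                              # one vector at fixed ts
vecs = matrix_corr.GEVP(t0=3, ts=8, state=0, sorted_list="Eigenvector")  # one vector per timeslice, matched across t as in arXiv:2004.10472
```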
@@ -217,13 +315,57 @@ class Corr:
            LTi = inv(LT)
            newcontent = []
            for t in range(self.T):
                Gt = G.content[t]
                M = Li @ Gt @ LTi
                eigenvalues = eigh(M)[0]
                eigenvalue = eigenvalues[-state]
                newcontent.append(eigenvalue)
                if self.content[t] is None:
                    newcontent.append(None)
                else:
                    Gt = G.content[t]
                    M = Li @ Gt @ LTi
                    eigenvalues = eigh(M)[0]
                    eigenvalue = eigenvalues[-state]
                    newcontent.append(eigenvalue)
            return Corr(newcontent)

    def Hankel(self, N, periodic=False):
        """Constructs an NxN Hankel matrix

        C(t) c(t+1) ... c(t+n-1)
        C(t+1) c(t+2) ... c(t+n)
        .................
        C(t+(n-1)) c(t+n) ... c(t+2(n-1))

        Parameters
        ----------
        N : int
            Dimension of the Hankel matrix
        periodic : bool, optional
            determines whether the matrix is extended periodically
        """

        if self.N != 1:
            raise Exception("Multi-operator Prony not implemented!")

        array = np.empty([N, N], dtype="object")
        new_content = []
        for t in range(self.T):
            new_content.append(array.copy())

        def wrap(i):
            if i >= self.T:
                return i - self.T
            return i

        for t in range(self.T):
            for i in range(N):
                for j in range(N):
                    if periodic:
                        new_content[t][i, j] = self.content[wrap(t + i + j)][0]
                    elif (t + i + j) >= self.T:
                        new_content[t] = None
                    else:
                        new_content[t][i, j] = self.content[t + i + j][0]

        return Corr(new_content)

    def roll(self, dt):
        """Periodically shift the correlator by dt timeslices

@@ -236,7 +378,7 @@ class Corr:

    def reverse(self):
        """Reverse the time ordering of the Corr"""
        return Corr(self.content[::-1])
        return Corr(self.content[:: -1])

    def correlate(self, partner):
        """Correlate the correlator with another correlator or Obs
@@ -258,7 +400,7 @@ class Corr:
                    new_content.append(None)
                else:
                    new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
            elif isinstance(partner, Obs):
            elif isinstance(partner, Obs):  # Should this include CObs?
                new_content.append(np.array([correlate(o, partner) for o in t_slice]))
            else:
                raise Exception("Can only correlate with an Obs or a Corr.")
@@ -312,25 +454,16 @@ class Corr:

        return (self + T_partner) / 2

    def deriv(self, symmetric=True):
    def deriv(self, variant="symmetric"):
        """Return the first derivative of the correlator with respect to x0.

        Parameters
        ----------
        symmetric : bool
            decides whether symmetric of simple finite differences are used. Default: True
        variant : str
            decides which definition of the finite differences derivative is used.
            Available choice: symmetric, forward, backward, improved, default: symmetric
        """
        if not symmetric:
            newcontent = []
            for t in range(self.T - 1):
                if (self.content[t] is None) or (self.content[t + 1] is None):
                    newcontent.append(None)
                else:
                    newcontent.append(self.content[t + 1] - self.content[t])
            if(all([x is None for x in newcontent])):
                raise Exception("Derivative is undefined at all timeslices")
            return Corr(newcontent, padding_back=1)
        if symmetric:
        if variant == "symmetric":
            newcontent = []
            for t in range(1, self.T - 1):
                if (self.content[t - 1] is None) or (self.content[t + 1] is None):
@@ -339,19 +472,71 @@ class Corr:
                    newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
            if(all([x is None for x in newcontent])):
                raise Exception('Derivative is undefined at all timeslices')
            return Corr(newcontent, padding_back=1, padding_front=1)
            return Corr(newcontent, padding=[1, 1])
        elif variant == "forward":
            newcontent = []
            for t in range(self.T - 1):
                if (self.content[t] is None) or (self.content[t + 1] is None):
                    newcontent.append(None)
                else:
                    newcontent.append(self.content[t + 1] - self.content[t])
            if(all([x is None for x in newcontent])):
                raise Exception("Derivative is undefined at all timeslices")
            return Corr(newcontent, padding=[0, 1])
        elif variant == "backward":
            newcontent = []
            for t in range(1, self.T):
                if (self.content[t - 1] is None) or (self.content[t] is None):
                    newcontent.append(None)
                else:
                    newcontent.append(self.content[t] - self.content[t - 1])
            if(all([x is None for x in newcontent])):
                raise Exception("Derivative is undefined at all timeslices")
            return Corr(newcontent, padding=[1, 0])
        elif variant == "improved":
            newcontent = []
            for t in range(2, self.T - 2):
                if (self.content[t - 2] is None) or (self.content[t - 1] is None) or (self.content[t + 1] is None) or (self.content[t + 2] is None):
                    newcontent.append(None)
                else:
                    newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
            if(all([x is None for x in newcontent])):
                raise Exception('Derivative is undefined at all timeslices')
            return Corr(newcontent, padding=[2, 2])
        else:
            raise Exception("Unknown variant.")

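The variants above implement the standard finite-difference stencils; schematically, for a correlator C defined at integer t:

```python
# forward:    C'(t) =  C(t+1) - C(t)
# backward:   C'(t) =  C(t)   - C(t-1)
# symmetric:  C'(t) = (C(t+1) - C(t-1)) / 2
# improved:   C'(t) = (C(t-2) - 8*C(t-1) + 8*C(t+1) - C(t+2)) / 12
d1 = my_corr.deriv(variant="improved")  # usage sketch; my_corr is an N=1 Corr
```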
    def second_deriv(self):
        """Return the second derivative of the correlator with respect to x0."""
        newcontent = []
        for t in range(1, self.T - 1):
            if (self.content[t - 1] is None) or (self.content[t + 1] is None):
                newcontent.append(None)
            else:
                newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
        if(all([x is None for x in newcontent])):
            raise Exception("Derivative is undefined at all timeslices")
        return Corr(newcontent, padding_back=1, padding_front=1)
    def second_deriv(self, variant="symmetric"):
        """Return the second derivative of the correlator with respect to x0.

        Parameters
        ----------
        variant : str
            decides which definition of the finite differences derivative is used.
            Available choice: symmetric, improved, default: symmetric
        """
        if variant == "symmetric":
            newcontent = []
            for t in range(1, self.T - 1):
                if (self.content[t - 1] is None) or (self.content[t + 1] is None):
                    newcontent.append(None)
                else:
                    newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
            if(all([x is None for x in newcontent])):
                raise Exception("Derivative is undefined at all timeslices")
            return Corr(newcontent, padding=[1, 1])
        elif variant == "improved":
            newcontent = []
            for t in range(2, self.T - 2):
                if (self.content[t - 2] is None) or (self.content[t - 1] is None) or (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t + 2] is None):
                    newcontent.append(None)
                else:
                    newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
            if(all([x is None for x in newcontent])):
                raise Exception("Derivative is undefined at all timeslices")
            return Corr(newcontent, padding=[2, 2])
        else:
            raise Exception("Unknown variant.")

    def m_eff(self, variant='log', guess=1.0):
        """Returns the effective mass of the correlator as correlator object
@@ -379,7 +564,7 @@ class Corr:
            if(all([x is None for x in newcontent])):
                raise Exception('m_eff is undefined at all timeslices')

            return np.log(Corr(newcontent, padding_back=1))
            return np.log(Corr(newcontent, padding=[0, 1]))

        elif variant in ['periodic', 'cosh', 'sinh']:
            if variant in ['periodic', 'cosh']:
@@ -402,7 +587,7 @@ class Corr:
            if(all([x is None for x in newcontent])):
                raise Exception('m_eff is undefined at all timeslices')

            return Corr(newcontent, padding_back=1)
            return Corr(newcontent, padding=[0, 1])

        elif variant == 'arccosh':
            newcontent = []
@@ -413,7 +598,7 @@ class Corr:
                newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
            if(all([x is None for x in newcontent])):
                raise Exception("m_eff is undefined at all timeslices")
            return np.arccosh(Corr(newcontent, padding_back=1, padding_front=1))
            return np.arccosh(Corr(newcontent, padding=[1, 1]))

        else:
            raise Exception('Unknown variant.')
@@ -434,11 +619,6 @@ class Corr:
        if self.N != 1:
            raise Exception("Correlator must be projected before fitting")

        # The default behavior is:
        # 1 use explicit fitrange
        # if none is provided, use the range of the corr
        # if this is also not set, use the whole length of the corr (This could come with a warning!)

        if fitrange is None:
            if self.prange:
                fitrange = self.prange
@@ -520,7 +700,7 @@ class Corr:
        if self.N != 1:
            raise Exception("Correlator must be projected before plotting")
        if x_range is None:
            x_range = [0, self.T]
            x_range = [0, self.T - 1]

        fig = plt.figure()
        ax1 = fig.add_subplot(111)
@@ -582,24 +762,45 @@ class Corr:

        return

    def dump(self, filename):
        """Dumps the Corr into a pickle file

    def dump(self, filename, datatype="json.gz", **kwargs):
        """Dumps the Corr into a file of chosen type
        Parameters
        ----------
        filename : str
            Name of the file
            Name of the file to be saved.
        datatype : str
            Format of the exported file. Supported formats include
            "json.gz" and "pickle"
        path : str
            specifies a custom path for the file (default '.')
        """
        dump_object(self, filename)
        return
        if datatype == "json.gz":
            from .input.json import dump_to_json
            if 'path' in kwargs:
                file_name = kwargs.get('path') + '/' + filename
            else:
                file_name = filename
            dump_to_json(self, file_name)
        elif datatype == "pickle":
            dump_object(self, filename, **kwargs)
        else:
            raise Exception("Unknown datatype " + str(datatype))

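A hedged usage sketch of the reworked `dump` (the file and directory names are placeholders; the target directory is assumed to exist):

```python
my_corr.dump('my_corr', datatype="json.gz", path='./results')  # writes ./results/my_corr.json.gz
my_corr.dump('my_corr', datatype="pickle")                     # pickle alternative
```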
    def print(self, range=[0, None]):
        print(self.__repr__(range))

    def __repr__(self, range=[0, None]):
        content_string = ""

        content_string += "Corr T=" + str(self.T) + " N=" + str(self.N) + "\n"  # +" filled with"+ str(type(self.content[0][0])) there should be a good solution here

        if self.tag is not None:
            content_string += "Description: " + self.tag + "\n"
        if self.N != 1:
            return content_string
            # This avoids a crash for N>1. I do not know, what else to do here. I like the list representation for N==1. We could print only one "smearing" or one matrix. Printing everything will just
            # be a wall of numbers.

        if range[1]:
            range[1] += 1
        content_string += 'x0/a\tCorr(x0/a)\n------------------\n'
@@ -633,7 +834,7 @@ class Corr:
                    newcontent.append(self.content[t] + y.content[t])
            return Corr(newcontent)

        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
            newcontent = []
            for t in range(self.T):
                if (self.content[t] is None):
@@ -656,7 +857,7 @@ class Corr:
                    newcontent.append(self.content[t] * y.content[t])
            return Corr(newcontent)

        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
            newcontent = []
            for t in range(self.T):
                if (self.content[t] is None):
@@ -689,9 +890,14 @@ class Corr:
                raise Exception("Division returns completely undefined correlator")
            return Corr(newcontent)

        elif isinstance(y, Obs):
            if y.value == 0:
                raise Exception('Division by zero will return undefined correlator')
        elif isinstance(y, Obs) or isinstance(y, CObs):
            if isinstance(y, Obs):
                if y.value == 0:
                    raise Exception('Division by zero will return undefined correlator')
            if isinstance(y, CObs):
                if y.is_zero():
                    raise Exception('Division by zero will return undefined correlator')

            newcontent = []
            for t in range(self.T):
                if (self.content[t] is None):
@@ -721,7 +927,7 @@ class Corr:
        return self + (-y)

    def __pow__(self, y):
        if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
        if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float) or isinstance(y, CObs):
            newcontent = [None if (item is None) else item**y for item in self.content]
            return Corr(newcontent, prange=self.prange)
        else:
@@ -802,3 +1008,70 @@ class Corr:

    def __rtruediv__(self, y):
        return (self / y) ** (-1)

    @property
    def real(self):
        def return_real(obs_OR_cobs):
            if isinstance(obs_OR_cobs, CObs):
                return obs_OR_cobs.real
            else:
                return obs_OR_cobs

        return self._apply_func_to_corr(return_real)

    @property
    def imag(self):
        def return_imag(obs_OR_cobs):
            if isinstance(obs_OR_cobs, CObs):
                return obs_OR_cobs.imag
            else:
                return obs_OR_cobs * 0  # So it stays the right type

        return self._apply_func_to_corr(return_imag)


def _sort_vectors(vec_set, ts):
    """Helper function used to find a set of Eigenvectors consistent over all timeslices"""
    reference_sorting = np.array(vec_set[ts])
    N = reference_sorting.shape[0]
    sorted_vec_set = []
    for t in range(len(vec_set)):
        if vec_set[t] is None:
            sorted_vec_set.append(None)
        elif not t == ts:
            perms = permutation([i for i in range(N)])
            best_score = 0
            for perm in perms:
                current_score = 1
                for k in range(N):
                    new_sorting = reference_sorting.copy()
                    new_sorting[perm[k], :] = vec_set[t][k]
                    current_score *= abs(np.linalg.det(new_sorting))
                if current_score > best_score:
                    best_score = current_score
                    best_perm = perm
            sorted_vec_set.append([vec_set[t][k] for k in best_perm])
        else:
            sorted_vec_set.append(vec_set[t])

    return sorted_vec_set


def permutation(lst):  # Shamelessly copied
    if len(lst) == 1:
        return [lst]
    ll = []
    for i in range(len(lst)):
        m = lst[i]
        remLst = lst[:i] + lst[i + 1:]
        # Generating all permutations where m is first
        for p in permutation(remLst):
            ll.append([m] + p)
    return ll


def _GEVP_solver(Gt, G0):  # Just so normalization and sorting does not need to be repeated. Here we could later put in some checks
    sp_val, sp_vecs = scipy.linalg.eig(Gt, G0)
    sp_vecs = [sp_vecs[:, np.argsort(sp_val)[-i]] for i in range(1, sp_vecs.shape[0] + 1)]
    sp_vecs = [v / np.sqrt((v.T @ G0 @ v)) for v in sp_vecs]
    return sp_vecs
pyerrors/dirac.py

@@ -22,6 +22,30 @@ identity = np.array(
    dtype=complex)


def epsilon_tensor(i, j, k):
    """Rank-3 epsilon tensor

    Based on https://codegolf.stackexchange.com/a/160375
    """
    test_set = set((i, j, k))
    if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
        raise Exception("Unexpected input", i, j, k)

    return (i - j) * (j - k) * (k - i) / 2


def epsilon_tensor_rank4(i, j, k, o):
    """Rank-4 epsilon tensor

    Extension of https://codegolf.stackexchange.com/a/160375
    """
    test_set = set((i, j, k, o))
    if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
        raise Exception("Unexpected input", i, j, k, o)

    return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12

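A quick sanity check of the rank-3 sign convention implied by the closed-form product (illustration only):

```python
assert epsilon_tensor(0, 1, 2) == 1.0   # even permutation
assert epsilon_tensor(2, 1, 0) == -1.0  # odd permutation
assert epsilon_tensor(0, 0, 2) == 0.0   # repeated index vanishes
```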
def Grid_gamma(gamma_tag):
    """Returns gamma matrix in Grid labeling."""
    if gamma_tag == 'Identity':
pyerrors/fits.py
@@ -1,3 +1,4 @@
import gc
from collections.abc import Sequence
import warnings
import numpy as np
@@ -7,6 +8,7 @@ import scipy.stats
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.odr import ODR, Model, RealData
from scipy.stats import chi2
import iminuit
from autograd import jacobian
from autograd import elementwise_grad as egrad
@@ -45,6 +47,8 @@ class Fit_result(Sequence):
            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
        if hasattr(self, 'chisquare_by_expected_chisquare'):
            my_str += '\u03C7\u00b2/\u03C7\u00b2exp = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
        if hasattr(self, 'p_value'):
            my_str += 'p-value = ' + f'{self.p_value:2.4f}' + '\n'
        my_str += 'Fit parameters:\n'
        for i_par, par in enumerate(self.fit_parameters):
            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
@@ -92,7 +96,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
    initial_guess : list
        can provide an initial guess for the input parameters. Relevant for
        non-linear fits with many parameters.
    method : str
    method : str, optional
        can be used to choose an alternative method for the minimization of chisquare.
        The possible methods are the ones which can be used for scipy.optimize.minimize and
        migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
@@ -109,8 +113,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
    correlated_fit : bool
        If true, use the full correlation matrix in the definition of the chisquare
        (only works for prior==None and when no method is given, at the moment).
    const_par : list, optional
        List of N Obs that are used to constrain the last N fit parameters of func.
    '''
    if priors is not None:
        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
@@ -156,8 +158,6 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
        corrected by effects caused by correlated input data.
        This can take a while as the full correlation matrix
        has to be calculated (default False).
    const_par : list, optional
        List of N Obs that are used to constrain the last N fit parameters of func.

    Based on the orthogonal distance regression module of scipy
    '''
@@ -173,17 +173,6 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
    if not callable(func):
        raise TypeError('func has to be a function.')

    func_aug = func
    if 'const_par' in kwargs:
        const_par = kwargs['const_par']
        if isinstance(const_par, Obs):
            const_par = [const_par]

        def func(p, x):
            return func_aug(np.concatenate((p, [o.value for o in const_par])), x)
    else:
        const_par = []

    for i in range(25):
        try:
            func(np.arange(i), x.T[0])
@@ -194,9 +183,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):

    n_parms = i
    if not silent:
        print('Fit with', n_parms, 'parameters')
        if(len(const_par) > 0):
            print('\t and %d constrained parameter%s' % (len(const_par), 's' if len(const_par) > 1 else ''), const_par)
        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))

    x_f = np.vectorize(lambda o: o.value)(x)
    dx_f = np.vectorize(lambda o: o.dvalue)(x)
@@ -239,18 +226,12 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
        raise Exception('The minimization procedure did not converge.')

    m = x_f.size
    n_parms_aug = n_parms + len(const_par)

    def odr_chisquare(p):
        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    def odr_chisquare_aug(p):
        model = func_aug(np.concatenate((p[:n_parms_aug], [o.value for o in const_par])), p[n_parms_aug:].reshape(x_shape))
        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms_aug:].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    if kwargs.get('expected_chisquare') is True:
        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))

@@ -277,44 +258,40 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
            print('chisquare/expected_chisquare:',
                  output.chisquare_by_expected_chisquare)

    fitp = np.concatenate((out.beta, [o.value for o in const_par]))
    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare_aug))(np.concatenate((fitp, out.xplus.ravel()))))
    fitp = out.beta
    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))

    def odr_chisquare_compact_x(d):
        model = func_aug(d[:n_parms_aug], d[n_parms_aug:n_parms_aug + m].reshape(x_shape))
        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms_aug + m:].reshape(x_shape) - d[n_parms_aug:n_parms_aug + m].reshape(x_shape)) / dx_f) ** 2)
        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))

    deriv_x = -hess_inv @ jac_jac_x[:n_parms_aug + m, n_parms_aug + m:]
    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]

    def odr_chisquare_compact_y(d):
        model = func_aug(d[:n_parms_aug], d[n_parms_aug:n_parms_aug + m].reshape(x_shape))
        chisq = anp.sum(((d[n_parms_aug + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms_aug:n_parms_aug + m].reshape(x_shape)) / dx_f) ** 2)
        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
        return chisq

    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))

    deriv_y = -hess_inv @ jac_jac_y[:n_parms_aug + m, n_parms_aug + m:]
    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]

    result = []
    for i in range(n_parms):
        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))

    output.fit_parameters = result + const_par
    output.fit_parameters = result

    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
    output.dof = x.shape[-1] - n_parms
    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)

    return output


def prior_fit(x, y, func, priors, silent=False, **kwargs):
    warnings.warn("prior_fit renamed to least_squares", DeprecationWarning)
    return least_squares(x, y, func, priors=priors, silent=silent, **kwargs)


def _prior_fit(x, y, func, priors, silent=False, **kwargs):
    output = Fit_result()

@@ -357,7 +334,7 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
    output.priors = loc_priors

    if not silent:
        print('Fit with', n_parms, 'parameters')
        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))

    y_f = [o.value for o in y]
    dy_f = [o.dvalue for o in y]
@@ -433,11 +410,6 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
    return output


def standard_fit(x, y, func, silent=False, **kwargs):
    warnings.warn("standard_fit renamed to least_squares", DeprecationWarning)
    return least_squares(x, y, func, silent=silent, **kwargs)


def _standard_fit(x, y, func, silent=False, **kwargs):

    output = Fit_result()
@@ -455,17 +427,6 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
    if not callable(func):
        raise TypeError('func has to be a function.')

    func_aug = func
    if 'const_par' in kwargs:
        const_par = kwargs['const_par']
        if isinstance(const_par, Obs):
            const_par = [const_par]

        def func(p, x):
            return func_aug(np.concatenate((p, [o.value for o in const_par])), x)
    else:
        const_par = []

    for i in range(25):
        try:
            func(np.arange(i), x.T[0])
@@ -477,9 +438,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
    n_parms = i

    if not silent:
        print('Fit with', n_parms, 'parameters')
        if(len(const_par) > 0):
            print('\t and %d constrained parameter%s' % (len(const_par), 's' if len(const_par) > 1 else ''), const_par)
        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))

    y_f = [o.value for o in y]
    dy_f = [o.dvalue for o in y]
@@ -512,42 +471,27 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
            model = func(p, x)
            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
            return chisq

        def chisqfunc_aug(p):
            model = func_aug(np.concatenate((p, [o.value for o in const_par])), x)
            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
            return chisq

    else:
        def chisqfunc(p):
            model = func(p, x)
            chisq = anp.sum(((y_f - model) / dy_f) ** 2)
            return chisq

        def chisqfunc_aug(p):
            model = func_aug(np.concatenate((p, [o.value for o in const_par])), x)
            chisq = anp.sum(((y_f - model) / dy_f) ** 2)
            return chisq
    output.method = kwargs.get('method', 'Levenberg-Marquardt')
    if not silent:
        print('Method:', output.method)

    if 'method' in kwargs:
        output.method = kwargs.get('method')
        if not silent:
            print('Method:', kwargs.get('method'))
        if kwargs.get('method') == 'migrad':
            fit_result = iminuit.minimize(chisqfunc, x0)
            fit_result = iminuit.minimize(chisqfunc, fit_result.x)
    if output.method != 'Levenberg-Marquardt':
        if output.method == 'migrad':
            fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
            output.iterations = fit_result.nfev
        else:
            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'))
            fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=1e-12)
            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
            output.iterations = fit_result.nit

        chisquare = fit_result.fun

        output.iterations = fit_result.nit
    else:
        output.method = 'Levenberg-Marquardt'
        if not silent:
            print('Method: Levenberg-Marquardt')

        if kwargs.get('correlated_fit') is True:
            def chisqfunc_residuals(p):
                model = func(p, x)
@@ -591,34 +535,34 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
            print('chisquare/expected_chisquare:',
                  output.chisquare_by_expected_chisquare)

    fitp = np.concatenate((fit_result.x, [o.value for o in const_par]))
    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc_aug))(fitp))
    fitp = fit_result.x
    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))

    n_parms_aug = n_parms + len(const_par)
    if kwargs.get('correlated_fit') is True:
        def chisqfunc_compact(d):
            model = func_aug(d[:n_parms_aug], x)
            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms_aug:] - model)) ** 2)
            model = func(d[:n_parms], x)
            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
            return chisq

    else:
        def chisqfunc_compact(d):
            model = func_aug(d[:n_parms_aug], x)
            chisq = anp.sum(((d[n_parms_aug:] - model) / dy_f) ** 2)
            model = func(d[:n_parms], x)
            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
            return chisq

    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))

    deriv = -hess_inv @ jac_jac[:n_parms_aug, n_parms_aug:]
    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]

    result = []
    for i in range(n_parms):
        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))

    output.fit_parameters = result + const_par
    output.fit_parameters = result

    output.chisquare = chisqfunc(fit_result.x)
    output.dof = x.shape[-1] - n_parms
    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)

    if kwargs.get('resplot') is True:
        residual_plot(x, y, func, result)
@@ -629,11 +573,6 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
    return output


def odr_fit(x, y, func, silent=False, **kwargs):
    warnings.warn("odr_fit renamed to total_least_squares", DeprecationWarning)
    return total_least_squares(x, y, func, silent=silent, **kwargs)


def fit_lin(x, y, **kwargs):
    """Performs a linear fit to y = n + m * x and returns two Obs n, m.

@@ -703,7 +642,7 @@ def residual_plot(x, y, func, fit_res):
    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
    ax1.tick_params(direction='out')
    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
    ax1.axhline(y=0.0, ls='--', color='k')
    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
    ax1.set_xlim([xstart, xstop])
    ax1.set_ylabel('Residuals')
@@ -740,3 +679,43 @@ def error_band(x, func, beta):
    err = np.array(err)

    return err


def ks_test(objects=None):
    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.

    Parameters
    ----------
    objects : list
        List of fit results to include in the analysis (optional).
    """

    if objects is None:
        obs_list = []
        for obj in gc.get_objects():
            if isinstance(obj, Fit_result):
                obs_list.append(obj)
    else:
        obs_list = objects

    p_values = [o.p_value for o in obs_list]

    bins = len(p_values)
    x = np.arange(0, 1.001, 0.001)
    plt.plot(x, x, 'k', zorder=1)
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('p-value')
    plt.ylabel('Cumulative probability')
    plt.title(str(bins) + ' p-values')

    n = np.arange(1, bins + 1) / np.float64(bins)
    Xs = np.sort(p_values)
    plt.step(Xs, n)
    diffs = n - Xs
    loc_max_diff = np.argmax(np.abs(diffs))
    loc = Xs[loc_max_diff]
    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
    plt.draw()

    print(scipy.stats.kstest(p_values, 'uniform'))
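A hedged usage sketch: after several fits have been performed in a session, `ks_test` can be called without arguments and collects all `Fit_result` objects via the garbage collector (`res1`, `res2` are placeholder fit results):

```python
import pyerrors.fits

pyerrors.fits.ks_test()              # all Fit_result instances in memory
pyerrors.fits.ks_test([res1, res2])  # or an explicit list
```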
pyerrors/input/hadrons.py

@@ -1,6 +1,7 @@
import os
import h5py
import numpy as np
from collections import Counter
from ..obs import Obs, CObs
from ..correlators import Corr
@@ -32,6 +33,10 @@ def _get_files(path, filestem, idl):
            filtered_files.append(line)
            cnfg_numbers.append(no)

    if idl:
        if Counter(list(idl)) != Counter(cnfg_numbers):
            raise Exception("Not all configurations specified in idl found, configurations " + str(list(Counter(list(idl)) - Counter(cnfg_numbers))) + " are missing.")

    # Check that configurations are evenly spaced
    dc = np.unique(np.diff(cnfg_numbers))
    if np.any(dc < 0):
@@ -58,16 +63,13 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson', idl=None):
    meson : str
        label of the meson to be extracted, standard value meson_0 which
        corresponds to the pseudoscalar pseudoscalar two-point function.
    tree : str
        Label of the upmost directory in the hdf5 file, default 'meson'
        for outputs of the Meson module. Can be altered to read input
        from other modules with similar structures.
    idl : range
        If specified only configurations in the given range are read in.
    """

    files, idx = _get_files(path, filestem, idl)

    tree = meson.rsplit('_')[0]
    corr_data = []
    infos = []
    for hd5_file in files:
@@ -155,7 +157,7 @@ def read_ExternalLeg_hd5(path, filestem, ens_id, idl=None):
        raw_data = file['ExternalLeg/corr'][0][0].view('complex')
        corr_data.append(raw_data)
        if mom is None:
            mom = np.array(str(file['ExternalLeg/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
            mom = np.array(str(file['ExternalLeg/info'].attrs['pIn'])[3:-2].strip().split(), dtype=float)
        file.close()
    corr_data = np.array(corr_data)

@ -200,9 +202,9 @@ def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
|
|||
raw_data = file['Bilinear/Bilinear_' + str(i) + '/corr'][0][0].view('complex')
|
||||
corr_data[name].append(raw_data)
|
||||
if mom_in is None:
|
||||
mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
|
||||
mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(), dtype=float)
|
||||
if mom_out is None:
|
||||
mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int)
|
||||
mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(), dtype=float)
|
||||
|
||||
file.close()
|
||||
|
||||
|
@ -265,9 +267,9 @@ def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
|
|||
raw_data = file[tree + str(i) + '/corr'][0][0].view('complex')
|
||||
corr_data[name].append(raw_data)
|
||||
if mom_in is None:
|
||||
mom_in = np.array(str(file[tree + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
|
||||
mom_in = np.array(str(file[tree + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(), dtype=float)
|
||||
if mom_out is None:
|
||||
mom_out = np.array(str(file[tree + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int)
|
||||
mom_out = np.array(str(file[tree + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(), dtype=float)
|
||||
|
||||
file.close()
|
||||
|
||||
|
|
|
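A sketch of how the Hadrons reader with the new `idl` consistency check might be called; the directory, file stem and ensemble id below are placeholders:

```python
import pyerrors as pe

# Read a pseudoscalar two-point function written by the Hadrons Meson module,
# restricted to an explicit, evenly spaced set of configurations.
corr = pe.input.hadrons.read_meson_hd5('./hd5_files', 'meson_run', 'A654',
                                       meson='meson_0', idl=range(10, 1010, 10))
corr.gamma_method()
print(corr)
```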
@@ -6,8 +6,11 @@ import socket
import datetime
import platform
import warnings
import re
from ..obs import Obs
from ..covobs import Covobs
from ..correlators import Corr
from ..misc import _assert_equal_properties
from .. import version as pyerrorsversion

@@ -18,8 +21,8 @@ def create_json_string(ol, description='', indent=1):
    Parameters
    ----------
    ol : list
        List of objects that will be exported. At the moments, these objects can be
        either of: Obs, list, numpy.ndarray.
        List of objects that will be exported. At the moment, these objects can be
        either of: Obs, list, numpy.ndarray, Corr.
        All Obs inside a structure have to be defined on the same set of configurations.
    description : str
        Optional string that describes the contents of the json file.

@@ -103,20 +106,6 @@ def create_json_string(ol, description='', indent=1):
            dl.append(ed)
        return dl

    def _assert_equal_properties(ol, otype=Obs):
        for o in ol:
            if not isinstance(o, otype):
                raise Exception("Wrong data type in list.")
        for o in ol[1:]:
            if not ol[0].is_merged == o.is_merged:
                raise Exception("All Obs in list have to be defined on the same set of configs.")
            if not ol[0].reweighted == o.reweighted:
                raise Exception("All Obs in list have to have the same property 'reweighted'.")
            if not ol[0].e_content == o.e_content:
                raise Exception("All Obs in list have to be defined on the same set of configs.")
            if not ol[0].idl == o.idl:
                raise Exception("All Obs in list have to be defined on the same set of configurations.")

    def write_Obs_to_dict(o):
        d = {}
        d['type'] = 'Obs'
@@ -173,12 +162,44 @@ def create_json_string(ol, description='', indent=1):
        d['cdata'] = cdata
        return d

    def _nan_Obs_like(obs):
        samples = []
        names = []
        idl = []
        for key, value in obs.idl.items():
            samples.append([np.nan] * len(value))
            names.append(key)
            idl.append(value)
        my_obs = Obs(samples, names, idl)
        my_obs.reweighted = obs.reweighted
        my_obs.is_merged = obs.is_merged
        return my_obs

    def write_Corr_to_dict(my_corr):
        first_not_none = next(i for i, j in enumerate(my_corr.content) if np.all(j))
        dummy_array = np.empty((my_corr.N, my_corr.N), dtype=object)
        dummy_array[:] = _nan_Obs_like(my_corr.content[first_not_none].ravel()[0])
        content = [o if o is not None else dummy_array for o in my_corr.content]
        dat = write_Array_to_dict(np.array(content, dtype=object))
        dat['type'] = 'Corr'
        corr_meta_data = str(my_corr.tag)
        if 'tag' in dat.keys():
            dat['tag'].append(corr_meta_data)
        else:
            dat['tag'] = [corr_meta_data]
        taglist = dat['tag']
        dat['tag'] = {}  # tag is now a dictionary that contains the previous taglist in the key "tag"
        dat['tag']['tag'] = taglist
        if my_corr.prange is not None:
            dat['tag']['prange'] = my_corr.prange
        return dat

    if not isinstance(ol, list):
        ol = [ol]

    d = {}
    d['program'] = 'pyerrors %s' % (pyerrorsversion.__version__)
    d['version'] = '0.1'
    d['version'] = '0.2'
    d['who'] = getpass.getuser()
    d['date'] = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S %z')
    d['host'] = socket.gethostname() + ', ' + platform.platform()

@@ -193,6 +214,10 @@ def create_json_string(ol, description='', indent=1):
            d['obsdata'].append(write_List_to_dict(io))
        elif isinstance(io, np.ndarray):
            d['obsdata'].append(write_Array_to_dict(io))
        elif isinstance(io, Corr):
            d['obsdata'].append(write_Corr_to_dict(io))
        else:
            raise Exception("Unkown datatype.")

    jsonstring = json.dumps(d, indent=indent, cls=my_encoder, ensure_ascii=False)

@@ -212,6 +237,7 @@ def create_json_string(ol, description='', indent=1):
        return '\n'.join(split)

    jsonstring = remove_quotationmarks(jsonstring)
    jsonstring = jsonstring.replace('nan', 'NaN')
    return jsonstring

@@ -221,8 +247,8 @@ def dump_to_json(ol, fname, description='', indent=1, gz=True):
    Parameters
    ----------
    ol : list
        List of objects that will be exported. At the moments, these objects can be
        either of: Obs, list, numpy.ndarray.
        List of objects that will be exported. At the moment, these objects can be
        either of: Obs, list, numpy.ndarray, Corr.
        All Obs inside a structure have to be defined on the same set of configurations.
    fname : str
        Filename of the output file.

@@ -255,7 +281,7 @@ def dump_to_json(ol, fname, description='', indent=1, gz=True):
def import_json_string(json_string, verbose=True, full_output=False):
    """Reconstruct a list of Obs or structures containing Obs from a json string.

    The following structures are supported: Obs, list, numpy.ndarray
    The following structures are supported: Obs, list, numpy.ndarray, Corr
    If the list contains only one element, it is unpacked from the list.

    Parameters
@@ -374,6 +400,33 @@ def import_json_string(json_string, verbose=True, full_output=False):
            ret[-1].tag = taglist[i]
        return np.reshape(ret, layout)

    def get_Corr_from_dict(o):
        if isinstance(o.get('tag'), list):  # supports the old way
            taglist = o.get('tag')  # This had to be modified to get the taglist from the dictionary
            temp_prange = None
        elif isinstance(o.get('tag'), dict):
            tagdic = o.get('tag')
            taglist = tagdic['tag']
            if 'prange' in tagdic:
                temp_prange = tagdic['prange']
            else:
                temp_prange = None
        else:
            raise Exception("The tag is not a list or dict")

        corr_tag = taglist[-1]
        tmp_o = o
        tmp_o['tag'] = taglist[:-1]
        if len(tmp_o['tag']) == 0:
            del tmp_o['tag']
        dat = get_Array_from_dict(tmp_o)
        my_corr = Corr([None if np.isnan(o.ravel()[0].value) else o for o in list(dat)])
        if corr_tag != 'None':
            my_corr.tag = corr_tag

        my_corr.prange = temp_prange
        return my_corr

    json_dict = json.loads(json_string)

    prog = json_dict.get('program', '')

@@ -400,6 +453,10 @@ def import_json_string(json_string, verbose=True, full_output=False):
            ol.append(get_List_from_dict(io))
        elif io['type'] == 'Array':
            ol.append(get_Array_from_dict(io))
        elif io['type'] == 'Corr':
            ol.append(get_Corr_from_dict(io))
        else:
            raise Exception("Unkown datatype.")

    if full_output:
        retd = {}

@@ -420,9 +477,9 @@ def import_json_string(json_string, verbose=True, full_output=False):


def load_json(fname, verbose=True, gz=True, full_output=False):
    """Import a list of Obs or structures containing Obs from a .json.gz file.
    """Import a list of Obs or structures containing Obs from a .json(.gz) file.

    The following structures are supported: Obs, list, numpy.ndarray
    The following structures are supported: Obs, list, numpy.ndarray, Corr
    If the list contains only one element, it is unpacked from the list.

    Parameters
@@ -451,3 +508,215 @@ def load_json(fname, verbose=True, gz=True, full_output=False):
            d = fin.read()

    return import_json_string(d, verbose, full_output)


def _ol_from_dict(ind, reps='DICTOBS'):
    """Convert a dictionary of Obs objects to a list and a dictionary that contains
    placeholders instead of the Obs objects.

    Parameters
    ----------
    ind : dict
        Dict of JSON valid structures and objects that will be exported.
        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
        All Obs inside a structure have to be defined on the same set of configurations.
    reps : str
        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
    """

    obstypes = (Obs, Corr, np.ndarray)

    if not reps.isalnum():
        raise Exception('Placeholder string has to be alphanumeric!')
    ol = []
    counter = 0

    def dict_replace_obs(d):
        nonlocal ol
        nonlocal counter
        x = {}
        for k, v in d.items():
            if isinstance(v, dict):
                v = dict_replace_obs(v)
            elif isinstance(v, list) and all([isinstance(o, Obs) for o in v]):
                v = obslist_replace_obs(v)
            elif isinstance(v, list):
                v = list_replace_obs(v)
            elif isinstance(v, obstypes):
                ol.append(v)
                v = reps + '%d' % (counter)
                counter += 1
            elif isinstance(v, str):
                if bool(re.match(r'%s[0-9]+' % (reps), v)):
                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be savely exported.' % (v, reps))
            x[k] = v
        return x

    def list_replace_obs(li):
        nonlocal ol
        nonlocal counter
        x = []
        for e in li:
            if isinstance(e, list):
                e = list_replace_obs(e)
            elif isinstance(e, list) and all([isinstance(o, Obs) for o in e]):
                e = obslist_replace_obs(e)
            elif isinstance(e, dict):
                e = dict_replace_obs(e)
            elif isinstance(e, obstypes):
                ol.append(e)
                e = reps + '%d' % (counter)
                counter += 1
            elif isinstance(e, str):
                if bool(re.match(r'%s[0-9]+' % (reps), e)):
                    raise Exception('Dict contains string %s that matches the placeholder! %s Cannot be savely exported.' % (e, reps))
            x.append(e)
        return x

    def obslist_replace_obs(li):
        nonlocal ol
        nonlocal counter
        il = []
        for e in li:
            il.append(e)

        ol.append(il)
        x = reps + '%d' % (counter)
        counter += 1
        return x

    nd = dict_replace_obs(ind)

    return ol, nd


def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
    """Export a dict of Obs or structures containing Obs to a .json(.gz) file

    Parameters
    ----------
    od : dict
        Dict of JSON valid structures and objects that will be exported.
        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
        All Obs inside a structure have to be defined on the same set of configurations.
    fname : str
        Filename of the output file.
    description : str
        Optional string that describes the contents of the json file.
    indent : int
        Specify the indentation level of the json file. None or 0 is permissible and
        saves disk space.
    reps : str
        Specify the structure of the placeholder in exported dict to be reps[0-9]+.
    gz : bool
        If True, the output is a gzipped json. If False, the output is a json file.
    """

    if not isinstance(od, dict):
        raise Exception('od has to be a dictionary. Did you want to use dump_to_json?')

    infostring = ('This JSON file contains a python dictionary that has been parsed to a list of structures. '
                  'OBSDICT contains the dictionary, where Obs or other structures have been replaced by '
                  '' + reps + '[0-9]+. The field description contains the additional description of this JSON file. '
                  'This file may be parsed to a dict with the pyerrors routine load_json_dict.')

    desc_dict = {'INFO': infostring, 'OBSDICT': {}, 'description': description}
    ol, desc_dict['OBSDICT'] = _ol_from_dict(od, reps=reps)

    dump_to_json(ol, fname, description=desc_dict, indent=indent, gz=gz)


def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
    """Parse a list of Obs or structures containing Obs and an accompanying
    dict, where the structures have been replaced by placeholders, to a
    dict that contains the structures.

    The following structures are supported: Obs, list, numpy.ndarray, Corr

    Parameters
    ----------
    ol : list
        List of objects.
        At the moment, these objects can be either of: Obs, list, numpy.ndarray, Corr.
        All Obs inside a structure have to be defined on the same set of configurations.
    ind : dict
        Dict that defines the structure of the resulting dict and contains placeholders
    reps : str
        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
    """
    if not reps.isalnum():
        raise Exception('Placeholder string has to be alphanumeric!')

    counter = 0

    def dict_replace_string(d):
        nonlocal counter
        nonlocal ol
        x = {}
        for k, v in d.items():
            if isinstance(v, dict):
                v = dict_replace_string(v)
            elif isinstance(v, list):
                v = list_replace_string(v)
            elif isinstance(v, str) and bool(re.match(r'%s[0-9]+' % (reps), v)):
                index = int(v[len(reps):])
                v = ol[index]
                counter += 1
            x[k] = v
        return x

    def list_replace_string(li):
        nonlocal counter
        nonlocal ol
        x = []
        for e in li:
            if isinstance(e, list):
                e = list_replace_string(e)
            elif isinstance(e, dict):
                e = dict_replace_string(e)
            elif isinstance(e, str) and bool(re.match(r'%s[0-9]+' % (reps), e)):
                index = int(e[len(reps):])
                e = ol[index]
                counter += 1
            x.append(e)
        return x

    nd = dict_replace_string(ind)

    if counter == 0:
        raise Exception('No placeholder has been replaced! Check if reps is set correctly.')

    return nd


def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
    """Import a dict of Obs or structures containing Obs from a .json(.gz) file.

    The following structures are supported: Obs, list, numpy.ndarray, Corr

    Parameters
    ----------
    fname : str
        Filename of the input file.
    verbose : bool
        Print additional information that was written to the file.
    gz : bool
        If True, assumes that data is gzipped. If False, assumes JSON file.
    full_output : bool
        If True, a dict containing auxiliary information and the data is returned.
        If False, only the data is returned.
    reps : str
        Specify the structure of the placeholder in imported dict to be reps[0-9]+.
    """
    indata = load_json(fname, verbose=verbose, gz=gz, full_output=True)
    description = indata['description']['description']
    indict = indata['description']['OBSDICT']
    ol = indata['obsdata']
    od = _od_from_list_and_dict(ol, indict, reps=reps)

    if full_output:
        indata['description'] = description
        indata['obsdata'] = od
        return indata
    else:
        return od
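A sketch of the round trip provided by the two new dictionary routines; the observable and the dictionary content are toy data and the default gzipped output is assumed:

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensembleA'])
my_dict = {'model': 'linear', 'results': {'slope': obs, 'points': [1, 2, 3]}}

# Obs entries are replaced by DICTOBS placeholders on export and restored on import.
pe.input.json.dump_dict_to_json(my_dict, 'my_analysis', description='toy example')
reconstructed = pe.input.json.load_json_dict('my_analysis')
```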
@@ -1,6 +1,3 @@
#!/usr/bin/env python
# coding: utf-8

import os
import fnmatch
import re

@@ -12,11 +9,12 @@ from ..obs import Obs
def read_pbp(path, prefix, **kwargs):
    """Read pbp format from given folder structure. Returns a list of length nrw

    Keyword arguments
    -----------------
    r_start -- list which contains the first config to be read for each replicum
    r_stop -- list which contains the last config to be read for each replicum

    Parameters
    ----------
    r_start : list
        list which contains the first config to be read for each replicum
    r_stop : list
        list which contains the last config to be read for each replicum
    """

    extract_nfct = 1

@@ -66,7 +64,6 @@ def read_pbp(path, prefix, **kwargs):
        tmp_array = []
        with open(path + '/' + ls[rep], 'rb') as fp:

            # header
            t = fp.read(4)  # number of reweighting factors
            if rep == 0:
                nrw = struct.unpack('i', t)[0]

@@ -74,7 +71,7 @@ def read_pbp(path, prefix, **kwargs):
                deltas.append([])
            else:
                if nrw != struct.unpack('i', t)[0]:
                    raise Exception('Error: different number of reweighting factors for replicum', rep)
                    raise Exception('Error: different number of factors for replicum', rep)

            for k in range(nrw):
                tmp_array.append([])
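A usage sketch, assuming the routine is exposed as `pyerrors.input.misc.read_pbp` and that the directory and prefix below exist:

```python
import pyerrors as pe

# One Obs per reweighting factor; r_start/r_stop optionally cut each replicum.
pbp = pe.input.misc.read_pbp('./pbp_data', 'runA', r_start=[1, 1], r_stop=[500, 500])
```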
@@ -1,11 +1,11 @@
#!/usr/bin/env python
# coding: utf-8

import os
import fnmatch
import re
import struct
import numpy as np  # Thinly-wrapped numpy
import warnings
import matplotlib.pyplot as plt
from matplotlib import gridspec
from ..obs import Obs
from ..fits import fit_lin

@@ -15,16 +15,31 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):

    Parameters
    ----------
    path : str
        path that contains the data files
    prefix : str
        all files in path that start with prefix are considered as input files.
        May be used together with postfix to consider only special file endings.
        Prefix is ignored if the keyword 'files' is used.
    version : str
        version of openQCD, default 2.0
    names : list
        list of names that is assigned to the data according
        to the order in the file list. Use with care if you do not provide file names!
    r_start : list
        list which contains the first config to be read for each replicum
    r_stop : list
        list which contains the last config to be read for each replicum
    r_step : int
        integer that defines a fixed step size between two measurements (in units of configs)
        If not given, r_step=1 is assumed.
    postfix : str
        postfix of the file to read, e.g. '.ms1' for openQCD-files
    idl_offsets : list
        offsets to the idl range of obs. Useful for the case that the measurements of rwms only start at cfg. 20
    files : list
        list which contains the filenames to be read. No automatic detection of
        files is performed if given.
    print_err : bool
        Print additional information that is useful for debugging.
    """
    known_oqcd_versions = ['1.4', '1.6', '2.0']
    if not (version in known_oqcd_versions):
@@ -44,7 +59,6 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
    if 'files' in kwargs:
        ls = kwargs.get('files')
    else:
        # Exclude files with different names
        for exc in ls:
            if not fnmatch.fnmatch(exc, prefix + '*' + postfix + '.dat'):
                ls = list(set(ls) - set([exc]))

@@ -56,8 +70,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
        r_start = kwargs.get('r_start')
        if len(r_start) != replica:
            raise Exception('r_start does not match number of replicas')
        # Adjust Configuration numbering to python index
        r_start = [o - 1 if o else None for o in r_start]
        r_start = [o if o else None for o in r_start]
    else:
        r_start = [None] * replica

@@ -68,16 +81,22 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
    else:
        r_stop = [None] * replica

    if 'r_step' in kwargs:
        r_step = kwargs.get('r_step')
    else:
        r_step = 1

    print('Read reweighting factors from', prefix[:-1], ',',
          replica, 'replica', end='')

    # Adjust replica names to new bookmarking system
    if names is None:
        rep_names = []
        for entry in ls:
            truncated_entry = entry.split('.')[0]
            idx = truncated_entry.index('r')
            rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
    else:
        rep_names = names

    print_err = 0
    if 'print_err' in kwargs:

@@ -86,11 +105,14 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):

    deltas = []

    configlist = []
    r_start_index = []
    r_stop_index = []

    for rep in range(replica):
        tmp_array = []
        with open(path + '/' + ls[rep], 'rb') as fp:

            # header
            t = fp.read(4)  # number of reweighting factors
            if rep == 0:
                nrw = struct.unpack('i', t)[0]

@@ -99,7 +121,6 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                for k in range(nrw):
                    deltas.append([])
            else:
                # little weird if-clause due to the /2 operation needed.
                if ((nrw != struct.unpack('i', t)[0] and (not version == '2.0')) or (nrw != struct.unpack('i', t)[0] / 2 and version == '2.0')):
                    raise Exception('Error: different number of reweighting factors for replicum', rep)

@@ -112,8 +133,6 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                for i in range(nrw):
                    t = fp.read(4)
                    nfct.append(struct.unpack('i', t)[0])
                # print('nfct: ', nfct)  # Hasenbusch factor,
                # 1 for rat reweighting
            else:
                for i in range(nrw):
                    nfct.append(1)

@@ -126,13 +145,13 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                if not struct.unpack('i', fp.read(4))[0] == 0:
                    print('something is wrong!')

            # body
            configlist.append([])
            while 0 < 1:
                t = fp.read(4)
                if len(t) < 4:
                    break
                if print_err:
                    config_no = struct.unpack('i', t)
                config_no = struct.unpack('i', t)[0]
                configlist[-1].append(config_no)
                for i in range(nrw):
                    if(version == '2.0'):
                        tmpd = _read_array_openQCD2(fp)

@@ -163,8 +182,32 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                            print('Partial factor:', tmp_nfct)
                    tmp_array[i].append(tmp_nfct)

            if r_start[rep] is None:
                r_start_index.append(0)
            else:
                try:
                    r_start_index.append(configlist[-1].index(r_start[rep]))
                except ValueError:
                    raise Exception('Config %d not in file with range [%d, %d]' % (
                        r_start[rep], configlist[-1][0], configlist[-1][-1])) from None

            if r_stop[rep] is None:
                r_stop_index.append(len(configlist[-1]) - 1)
            else:
                try:
                    r_stop_index.append(configlist[-1].index(r_stop[rep]))
                except ValueError:
                    raise Exception('Config %d not in file with range [%d, %d]' % (
                        r_stop[rep], configlist[-1][0], configlist[-1][-1])) from None

            for k in range(nrw):
                deltas[k].append(tmp_array[k][r_start[rep]:r_stop[rep]])
                deltas[k].append(tmp_array[k][r_start_index[rep]:r_stop_index[rep]][::r_step])

    if np.any([len(np.unique(np.diff(cl))) != 1 for cl in configlist]):
        raise Exception('Irregular spaced data in input file!', [len(np.unique(np.diff(cl))) for cl in configlist])
    stepsizes = [list(np.unique(np.diff(cl)))[0] for cl in configlist]
    if np.any([step != 1 for step in stepsizes]):
        warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning)

    print(',', nrw, 'reweighting factors with', nsrc, 'sources')
    if "idl_offsets" in kwargs:

@@ -172,15 +215,9 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
    else:
        idl_offsets = np.ones(nrw, dtype=int)
    result = []
    idl = [range(configlist[rep][r_start_index[rep]], configlist[rep][r_stop_index[rep]], r_step) for rep in range(replica)]
    for t in range(nrw):
        idl = []
        for rep in range(replica):
            idl.append(range(idl_offsets[rep], len(deltas[t][rep] + idl_offsets[rep])))
        if names is None:
            result.append(Obs(deltas[t], rep_names, idl=idl))
        else:
            print(names)
            result.append(Obs(deltas[t], names, idl=idl))
        result.append(Obs(deltas[t], rep_names, idl=idl))
    return result
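A usage sketch for the updated reader; the directory, prefix and thinning step below are placeholders:

```python
import pyerrors as pe

# Read the ms1 reweighting factors of all replica found in a directory,
# keeping only every second measurement via the new r_step keyword.
rwms = pe.input.openQCD.read_rwms('./ms1_files', 'runA', version='2.0', r_step=2)
rwms[0].gamma_method()
rwms[0].details()
```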
@@ -193,7 +230,12 @@ def extract_t0(path, prefix, dtr_read, xmin,
    The data around the zero crossing of t^2<E> - 0.3
    is fitted with a linear function
    from which the exact root is extracted.
    Only works with openQCD v 1.2.
    Only works with openQCD

    It is assumed that one measurement is performed for each config.
    If this is not the case, the resulting idl, as well as the handling
    of r_start, r_stop and r_step is wrong and the user has to correct
    this in the resulting observable.

    Parameters
    ----------

@@ -215,10 +257,26 @@ def extract_t0(path, prefix, dtr_read, xmin,
        crossing to be included in the linear fit. (Default: 5)
    r_start : list
        list which contains the first config to be read for each replicum.
    r_stop: list
    r_stop : list
        list which contains the last config to be read for each replicum.
    r_step : int
        integer that defines a fixed step size between two measurements (in units of configs)
        If not given, r_step=1 is assumed.
    plaquette : bool
        If true extract the plaquette estimate of t0 instead.
    names : list
        list of names that is assigned to the data according
        to the order in the file list. Use with care if you do not provide file names!
    files : list
        list which contains the filenames to be read. No automatic detection of
        files is performed if given.
    plot_fit : bool
        If true, the fit for the extraction of t0 is shown together with the data.
    assume_thermalization : bool
        If True: If the first record divided by the distance between two measurements is larger than
        1, it is assumed that this is due to thermalization and the first measurement belongs
        to the first config (default).
        If False: The config numbers are assumed to be traj_number // difference
    """

    ls = []
@@ -229,20 +287,21 @@ def extract_t0(path, prefix, dtr_read, xmin,
    if not ls:
        raise Exception('Error, directory not found')

    # Exclude files with different names
    for exc in ls:
        if not fnmatch.fnmatch(exc, prefix + '*.ms.dat'):
            ls = list(set(ls) - set([exc]))
    if len(ls) > 1:
        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
    if 'files' in kwargs:
        ls = kwargs.get('files')
    else:
        for exc in ls:
            if not fnmatch.fnmatch(exc, prefix + '*.ms.dat'):
                ls = list(set(ls) - set([exc]))
        if len(ls) > 1:
            ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
    replica = len(ls)

    if 'r_start' in kwargs:
        r_start = kwargs.get('r_start')
        if len(r_start) != replica:
            raise Exception('r_start does not match number of replicas')
        # Adjust Configuration numbering to python index
        r_start = [o - 1 if o else None for o in r_start]
        r_start = [o if o else None for o in r_start]
    else:
        r_start = [None] * replica

@@ -253,14 +312,31 @@ def extract_t0(path, prefix, dtr_read, xmin,
    else:
        r_stop = [None] * replica

    if 'r_step' in kwargs:
        r_step = kwargs.get('r_step')
    else:
        r_step = 1

    print('Extract t0 from', prefix, ',', replica, 'replica')

    if 'names' in kwargs:
        rep_names = kwargs.get('names')
    else:
        rep_names = []
        for entry in ls:
            truncated_entry = entry.split('.')[0]
            idx = truncated_entry.index('r')
            rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])

    Ysum = []

    configlist = []
    r_start_index = []
    r_stop_index = []

    for rep in range(replica):

        with open(path + '/' + ls[rep], 'rb') as fp:
            # Read header
            t = fp.read(12)
            header = struct.unpack('iii', t)
            if rep == 0:

@@ -279,12 +355,13 @@ def extract_t0(path, prefix, dtr_read, xmin,
            Ysl = []

            # Read body
            configlist.append([])
            while 0 < 1:
                t = fp.read(4)
                if(len(t) < 4):
                    break
                nc = struct.unpack('i', t)[0]
                configlist[-1].append(nc)

                t = fp.read(8 * tmax * (nn + 1))
                if kwargs.get('plaquette'):

@@ -302,6 +379,38 @@ def extract_t0(path, prefix, dtr_read, xmin,
                                  current + tmax - xmin])
                   for current in range(0, len(item), tmax)])

        diffmeas = configlist[-1][-1] - configlist[-1][-2]
        configlist[-1] = [item // diffmeas for item in configlist[-1]]
        if kwargs.get('assume_thermalization', True) and configlist[-1][0] > 1:
            warnings.warn('Assume thermalization and that the first measurement belongs to the first config.')
            offset = configlist[-1][0] - 1
            configlist[-1] = [item - offset for item in configlist[-1]]

        if r_start[rep] is None:
            r_start_index.append(0)
        else:
            try:
                r_start_index.append(configlist[-1].index(r_start[rep]))
            except ValueError:
                raise Exception('Config %d not in file with range [%d, %d]' % (
                    r_start[rep], configlist[-1][0], configlist[-1][-1])) from None

        if r_stop[rep] is None:
            r_stop_index.append(len(configlist[-1]) - 1)
        else:
            try:
                r_stop_index.append(configlist[-1].index(r_stop[rep]))
            except ValueError:
                raise Exception('Config %d not in file with range [%d, %d]' % (
                    r_stop[rep], configlist[-1][0], configlist[-1][-1])) from None

    if np.any([len(np.unique(np.diff(cl))) != 1 for cl in configlist]):
        raise Exception('Irregular spaced data in input file!', [len(np.unique(np.diff(cl))) for cl in configlist])
    stepsizes = [list(np.unique(np.diff(cl)))[0] for cl in configlist]
    if np.any([step != 1 for step in stepsizes]):
        warnings.warn('Stepsize between configurations is greater than one!' + str(stepsizes), RuntimeWarning)

    idl = [range(configlist[rep][r_start_index[rep]], configlist[rep][r_stop_index[rep]], r_step) for rep in range(replica)]
    t2E_dict = {}
    for n in range(nn + 1):
        samples = []

@@ -309,8 +418,8 @@ def extract_t0(path, prefix, dtr_read, xmin,
            samples.append([])
            for cnfg in rep:
                samples[-1].append(cnfg[n])
            samples[-1] = samples[-1][r_start[nrep]:r_stop[nrep]]
        new_obs = Obs(samples, [(w.split('.'))[0] for w in ls])
            samples[-1] = samples[-1][r_start_index[nrep]:r_stop_index[nrep]][::r_step]
        new_obs = Obs(samples, rep_names, idl=idl)
        t2E_dict[n * dn * eps] = (n * dn * eps) ** 2 * new_obs / (spatial_extent ** 3) - 0.3

    zero_crossing = np.argmax(np.array(

@@ -323,32 +432,62 @@ def extract_t0(path, prefix, dtr_read, xmin,
    [o.gamma_method() for o in y]

    fit_result = fit_lin(x, y)

    if kwargs.get('plot_fit'):
        plt.figure()
        gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
        ax0 = plt.subplot(gs[0])
        xmore = list(t2E_dict.keys())[zero_crossing - fit_range - 2: zero_crossing + fit_range + 2]
        ymore = list(t2E_dict.values())[zero_crossing - fit_range - 2: zero_crossing + fit_range + 2]
        [o.gamma_method() for o in ymore]
        ax0.errorbar(xmore, [yi.value for yi in ymore], yerr=[yi.dvalue for yi in ymore], fmt='x')
        xplot = np.linspace(np.min(x), np.max(x))
        yplot = [fit_result[0] + fit_result[1] * xi for xi in xplot]
        [yi.gamma_method() for yi in yplot]
        ax0.fill_between(xplot, y1=[yi.value - yi.dvalue for yi in yplot], y2=[yi.value + yi.dvalue for yi in yplot])
        retval = (-fit_result[0] / fit_result[1])
        retval.gamma_method()
        ylim = ax0.get_ylim()
        ax0.fill_betweenx(ylim, x1=retval.value - retval.dvalue, x2=retval.value + retval.dvalue, color='gray', alpha=0.4)
        ax0.set_ylim(ylim)
        ax0.set_ylabel(r'$t^2 \langle E(t) \rangle - 0.3 $')
        xlim = ax0.get_xlim()

        fit_res = [fit_result[0] + fit_result[1] * xi for xi in x]
        residuals = (np.asarray([o.value for o in y]) - [o.value for o in fit_res]) / np.asarray([o.dvalue for o in y])
        ax1 = plt.subplot(gs[1])
        ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
        ax1.tick_params(direction='out')
        ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
        ax1.axhline(y=0.0, ls='--', color='k')
        ax1.fill_between(xlim, -1.0, 1.0, alpha=0.1, facecolor='k')
        ax1.set_xlim(xlim)
        ax1.set_ylabel('Residuals')
        ax1.set_xlabel(r'$t/a^2$')

        plt.show()
    return -fit_result[0] / fit_result[1]


def _parse_array_openQCD2(d, n, size, wa, quadrupel=False):
    arr = []
    if d == 2:
        tot = 0
        for i in range(n[d - 1] - 1):
        for i in range(n[0]):
            tmp = wa[i * n[1]:(i + 1) * n[1]]
            if quadrupel:
                tmp = wa[tot:n[d - 1]]
                tmp2 = []
                for i in range(len(tmp)):
                    if i % 2 == 0:
                        tmp2.append(tmp[i])
                for j in range(0, len(tmp), 2):
                    tmp2.append(tmp[j])
                arr.append(tmp2)
            else:
                arr.append(np.asarray(wa[tot:n[d - 1]]))
                arr.append(np.asarray(tmp))

    else:
        raise Exception('Only two-dimensional arrays supported!')

    return arr


# mimic the read_array routine of openQCD-2.0.
# fp is the opened file handle
# returns the dict array
# at this point we only parse a 2d array
# d = 2
# n = [nfct[irw], 2*nsrc[irw]]
def _read_array_openQCD2(fp):
    t = fp.read(4)
    d = struct.unpack('i', t)[0]
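A usage sketch for `extract_t0` with the new plotting and thermalization handling; every path and numerical value below is a placeholder:

```python
import pyerrors as pe

# Extract t0/a^2 from openQCD .ms.dat files and show the linear fit
# around the zero crossing of t^2 <E(t)> - 0.3.
t0 = pe.input.openQCD.extract_t0('./ms_files', 'runA', dtr_read=2, xmin=5,
                                 spatial_extent=32, fit_range=5, plot_fit=True)
t0.gamma_method()
print(t0)
```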
@@ -380,42 +519,40 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):

    Parameters
    ----------
    path:
    path : str
        path of the measurement files
    prefix:
    prefix : str
        prefix of the measurement files, e.g. <prefix>_id0_r0.ms.dat
    c: double
    c : double
        Smearing radius in units of the lattice extent, c = sqrt(8 t0) / L
    dtr_cnfg: int
    dtr_cnfg : int
        (optional) parameter that specifies the number of trajectories
        between two configs.
        if it is not set, the distance between two measurements
        in the file is assumed to be
        the distance between two configurations.
    steps: int
    steps : int
        (optional) (maybe only necessary for openQCD2.0)
        nt step size, guessed if not given
    version: str
    version : str
        version string of the openQCD (sfqcd) version used to create
        the ensemble
    L: int
    L : int
        spatial length of the lattice in L/a.
        HAS to be set if version != sfqcd, since openQCD does not provide
        this in the header
    r_start: list
    r_start : list
        offset of the first ensemble, making it easier to match
        later on with other Obs
    r_stop: list
    r_stop : list
        last configurations that need to be read (per replicum)
    files: list
    files : list
        specify the exact files that need to be read
        from path, pratical if e.g. only one replicum is needed
        from path, practical if e.g. only one replicum is needed
    names: list
    names : list
        Alternative labeling for replicas/ensembles.
        Has to have the appropriate length
    """
    # one could read L from the header in case of sfQCD
    # c = 0.35
    known_versions = ["1.0", "1.2", "1.4", "1.6", "2.0", "sfqcd"]

    if version not in known_versions:

@@ -435,11 +572,9 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):
        r_start = kwargs.get("r_start")
    if "r_stop" in kwargs:
        r_stop = kwargs.get("r_stop")
    # if one wants to read specific files with this method...
    if "files" in kwargs:
        files = kwargs.get("files")
    else:
        # find files in path
        found = []
        files = []
        for (dirpath, dirnames, filenames) in os.walk(path + "/"):

@@ -450,14 +585,12 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):
            if fnmatch.fnmatch(f, prefix + "*" + ".ms.dat"):
                files.append(f)
        print(files)
    # now that we found our files, we decipher them...
    rep_names = []

    deltas = []
    idl = []
    for rep, file in enumerate(files):
        with open(path + "/" + file, "rb") as fp:
            # header
            t = fp.read(12)
            header = struct.unpack('<iii', t)
            # step size in integration steps "dnms"

@@ -486,7 +619,6 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):
            Q = []
            ncs = []
            while 0 < 1:
                # int nt
                t = fp.read(4)
                if(len(t) < 4):
                    break

@@ -533,8 +665,6 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):
            if len(Q_round) != len(ncs) // dtr_cnfg:
                raise Exception("qtops and ncs dont have the same length")

            # replica = len(files)

            truncated_file = file[:-7]
            print(truncated_file)
            idl_start = 1

@@ -562,17 +692,24 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="1.2", **kwargs):
            rep_names = names
        deltas.append(np.array(Q_round))
        idl.append(range(idl_start, idl_stop))
    # print(idl)
    result = Obs(deltas, rep_names, idl=idl)
    return result


def read_qtop_sector(target=0, **kwargs):
    """target: int
        specifies the topological sector to be reweighted to (default 0)
    q_top: Obs
        alternatively takes args of read_qtop method as kwargs
    """Constructs reweighting factors to a specified topological sector.

    Parameters
    ----------
    target : int
        Specifies the topological sector to be reweighted to (default 0)
    q_top : Obs
        Alternatively takes args of read_qtop method as kwargs
    """

    if not isinstance(target, int):
        raise Exception("'target' has to be an integer.")

    if "q_top" in kwargs:
        qtop = kwargs.get("q_top")
    else:

@@ -603,7 +740,6 @@ def read_qtop_sector(target=0, **kwargs):
        dtr_cnfg = 1
        qtop = read_qtop(path, prefix, c, dtr_cnfg=dtr_cnfg,
                         version=version, **kwargs)
    # unpack to original values, project onto target sector
    names = qtop.names
    print(names)
    print(qtop.deltas.keys())
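A sketch combining the two routines above; the path, prefix, smearing radius and lattice size are placeholders:

```python
import pyerrors as pe

# Read the topological charge and construct reweighting factors that
# project onto the trivial topological sector.
q_top = pe.input.openQCD.read_qtop('./ms_files', 'runA', 0.35, version='1.2', L=24)
weights = pe.input.openQCD.read_qtop_sector(target=0, q_top=q_top)
```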
@@ -2,11 +2,20 @@


def check_idl(idl, che):
    """Checks if list of configurations is contained in an idl

    Parameters
    ----------
    idl : range or list
        idl of the current replicum
    che : list
        list of configurations to be checked against
    """
    missing = []
    for c in che:
        if c not in idl:
            missing.append(c)
    # print missing such that it can directly be parsed to slurm terminal
    # print missing configurations such that it can directly be parsed to slurm terminal
    if not (len(missing) == 0):
        print(len(missing), "configs missing")
        miss_str = str(missing[0])
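A short example of the helper above, with toy arguments:

```python
from pyerrors.input.utils import check_idl

# Configurations 6 and 8 are not contained in the replicum's idl,
# so the routine prints the missing configuration numbers.
check_idl(range(1, 6), [1, 2, 3, 6, 8])
```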
@@ -213,8 +213,6 @@ def _scalar_mat_op(op, obs, **kwargs):
    """Computes the matrix to scalar operation op to a given matrix of Obs."""
    def _mat(x, **kwargs):
        dim = int(np.sqrt(len(x)))
        if np.sqrt(len(x)) != dim:
            raise Exception('Input has to have dim**2 entries')

        mat = []
        for i in range(dim):

@@ -227,8 +225,6 @@ def _scalar_mat_op(op, obs, **kwargs):

    if isinstance(obs, np.ndarray):
        raveled_obs = (1 * (obs.ravel())).tolist()
    elif isinstance(obs, list):
        raveled_obs = obs
    else:
        raise TypeError('Unproper type of input.')
    return derived_observable(_mat, raveled_obs, **kwargs)

@@ -248,10 +244,7 @@ def _mat_mat_op(op, obs, **kwargs):
                A[n, m] = entry
                B[n, m] = 0.0
        big_matrix = np.block([[A, -B], [B, A]])
        if kwargs.get('num_grad') is True:
            op_big_matrix = _num_diff_mat_mat_op(op, big_matrix, **kwargs)
        else:
            op_big_matrix = derived_observable(lambda x, **kwargs: op(x), [big_matrix], array_mode=True)[0]
        op_big_matrix = derived_observable(lambda x, **kwargs: op(x), [big_matrix], array_mode=True)[0]
        dim = op_big_matrix.shape[0]
        op_A = op_big_matrix[0: dim // 2, 0: dim // 2]
        op_B = op_big_matrix[dim // 2:, 0: dim // 2]

@@ -260,15 +253,11 @@ def _mat_mat_op(op, obs, **kwargs):
                res[n, m] = CObs(op_A[n, m], op_B[n, m])
        return res
    else:
        if kwargs.get('num_grad') is True:
            return _num_diff_mat_mat_op(op, obs, **kwargs)
        return derived_observable(lambda x, **kwargs: op(x), [obs], array_mode=True)[0]


def eigh(obs, **kwargs):
    """Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
    if kwargs.get('num_grad') is True:
        return _num_diff_eigh(obs, **kwargs)
    w = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[0], obs)
    v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
    return w, v

@@ -276,232 +265,18 @@ def eigh(obs, **kwargs):

def eig(obs, **kwargs):
    """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig."""
    if kwargs.get('num_grad') is True:
        return _num_diff_eig(obs, **kwargs)
    # Note: Automatic differentiation of eig is implemented in the git of autograd
    # but not yet released to PyPi (1.3)
    w = derived_observable(lambda x, **kwargs: anp.real(anp.linalg.eig(x)[0]), obs)
    return w


def pinv(obs, **kwargs):
    """Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
    if kwargs.get('num_grad') is True:
        return _num_diff_pinv(obs, **kwargs)
    return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)


def svd(obs, **kwargs):
    """Computes the singular value decomposition of a matrix of Obs."""
    if kwargs.get('num_grad') is True:
        return _num_diff_svd(obs, **kwargs)
    u = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[0], obs)
    s = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[1], obs)
    vh = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[2], obs)
    return (u, s, vh)


# Variants for numerical differentiation

def _num_diff_mat_mat_op(op, obs, **kwargs):
    """Computes the matrix to matrix operation op to a given matrix of Obs elementwise
    which is suitable for numerical differentiation."""
    def _mat(x, **kwargs):
        dim = int(np.sqrt(len(x)))
        if np.sqrt(len(x)) != dim:
            raise Exception('Input has to have dim**2 entries')

        mat = []
        for i in range(dim):
            row = []
            for j in range(dim):
                row.append(x[j + dim * i])
            mat.append(row)

        return op(np.array(mat))[kwargs.get('i')][kwargs.get('j')]

    if isinstance(obs, np.ndarray):
        raveled_obs = (1 * (obs.ravel())).tolist()
    elif isinstance(obs, list):
        raveled_obs = obs
    else:
        raise TypeError('Unproper type of input.')

    dim = int(np.sqrt(len(raveled_obs)))

    res_mat = []
    for i in range(dim):
        row = []
        for j in range(dim):
            row.append(derived_observable(_mat, raveled_obs, i=i, j=j, **kwargs))
        res_mat.append(row)

    return np.array(res_mat) @ np.identity(dim)


def _num_diff_eigh(obs, **kwargs):
    """Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh
    elementwise which is suitable for numerical differentiation."""
    def _mat(x, **kwargs):
        dim = int(np.sqrt(len(x)))
        if np.sqrt(len(x)) != dim:
            raise Exception('Input has to have dim**2 entries')

        mat = []
        for i in range(dim):
            row = []
            for j in range(dim):
                row.append(x[j + dim * i])
            mat.append(row)

        n = kwargs.get('n')
        res = np.linalg.eigh(np.array(mat))[n]

        if n == 0:
            return res[kwargs.get('i')]
        else:
            return res[kwargs.get('i')][kwargs.get('j')]

    if isinstance(obs, np.ndarray):
        raveled_obs = (1 * (obs.ravel())).tolist()
    elif isinstance(obs, list):
        raveled_obs = obs
    else:
        raise TypeError('Unproper type of input.')

    dim = int(np.sqrt(len(raveled_obs)))

    res_vec = []
    for i in range(dim):
        res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))

    res_mat = []
    for i in range(dim):
        row = []
        for j in range(dim):
            row.append(derived_observable(_mat, raveled_obs, n=1, i=i, j=j, **kwargs))
        res_mat.append(row)

    return (np.array(res_vec) @ np.identity(dim), np.array(res_mat) @ np.identity(dim))


def _num_diff_eig(obs, **kwargs):
    """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig
    elementwise which is suitable for numerical differentiation."""
    def _mat(x, **kwargs):
        dim = int(np.sqrt(len(x)))
        if np.sqrt(len(x)) != dim:
            raise Exception('Input has to have dim**2 entries')

        mat = []
        for i in range(dim):
            row = []
            for j in range(dim):
                row.append(x[j + dim * i])
            mat.append(row)

        n = kwargs.get('n')
        res = np.linalg.eig(np.array(mat))[n]

        if n == 0:
            # Discard imaginary part of eigenvalue here
            return np.real(res[kwargs.get('i')])
        else:
            return res[kwargs.get('i')][kwargs.get('j')]

    if isinstance(obs, np.ndarray):
        raveled_obs = (1 * (obs.ravel())).tolist()
    elif isinstance(obs, list):
        raveled_obs = obs
    else:
        raise TypeError('Unproper type of input.')

    dim = int(np.sqrt(len(raveled_obs)))

    res_vec = []
    for i in range(dim):
        # Note: Automatic differentiation of eig is implemented in the git of autograd
        # but not yet released to PyPi (1.3)
        res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))

    return np.array(res_vec) @ np.identity(dim)


def _num_diff_pinv(obs, **kwargs):
    """Computes the Moore-Penrose pseudoinverse of a matrix of Obs elementwise which is suitable
    for numerical differentiation."""
    def _mat(x, **kwargs):
        shape = kwargs.get('shape')

        mat = []
        for i in range(shape[0]):
            row = []
            for j in range(shape[1]):
                row.append(x[j + shape[1] * i])
            mat.append(row)

        return np.linalg.pinv(np.array(mat))[kwargs.get('i')][kwargs.get('j')]

    if isinstance(obs, np.ndarray):
        shape = obs.shape
        raveled_obs = (1 * (obs.ravel())).tolist()
    else:
        raise TypeError('Unproper type of input.')

    res_mat = []
    for i in range(shape[1]):
        row = []
        for j in range(shape[0]):
            row.append(derived_observable(_mat, raveled_obs, shape=shape, i=i, j=j, **kwargs))
        res_mat.append(row)

    return np.array(res_mat) @ np.identity(shape[0])


def _num_diff_svd(obs, **kwargs):
    """Computes the singular value decomposition of a matrix of Obs elementwise which
    is suitable for numerical differentiation."""
    def _mat(x, **kwargs):
        shape = kwargs.get('shape')

        mat = []
        for i in range(shape[0]):
            row = []
            for j in range(shape[1]):
                row.append(x[j + shape[1] * i])
            mat.append(row)

        res = np.linalg.svd(np.array(mat), full_matrices=False)

        if kwargs.get('n') == 1:
            return res[1][kwargs.get('i')]
        else:
            return res[kwargs.get('n')][kwargs.get('i')][kwargs.get('j')]

    if isinstance(obs, np.ndarray):
        shape = obs.shape
        raveled_obs = (1 * (obs.ravel())).tolist()
    else:
        raise TypeError('Unproper type of input.')

    mid_index = min(shape[0], shape[1])

    res_mat0 = []
    for i in range(shape[0]):
        row = []
        for j in range(mid_index):
            row.append(derived_observable(_mat, raveled_obs, shape=shape, n=0, i=i, j=j, **kwargs))
        res_mat0.append(row)

    res_mat1 = []
    for i in range(mid_index):
        res_mat1.append(derived_observable(_mat, raveled_obs, shape=shape, n=1, i=i, **kwargs))

    res_mat2 = []
    for i in range(mid_index):
        row = []
        for j in range(shape[1]):
            row.append(derived_observable(_mat, raveled_obs, shape=shape, n=2, i=i, j=j, **kwargs))
        res_mat2.append(row)

    return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1]))
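With the numerical fallbacks removed, the matrix routines rely on automatic differentiation only. A sketch on a toy 2x2 matrix of Obs:

```python
import numpy as np
import pyerrors as pe

# Symmetric 2x2 matrix of toy observables on one ensemble.
obs_a = pe.pseudo_Obs(4.0, 0.1, 'ensembleA')
obs_b = pe.pseudo_Obs(1.0, 0.1, 'ensembleA')
mat = np.array([[obs_a, obs_b], [obs_b, obs_a]])

w, v = pe.linalg.eigh(mat)  # eigenvalues and eigenvectors with error propagation
inv = pe.linalg.pinv(mat)   # Moore-Penrose pseudoinverse
```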
@@ -70,3 +70,19 @@ def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
        data.append(np.sqrt(1 - a ** 2) * rand[i] + a * data[-1])
    corr_data = np.array(data) - np.mean(data, axis=0) + means
    return [Obs([dat], [name]) for dat in corr_data.T]


def _assert_equal_properties(ol, otype=Obs):
    if not isinstance(ol[0], otype):
        raise Exception("Wrong data type in list.")
    for o in ol[1:]:
        if not isinstance(o, otype):
            raise Exception("Wrong data type in list.")
        if not ol[0].is_merged == o.is_merged:
            raise Exception("All Obs in list have to have the same state 'is_merged'.")
        if not ol[0].reweighted == o.reweighted:
            raise Exception("All Obs in list have to have the same property 'reweighted'.")
        if not ol[0].e_content == o.e_content:
            raise Exception("All Obs in list have to be defined on the same set of configs.")
        if not ol[0].idl == o.idl:
            raise Exception("All Obs in list have to be defined on the same set of configurations.")
@@ -4,6 +4,7 @@ import numpy as np
import autograd.numpy as anp  # Thinly-wrapped numpy
from autograd import jacobian
import matplotlib.pyplot as plt
from scipy.stats import skew, skewtest, kurtosis, kurtosistest
import numdifftools as nd
from itertools import groupby
from .covobs import Covobs

@@ -287,8 +288,6 @@ class Obs:
                _compute_drho(1)
                if self.tau_exp[e_name] > 0:
                    texp = self.tau_exp[e_name]
                    # if type(self.idl[e_name]) is range:  # scale tau_exp according to step size
                    #     texp /= self.idl[e_name].step
                    # Critical slowing down analysis
                    if w_max // 2 <= 1:
                        raise Exception("Need at least 8 samples for tau_exp error analysis")

@@ -434,10 +433,6 @@ class Obs:
            my_string_list.append(my_string)
        print('\n'.join(my_string_list))

    def print(self, level=1):
        warnings.warn("Method 'print' renamed to 'details'", DeprecationWarning)
        self.details(level > 1)

    def is_zero_within_error(self, sigma=1):
        """Checks whether the observable is zero within 'sigma' standard errors.

@@ -473,8 +468,8 @@ class Obs:
        if not hasattr(self, 'e_dvalue'):
            raise Exception('Run the gamma method first.')

        fig = plt.figure()
        for e, e_name in enumerate(self.mc_names):
            fig = plt.figure()
            plt.xlabel(r'$W$')
            plt.ylabel(r'$\tau_\mathrm{int}$')
            length = int(len(self.e_n_tauint[e_name]))

@@ -499,13 +494,20 @@ class Obs:
            plt.ylim(bottom=0.0)
            plt.draw()
            if save:
                fig.savefig(save)
                fig.savefig(save + "_" + str(e))

    def plot_rho(self):
        """Plot normalized autocorrelation function time for each ensemble."""
    def plot_rho(self, save=None):
        """Plot normalized autocorrelation function time for each ensemble.

        Parameters
        ----------
        save : str
            if given, saves the figure to a file named 'save'.
        """
        if not hasattr(self, 'e_dvalue'):
            raise Exception('Run the gamma method first.')
        for e, e_name in enumerate(self.mc_names):
            fig = plt.figure()
            plt.xlabel('W')
            plt.ylabel('rho')
            length = int(len(self.e_drho[e_name]))

@@ -522,6 +524,8 @@ class Obs:
            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
            plt.xlim(-0.5, xmax)
            plt.draw()
            if save:
                fig.savefig(save + "_" + str(e))

    def plot_rep_dist(self):
        """Plot replica distribution for each ensemble with more than one replicum."""
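With the per-ensemble figure handling, saving now appends the ensemble index; a sketch with a toy observable:

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensembleA'])
obs.gamma_method()
obs.plot_rho(save='rho')        # writes one file per ensemble, e.g. rho_0
obs.plot_tauint(save='tauint')  # same naming scheme for the tau_int plots
```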
@@ -557,18 +561,24 @@ class Obs:
            plt.figure()
            r_length = []
            tmp = []
            tmp_expanded = []
            for r, r_name in enumerate(self.e_content[e_name]):
                tmp.append(self.deltas[r_name] + self.r_values[r_name])
                if expand:
                    tmp.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
                    r_length.append(len(tmp_expanded[-1]))
                else:
                    tmp.append(self.deltas[r_name] + self.r_values[r_name])
                    r_length.append(len(tmp[-1]))
                r_length.append(len(tmp[-1]))
            e_N = np.sum(r_length)
            x = np.arange(e_N)
            y = np.concatenate(tmp, axis=0)
            y_test = np.concatenate(tmp, axis=0)
            if expand:
                y = np.concatenate(tmp_expanded, axis=0)
            else:
                y = y_test
            plt.errorbar(x, y, fmt='.', markersize=3)
            plt.xlim(-0.5, e_N - 0.5)
            plt.title(e_name)
            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
            plt.draw()

    def plot_piechart(self):

@@ -587,22 +597,32 @@ class Obs:

        return dict(zip(self.e_names, sizes))

    def dump(self, name, **kwargs):
        """Dump the Obs to a pickle file 'name'.
    def dump(self, filename, datatype="json.gz", **kwargs):
        """Dump the Obs to a file 'name' of chosen format.

        Parameters
        ----------
        name : str
        filename : str
            name of the file to be saved.
        datatype : str
            Format of the exported file. Supported formats include
            "json.gz" and "pickle"
        path : str
            specifies a custom path for the file (default '.')
        """
        if 'path' in kwargs:
            file_name = kwargs.get('path') + '/' + name + '.p'
            file_name = kwargs.get('path') + '/' + filename
        else:
            file_name = name + '.p'
        with open(file_name, 'wb') as fb:
            pickle.dump(self, fb)
            file_name = filename

        if datatype == "json.gz":
            from .input.json import dump_to_json
            dump_to_json([self], file_name)
        elif datatype == "pickle":
            with open(file_name + '.p', 'wb') as fb:
                pickle.dump(self, fb)
        else:
            raise Exception("Unknown datatype " + str(datatype))

    def export_jackknife(self):
        """Export jackknife samples from the Obs

@@ -799,9 +819,6 @@ class Obs:
    def arctanh(self):
        return derived_observable(lambda x: anp.arctanh(x[0]), [self])

    def sinc(self):
        return derived_observable(lambda x: anp.sinc(x[0]), [self])


class CObs:
    """Class for a complex valued observable."""

@@ -1106,14 +1123,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
            raise Exception('Multi mode currently not supported for numerical derivative')
        options = {
            'base_step': 0.1,
            'step_ratio': 2.5,
            'num_steps': None,
            'step_nom': None,
            'offset': None,
            'num_extrap': None,
            'use_exact_steps': None,
            'check_num_steps': None,
            'scale': None}
            'step_ratio': 2.5}
        for key in options.keys():
            kwarg = kwargs.get(key)
            if kwarg is not None:
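A sketch of the reworked `dump` method; the file names are arbitrary and the target directory for `path` must exist:

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensembleA'])

obs.dump('my_obs')                     # json.gz is the new default format
obs.dump('my_obs', datatype='pickle')  # writes my_obs.p as before
obs.dump('my_obs', path='./results')   # optional custom output directory
```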
@@ -1 +1 @@
__version__ = "2.0.0"
__version__ = "2.0.0+dev"