mirror of
https://github.com/fjosw/pyerrors.git
synced 2025-05-16 12:33:41 +02:00
added basic functionality for covobs
This commit is contained in:
parent
1326e9c863
commit
30ba138558
2 changed files with 315 additions and 150 deletions
54
pyerrors/covobs.py
Normal file
54
pyerrors/covobs.py
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
|
class Covobs:

    def __init__(self, mean, cov, name, pos=None, grad=None):
        """ Initialize Covobs object.

        Parameters
        ----------
        mean : float
            Mean value of the new Obs
        cov : list or array
            2d Covariance matrix or 1d diagonal entries
        name : str
            identifier for the covariance matrix
        pos : int
            Position (0-based) of the variance belonging to mean in cov.
            Is taken to be 0 if cov is 0-dimensional
        grad : list or array
            Gradient of the Covobs wrt. the means belonging to cov.
            If given, pos is ignored and grad is used as-is.

        Raises
        ------
        Exception
            If cov is not square, has more than 2 dimensions, or pos is
            missing/out of range for a multi-dimensional cov.
        """
        self.cov = np.array(cov)
        if self.cov.ndim == 0:
            self.N = 1
        elif self.cov.ndim == 1:
            # Only the diagonal entries were given: expand to a full matrix.
            self.N = len(self.cov)
            self.cov = np.diag(self.cov)
        elif self.cov.ndim == 2:
            self.N = self.cov.shape[0]
            if self.cov.shape[1] != self.N:
                raise Exception('Covariance matrix has to be a square matrix!')
        else:
            raise Exception('Covariance matrix has to be a 2 dimensional square matrix!')
        self.name = name
        if grad is None:
            if pos is None:
                if self.N == 1:
                    pos = 0  # only possible choice for a 1x1 covariance matrix
                else:
                    raise Exception('Have to specify position of cov-element belonging to mean!')
            else:
                # pos indexes grad (0-based), so pos == self.N is already out of
                # range. The previous check used '>' and let pos == N fall
                # through to a raw IndexError at self.grad[pos] below.
                if pos >= self.N:
                    raise Exception('pos %d too large for covariance matrix with dimension %dx%d!' % (pos, self.N, self.N))
            # Unit gradient: this Covobs depends only on the mean at `pos`.
            self.grad = np.zeros((self.N, 1))
            self.grad[pos] = 1.
        else:
            self.grad = np.array(grad)
        self.value = mean

    def errsq(self):
        """ Return the variance (= square of the error) of the Covobs.

        Computed as grad^T @ cov @ grad (linear error propagation).
        """
        return float(np.dot(np.transpose(self.grad), np.dot(self.cov, self.grad)))
|
411
pyerrors/obs.py
411
pyerrors/obs.py
|
@ -6,6 +6,7 @@ from autograd import jacobian
|
||||||
import matplotlib.pyplot as plt
|
import matplotlib.pyplot as plt
|
||||||
import numdifftools as nd
|
import numdifftools as nd
|
||||||
from itertools import groupby
|
from itertools import groupby
|
||||||
|
from .covobs import Covobs
|
||||||
|
|
||||||
|
|
||||||
class Obs:
|
class Obs:
|
||||||
|
@ -37,11 +38,11 @@ class Obs:
|
||||||
Dictionary for N_sigma values. If an entry for a given ensemble exists
|
Dictionary for N_sigma values. If an entry for a given ensemble exists
|
||||||
this overwrites the standard value for that ensemble.
|
this overwrites the standard value for that ensemble.
|
||||||
"""
|
"""
|
||||||
__slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
|
#__slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
|
||||||
'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
|
# 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
|
||||||
'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
|
# 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
|
||||||
'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
|
# 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
|
||||||
'idl', 'is_merged', 'tag', '__dict__']
|
# 'idl', 'is_merged', 'tag', '__dict__']
|
||||||
|
|
||||||
S_global = 2.0
|
S_global = 2.0
|
||||||
S_dict = {}
|
S_dict = {}
|
||||||
|
@ -51,7 +52,7 @@ class Obs:
|
||||||
N_sigma_dict = {}
|
N_sigma_dict = {}
|
||||||
filter_eps = 1e-10
|
filter_eps = 1e-10
|
||||||
|
|
||||||
def __init__(self, samples, names, idl=None, means=None, **kwargs):
|
def __init__(self, samples, names, idl=None, means=None, covobs=None, **kwargs):
|
||||||
""" Initialize Obs object.
|
""" Initialize Obs object.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
|
@ -67,7 +68,7 @@ class Obs:
|
||||||
already subtracted from the samples
|
already subtracted from the samples
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if means is None:
|
if means is None and not kwargs.get('empty', False):
|
||||||
if len(samples) != len(names):
|
if len(samples) != len(names):
|
||||||
raise Exception('Length of samples and names incompatible.')
|
raise Exception('Length of samples and names incompatible.')
|
||||||
if idl is not None:
|
if idl is not None:
|
||||||
|
@ -80,52 +81,65 @@ class Obs:
|
||||||
if min(len(x) for x in samples) <= 4:
|
if min(len(x) for x in samples) <= 4:
|
||||||
raise Exception('Samples have to have at least 5 entries.')
|
raise Exception('Samples have to have at least 5 entries.')
|
||||||
|
|
||||||
self.names = sorted(names)
|
if kwargs.get('empty', False):
|
||||||
|
self.names = []
|
||||||
|
else:
|
||||||
|
self.names = sorted(names)
|
||||||
|
|
||||||
self.shape = {}
|
self.shape = {}
|
||||||
self.r_values = {}
|
self.r_values = {}
|
||||||
self.deltas = {}
|
self.deltas = {}
|
||||||
|
if covobs is None:
|
||||||
|
self.covobs = {}
|
||||||
|
else:
|
||||||
|
self.covobs = covobs
|
||||||
|
|
||||||
self.idl = {}
|
self.idl = {}
|
||||||
if idl is not None:
|
if not kwargs.get('empty', False):
|
||||||
for name, idx in sorted(zip(names, idl)):
|
if idl is not None:
|
||||||
if isinstance(idx, range):
|
for name, idx in sorted(zip(names, idl)):
|
||||||
self.idl[name] = idx
|
if isinstance(idx, range):
|
||||||
elif isinstance(idx, (list, np.ndarray)):
|
self.idl[name] = idx
|
||||||
dc = np.unique(np.diff(idx))
|
elif isinstance(idx, (list, np.ndarray)):
|
||||||
if np.any(dc < 0):
|
dc = np.unique(np.diff(idx))
|
||||||
raise Exception("Unsorted idx for idl[%s]" % (name))
|
if np.any(dc < 0):
|
||||||
if len(dc) == 1:
|
raise Exception("Unsorted idx for idl[%s]" % (name))
|
||||||
self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
|
if len(dc) == 1:
|
||||||
|
self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
|
||||||
|
else:
|
||||||
|
self.idl[name] = list(idx)
|
||||||
else:
|
else:
|
||||||
self.idl[name] = list(idx)
|
raise Exception('incompatible type for idl[%s].' % (name))
|
||||||
else:
|
else:
|
||||||
raise Exception('incompatible type for idl[%s].' % (name))
|
for name, sample in sorted(zip(names, samples)):
|
||||||
else:
|
self.idl[name] = range(1, len(sample) + 1)
|
||||||
for name, sample in sorted(zip(names, samples)):
|
|
||||||
self.idl[name] = range(1, len(sample) + 1)
|
|
||||||
|
|
||||||
if means is not None:
|
if means is not None:
|
||||||
for name, sample, mean in sorted(zip(names, samples, means)):
|
for name, sample, mean in sorted(zip(names, samples, means)):
|
||||||
self.shape[name] = len(self.idl[name])
|
self.shape[name] = len(self.idl[name])
|
||||||
if len(sample) != self.shape[name]:
|
if len(sample) != self.shape[name]:
|
||||||
raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
|
raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
|
||||||
self.r_values[name] = mean
|
self.r_values[name] = mean
|
||||||
self.deltas[name] = sample
|
self.deltas[name] = sample
|
||||||
else:
|
else:
|
||||||
for name, sample in sorted(zip(names, samples)):
|
for name, sample in sorted(zip(names, samples)):
|
||||||
self.shape[name] = len(self.idl[name])
|
self.shape[name] = len(self.idl[name])
|
||||||
if len(sample) != self.shape[name]:
|
if len(sample) != self.shape[name]:
|
||||||
raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
|
raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
|
||||||
self.r_values[name] = np.mean(sample)
|
self.r_values[name] = np.mean(sample)
|
||||||
self.deltas[name] = sample - self.r_values[name]
|
self.deltas[name] = sample - self.r_values[name]
|
||||||
self.is_merged = {}
|
self.is_merged = {}
|
||||||
self.N = sum(list(self.shape.values()))
|
self.N = sum(list(self.shape.values()))
|
||||||
|
|
||||||
self._value = 0
|
self._value = 0
|
||||||
if means is None:
|
if means is None:
|
||||||
for name in self.names:
|
for name in self.names:
|
||||||
self._value += self.shape[name] * self.r_values[name]
|
self._value += self.shape[name] * self.r_values[name]
|
||||||
self._value /= self.N
|
self._value /= self.N
|
||||||
|
else:
|
||||||
|
self._value = 0
|
||||||
|
self.is_merged = {}
|
||||||
|
self.N = 0
|
||||||
|
|
||||||
self._dvalue = 0.0
|
self._dvalue = 0.0
|
||||||
self.ddvalue = 0.0
|
self.ddvalue = 0.0
|
||||||
|
@ -220,91 +234,96 @@ class Obs:
|
||||||
_parse_kwarg('N_sigma')
|
_parse_kwarg('N_sigma')
|
||||||
|
|
||||||
for e, e_name in enumerate(self.e_names):
|
for e, e_name in enumerate(self.e_names):
|
||||||
|
if e_name not in self.covobs:
|
||||||
|
r_length = []
|
||||||
|
for r_name in e_content[e_name]:
|
||||||
|
if isinstance(self.idl[r_name], range):
|
||||||
|
r_length.append(len(self.idl[r_name]))
|
||||||
|
else:
|
||||||
|
r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
|
||||||
|
|
||||||
r_length = []
|
e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
|
||||||
for r_name in e_content[e_name]:
|
w_max = max(r_length) // 2
|
||||||
if isinstance(self.idl[r_name], range):
|
e_gamma[e_name] = np.zeros(w_max)
|
||||||
r_length.append(len(self.idl[r_name]))
|
self.e_rho[e_name] = np.zeros(w_max)
|
||||||
else:
|
self.e_drho[e_name] = np.zeros(w_max)
|
||||||
r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
|
|
||||||
|
|
||||||
e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
|
for r_name in e_content[e_name]:
|
||||||
w_max = max(r_length) // 2
|
e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
|
||||||
e_gamma[e_name] = np.zeros(w_max)
|
|
||||||
self.e_rho[e_name] = np.zeros(w_max)
|
|
||||||
self.e_drho[e_name] = np.zeros(w_max)
|
|
||||||
|
|
||||||
for r_name in e_content[e_name]:
|
gamma_div = np.zeros(w_max)
|
||||||
e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
|
for r_name in e_content[e_name]:
|
||||||
|
gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
|
||||||
|
e_gamma[e_name] /= gamma_div[:w_max]
|
||||||
|
|
||||||
gamma_div = np.zeros(w_max)
|
if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero
|
||||||
for r_name in e_content[e_name]:
|
|
||||||
gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
|
|
||||||
e_gamma[e_name] /= gamma_div[:w_max]
|
|
||||||
|
|
||||||
if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero
|
|
||||||
self.e_tauint[e_name] = 0.5
|
|
||||||
self.e_dtauint[e_name] = 0.0
|
|
||||||
self.e_dvalue[e_name] = 0.0
|
|
||||||
self.e_ddvalue[e_name] = 0.0
|
|
||||||
self.e_windowsize[e_name] = 0
|
|
||||||
continue
|
|
||||||
|
|
||||||
self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
|
|
||||||
self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
|
|
||||||
# Make sure no entry of tauint is smaller than 0.5
|
|
||||||
self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
|
|
||||||
# hep-lat/0306017 eq. (42)
|
|
||||||
self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
|
|
||||||
self.e_n_dtauint[e_name][0] = 0.0
|
|
||||||
|
|
||||||
def _compute_drho(i):
|
|
||||||
tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
|
|
||||||
self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
|
|
||||||
|
|
||||||
_compute_drho(1)
|
|
||||||
if self.tau_exp[e_name] > 0:
|
|
||||||
texp = self.tau_exp[e_name]
|
|
||||||
# if type(self.idl[e_name]) is range: # scale tau_exp according to step size
|
|
||||||
# texp /= self.idl[e_name].step
|
|
||||||
# Critical slowing down analysis
|
|
||||||
if w_max // 2 <= 1:
|
|
||||||
raise Exception("Need at least 8 samples for tau_exp error analysis")
|
|
||||||
for n in range(1, w_max // 2):
|
|
||||||
_compute_drho(n + 1)
|
|
||||||
if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
|
|
||||||
# Bias correction hep-lat/0306017 eq. (49) included
|
|
||||||
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive
|
|
||||||
self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
|
|
||||||
# Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
|
|
||||||
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
|
||||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
|
||||||
self.e_windowsize[e_name] = n
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
if self.S[e_name] == 0.0:
|
|
||||||
self.e_tauint[e_name] = 0.5
|
self.e_tauint[e_name] = 0.5
|
||||||
self.e_dtauint[e_name] = 0.0
|
self.e_dtauint[e_name] = 0.0
|
||||||
self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
|
self.e_dvalue[e_name] = 0.0
|
||||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
|
self.e_ddvalue[e_name] = 0.0
|
||||||
self.e_windowsize[e_name] = 0
|
self.e_windowsize[e_name] = 0
|
||||||
else:
|
continue
|
||||||
# Standard automatic windowing procedure
|
|
||||||
tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
|
self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
|
||||||
g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N)
|
self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
|
||||||
for n in range(1, w_max):
|
# Make sure no entry of tauint is smaller than 0.5
|
||||||
if n < w_max // 2 - 2:
|
self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
|
||||||
_compute_drho(n + 1)
|
# hep-lat/0306017 eq. (42)
|
||||||
if g_w[n - 1] < 0 or n >= w_max - 1:
|
self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
|
||||||
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49)
|
self.e_n_dtauint[e_name][0] = 0.0
|
||||||
self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
|
|
||||||
|
def _compute_drho(i):
|
||||||
|
tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
|
||||||
|
self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
|
||||||
|
|
||||||
|
_compute_drho(1)
|
||||||
|
if self.tau_exp[e_name] > 0:
|
||||||
|
texp = self.tau_exp[e_name]
|
||||||
|
# if type(self.idl[e_name]) is range: # scale tau_exp according to step size
|
||||||
|
# texp /= self.idl[e_name].step
|
||||||
|
# Critical slowing down analysis
|
||||||
|
if w_max // 2 <= 1:
|
||||||
|
raise Exception("Need at least 8 samples for tau_exp error analysis")
|
||||||
|
for n in range(1, w_max // 2):
|
||||||
|
_compute_drho(n + 1)
|
||||||
|
if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
|
||||||
|
# Bias correction hep-lat/0306017 eq. (49) included
|
||||||
|
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive
|
||||||
|
self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
|
||||||
|
# Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
|
||||||
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
||||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
||||||
self.e_windowsize[e_name] = n
|
self.e_windowsize[e_name] = n
|
||||||
break
|
break
|
||||||
|
else:
|
||||||
|
if self.S[e_name] == 0.0:
|
||||||
|
self.e_tauint[e_name] = 0.5
|
||||||
|
self.e_dtauint[e_name] = 0.0
|
||||||
|
self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
|
||||||
|
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
|
||||||
|
self.e_windowsize[e_name] = 0
|
||||||
|
else:
|
||||||
|
# Standard automatic windowing procedure
|
||||||
|
tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
|
||||||
|
g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N)
|
||||||
|
for n in range(1, w_max):
|
||||||
|
if n < w_max // 2 - 2:
|
||||||
|
_compute_drho(n + 1)
|
||||||
|
if g_w[n - 1] < 0 or n >= w_max - 1:
|
||||||
|
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49)
|
||||||
|
self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
|
||||||
|
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
||||||
|
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
||||||
|
self.e_windowsize[e_name] = n
|
||||||
|
break
|
||||||
|
|
||||||
self._dvalue += self.e_dvalue[e_name] ** 2
|
self._dvalue += self.e_dvalue[e_name] ** 2
|
||||||
self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
|
self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
|
||||||
|
|
||||||
|
else:
|
||||||
|
self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
|
||||||
|
self.e_ddvalue[e_name] = 0
|
||||||
|
self._dvalue += self.e_dvalue[e_name]**2
|
||||||
|
|
||||||
self._dvalue = np.sqrt(self.dvalue)
|
self._dvalue = np.sqrt(self.dvalue)
|
||||||
if self._dvalue == 0.0:
|
if self._dvalue == 0.0:
|
||||||
|
@ -367,12 +386,15 @@ class Obs:
|
||||||
if len(self.e_names) > 1:
|
if len(self.e_names) > 1:
|
||||||
print(' Ensemble errors:')
|
print(' Ensemble errors:')
|
||||||
for e_name in self.e_names:
|
for e_name in self.e_names:
|
||||||
if len(self.e_names) > 1:
|
if e_name not in self.covobs:
|
||||||
print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
|
if len(self.e_names) > 1:
|
||||||
if self.tau_exp[e_name] > 0:
|
print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
|
||||||
print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name]))
|
if self.tau_exp[e_name] > 0:
|
||||||
|
print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name]))
|
||||||
|
else:
|
||||||
|
print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
|
||||||
else:
|
else:
|
||||||
print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
|
print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
|
||||||
if ens_content is True:
|
if ens_content is True:
|
||||||
if len(self.e_names) == 1:
|
if len(self.e_names) == 1:
|
||||||
print(self.N, 'samples in', len(self.e_names), 'ensemble:')
|
print(self.N, 'samples in', len(self.e_names), 'ensemble:')
|
||||||
|
@ -380,25 +402,28 @@ class Obs:
|
||||||
print(self.N, 'samples in', len(self.e_names), 'ensembles:')
|
print(self.N, 'samples in', len(self.e_names), 'ensembles:')
|
||||||
my_string_list = []
|
my_string_list = []
|
||||||
for key, value in sorted(self.e_content.items()):
|
for key, value in sorted(self.e_content.items()):
|
||||||
my_string = ' ' + "\u00B7 Ensemble '" + key + "' "
|
if key not in self.covobs:
|
||||||
if len(value) == 1:
|
my_string = ' ' + "\u00B7 Ensemble '" + key + "' "
|
||||||
my_string += f': {self.shape[value[0]]} configurations'
|
if len(value) == 1:
|
||||||
if isinstance(self.idl[value[0]], range):
|
my_string += f': {self.shape[value[0]]} configurations'
|
||||||
my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
|
if isinstance(self.idl[value[0]], range):
|
||||||
else:
|
my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
|
||||||
my_string += ' (irregular range)'
|
|
||||||
else:
|
|
||||||
sublist = []
|
|
||||||
for v in value:
|
|
||||||
my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
|
|
||||||
my_substring += f': {self.shape[v]} configurations'
|
|
||||||
if isinstance(self.idl[v], range):
|
|
||||||
my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
|
|
||||||
else:
|
else:
|
||||||
my_substring += ' (irregular range)'
|
my_string += ' (irregular range)'
|
||||||
sublist.append(my_substring)
|
else:
|
||||||
|
sublist = []
|
||||||
|
for v in value:
|
||||||
|
my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
|
||||||
|
my_substring += f': {self.shape[v]} configurations'
|
||||||
|
if isinstance(self.idl[v], range):
|
||||||
|
my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
|
||||||
|
else:
|
||||||
|
my_substring += ' (irregular range)'
|
||||||
|
sublist.append(my_substring)
|
||||||
|
|
||||||
my_string += '\n' + '\n'.join(sublist)
|
my_string += '\n' + '\n'.join(sublist)
|
||||||
|
else:
|
||||||
|
my_string = ' ' + "\u00B7 Covobs '" + key + "' "
|
||||||
my_string_list.append(my_string)
|
my_string_list.append(my_string)
|
||||||
print('\n'.join(my_string_list))
|
print('\n'.join(my_string_list))
|
||||||
|
|
||||||
|
@ -1028,6 +1053,15 @@ def derived_observable(func, data, **kwargs):
|
||||||
if isinstance(raveled_data[i], (int, float)):
|
if isinstance(raveled_data[i], (int, float)):
|
||||||
raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name], idl=[first_idl])
|
raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name], idl=[first_idl])
|
||||||
|
|
||||||
|
allcov = {}
|
||||||
|
for o in raveled_data:
|
||||||
|
for name in o.covobs:
|
||||||
|
if name in allcov:
|
||||||
|
if not np.array_equal(allcov[name], o.covobs[name].cov):
|
||||||
|
raise Exception('Inconsistent covariance matrices for %s!' % (name))
|
||||||
|
else:
|
||||||
|
allcov[name] = o.covobs[name].cov
|
||||||
|
|
||||||
n_obs = len(raveled_data)
|
n_obs = len(raveled_data)
|
||||||
new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
|
new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
|
||||||
|
|
||||||
|
@ -1100,24 +1134,41 @@ def derived_observable(func, data, **kwargs):
|
||||||
|
|
||||||
for i_val, new_val in np.ndenumerate(new_values):
|
for i_val, new_val in np.ndenumerate(new_values):
|
||||||
new_deltas = {}
|
new_deltas = {}
|
||||||
|
new_grad = {}
|
||||||
for j_obs, obs in np.ndenumerate(data):
|
for j_obs, obs in np.ndenumerate(data):
|
||||||
for name in obs.names:
|
for name in obs.names:
|
||||||
new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
|
if name in obs.covobs:
|
||||||
|
if name in new_grad:
|
||||||
|
new_grad[name] += deriv[i_val + j_obs] * obs.covobs[name].grad
|
||||||
|
else:
|
||||||
|
new_grad[name] = deriv[i_val + j_obs] * obs.covobs[name].grad
|
||||||
|
else:
|
||||||
|
new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
|
||||||
|
|
||||||
|
new_covobs = {name: Covobs(obs.covobs[name].value, obs.covobs[name].cov, obs.covobs[name].name, grad=new_grad[name]) for name in new_grad}
|
||||||
|
|
||||||
new_samples = []
|
new_samples = []
|
||||||
new_means = []
|
new_means = []
|
||||||
new_idl = []
|
new_idl = []
|
||||||
|
new_names_obs = []
|
||||||
for name in new_names:
|
for name in new_names:
|
||||||
if is_merged[name]:
|
if name not in new_covobs:
|
||||||
filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
|
if is_merged[name]:
|
||||||
else:
|
filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
|
||||||
filtered_deltas = new_deltas[name]
|
else:
|
||||||
filtered_idl_d = new_idl_d[name]
|
filtered_deltas = new_deltas[name]
|
||||||
|
filtered_idl_d = new_idl_d[name]
|
||||||
|
|
||||||
new_samples.append(filtered_deltas)
|
new_samples.append(filtered_deltas)
|
||||||
new_idl.append(filtered_idl_d)
|
new_idl.append(filtered_idl_d)
|
||||||
new_means.append(new_r_values[name][i_val])
|
new_means.append(new_r_values[name][i_val])
|
||||||
final_result[i_val] = Obs(new_samples, new_names, means=new_means, idl=new_idl)
|
new_names_obs.append(name)
|
||||||
|
final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
|
||||||
|
for name in new_covobs:
|
||||||
|
final_result[i_val].names.append(name)
|
||||||
|
final_result[i_val].shape[name] = 1
|
||||||
|
final_result[i_val].idl[name] = []
|
||||||
|
final_result[i_val].covobs = new_covobs
|
||||||
final_result[i_val]._value = new_val
|
final_result[i_val]._value = new_val
|
||||||
final_result[i_val].is_merged = is_merged
|
final_result[i_val].is_merged = is_merged
|
||||||
final_result[i_val].reweighted = reweighted
|
final_result[i_val].reweighted = reweighted
|
||||||
|
@ -1603,3 +1654,63 @@ def merge_obs(list_of_obs):
|
||||||
o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
|
o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
|
||||||
o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
|
o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
|
||||||
return o
|
return o
|
||||||
|
|
||||||
|
|
||||||
|
def covobs_to_obs(co):
    """Make an Obs out of a Covobs

    Parameters
    ----------
    co : Covobs
        Covobs to be embedded into the Obs
    """
    # Start from a completely empty Obs and register the Covobs as its
    # single (pseudo-)ensemble: one entry in names/shape/idl/covobs.
    obs = Obs(None, None, empty=True)
    obs.names.append(co.name)
    obs.shape[co.name] = 1
    obs.idl[co.name] = []
    obs.covobs[co.name] = co
    # Value and error come directly from the Covobs itself.
    obs._value = co.value
    obs._dvalue = np.sqrt(co.errsq())
    return obs
|
||||||
|
|
||||||
|
|
||||||
|
def create_Covobs(mean, cov, name, pos=None, grad=None):
    """Make an Obs based on a Covobs

    Parameters
    ----------
    mean : float
        Mean value of the new Obs
    cov : list or array
        2d Covariance matrix or 1d diagonal entries
    name : str
        identifier for the covariance matrix
    pos : int
        Position of the variance belonging to mean in cov.
        Is taken to be 1 if cov is 0-dimensional
    grad : list or array
        Gradient of the Covobs wrt. the means belonging to cov.
    """
    # Build the Covobs first, then wrap it into an Obs.
    covobs = Covobs(mean, cov, name, pos=pos, grad=grad)
    return covobs_to_obs(covobs)
|
||||||
|
|
||||||
|
|
||||||
|
def create_Covobs_list(means, cov, name, grad=None):
    """Make a list of Obs based on a Covobs

    Parameters
    ----------
    means : list of floats
        N mean values of the new Obs
    cov : list or array
        2d (NxN) Covariance matrix or 1d diagonal entries
    name : str
        identifier for the covariance matrix
    grad : list or array
        Gradient of the Covobs wrt. the means belonging to cov.

    Raises
    ------
    Exception
        If means is empty or its length does not match the dimension of cov.
    """
    # Guard against empty input, which previously surfaced as a bare
    # IndexError on ol[0] below.
    if len(means) == 0:
        raise Exception('You have to provide at least one mean value!')
    ol = []
    for i, mean in enumerate(means):
        ol.append(covobs_to_obs(Covobs(mean, cov, name, pos=i, grad=grad)))
    # Report the number of means the covariance matrix requires. The
    # previous message interpolated ol[0].N (the Obs sample count) instead
    # of the covariance dimension.
    if ol[0].covobs[name].N != len(means):
        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
    return ol
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue