mirror of https://github.com/fjosw/pyerrors.git
synced 2025-03-15 06:40:24 +01:00

Merge branch 'develop' into feature/irregularMC

commit 9580a3a080
19 changed files with 618 additions and 221 deletions

@@ -1,4 +1,4 @@
-name: CI
+name: pytest

 on:
   push:
@@ -30,6 +30,7 @@ jobs:
         pip install .
         pip install pytest
         pip install pytest-cov
+        pip install pytest-benchmark

     - name: Run tests
       run: pytest --cov=pyerrors -v

.gitignore (vendored, 1 change)

@@ -2,6 +2,7 @@ __pycache__
 *.pyc
 .ipynb_*
 .coverage
+.benchmarks
 .cache
 examples/B1k2_pcac_plateau.p
 examples/Untitled.*

@@ -23,8 +23,9 @@ When implementing a new feature or fixing a bug please add meaningful tests to t
 ### Continuous integration
 For all pull requests tests are executed for the most recent python releases via
 ```
-pytest -v
+pytest --cov=pyerrors -v
 ```
+requiring `pytest`, `pytest-cov` and `pytest-benchmark`
 and the linter `flake8` is executed with the command
 ```
 flake8 --ignore=E501,E722 --exclude=__init__.py pyerrors

README.md (10 changes)

@@ -1,4 +1,4 @@
-[](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [](https://github.com/fjosw/pyerrors/actions/workflows/CI.yml) [](https://www.python.org/downloads/)
+[](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [](https://www.python.org/downloads/)
 # pyerrors
 `pyerrors` is a python package for error computation and propagation of Markov chain Monte Carlo data.
 It is based on the **gamma method** [arXiv:hep-lat/0306017](https://arxiv.org/abs/hep-lat/0306017). Some of its features are:
@@ -16,7 +16,11 @@ There exist similar implementations of gamma method error analysis suites in
 ## Installation
 To install the most recent release of `pyerrors` run
 ```bash
-pip install git+https://github.com/fjosw/pyerrors.git
+pip install git+https://github.com/fjosw/pyerrors.git@master
 ```
+to install the current `develop` version run
+```bash
+pip install git+https://github.com/fjosw/pyerrors.git@develop
+```

 ## Usage
@@ -31,6 +35,7 @@ obs1.print()
 Often one is interested in secondary observables which can be arbitrary functions of primary observables. `pyerrors` overloads most basic math operations and `numpy` functions such that the user can work with `Obs` objects as if they were floats
 ```python
 import numpy as np

 obs3 = 12.0 / obs1 ** 2 - np.exp(-1.0 / obs2)
 obs3.gamma_method()
 obs3.print()
@@ -44,6 +49,5 @@ More detailed examples can be found in the `examples` folder:
 * [04_fit_example](examples/04_fit_example.ipynb)
 * [05_matrix_operations](examples/05_matrix_operations.ipynb)

-
 ## License
 [MIT](https://choosealicense.com/licenses/mit/)
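The usage hunk above operates on `obs1` and `obs2`, whose construction lies outside the diff context. For orientation, a minimal sketch of how such primary observables are typically created (the sample data and ensemble name are made up for illustration):

```python
import numpy as np
import pyerrors as pe

# fictitious Monte Carlo histories for two primary observables
samples1 = np.random.normal(1.7, 0.5, 1000)
samples2 = np.random.normal(2.3, 0.5, 1000)

# an Obs is built from a list of sample arrays and a list of ensemble names
obs1 = pe.Obs([samples1], ['ensemble1'])
obs2 = pe.Obs([samples2], ['ensemble1'])

obs1.gamma_method()  # the error analysis has to run before errors are inspected
obs1.print()
```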

@@ -5,7 +5,7 @@ import matplotlib.pyplot as plt
 import scipy.linalg
 from .pyerrors import Obs, dump_object
 from .fits import standard_fit
-from .linalg import eigh, mat_mat_op
+from .linalg import eigh, inv, cholesky
 from .roots import find_root


@@ -187,10 +187,10 @@ class Corr:
     def Eigenvalue(self, t0, state=1):
         G = self.smearing_symmetric()
         G0 = G.content[t0]
-        L = mat_mat_op(anp.linalg.cholesky, G0)
-        Li = mat_mat_op(anp.linalg.inv, L)
+        L = cholesky(G0)
+        Li = inv(L)
         LT = L.T
-        LTi = mat_mat_op(anp.linalg.inv, LT)
+        LTi = inv(LT)
         newcontent = []
         for t in range(self.T):
             Gt = G.content[t]
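The rewritten `Eigenvalue` method keeps the usual Cholesky-based construction for a symmetric correlator matrix; schematically (this reading of the surrounding, partly elided loop body is an inference, not part of the diff):

```latex
% factorize the correlator matrix at the reference time t_0
G(t_0) = L L^{T}
% whiten every timeslice with L, as prepared by Li = inv(L) and LTi = inv(LT)
M(t) = L^{-1}\, G(t)\, (L^{T})^{-1}
% the eigenvalues of M(t) decay with the energy of the chosen state
\lambda_n(t, t_0) \sim e^{-E_n (t - t_0)}
```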

@@ -175,7 +175,7 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
     """

     for obs in obs_list:
-        if not obs.e_names:
+        if not hasattr(obs, 'e_names'):
             raise Exception('Run the gamma method first for all obs.')

     bdio = ctypes.cdll.LoadLibrary(bdio_path)

@@ -109,3 +109,60 @@ def read_ExternalLeg_hd5(path, filestem, ens_id, order='F'):
             matrix[si, sj, ci, cj].gamma_method()

     return Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom)
+
+
+def read_Bilinear_hd5(path, filestem, ens_id, order='F'):
+    """Read hadrons Bilinear hdf5 file and output an array of CObs
+
+    Parameters
+    -----------------
+    path -- path to the files to read
+    filestem -- namestem of the files to read
+    ens_id -- name of the ensemble, required for internal bookkeeping
+    order -- order in which the array is to be reshaped,
+             'F' for the first index changing fastest (9 4x4 matrices), the default,
+             'C' for the last index changing fastest (16 3x3 matrices).
+    """
+
+    files = _get_files(path, filestem)
+
+    mom_in = None
+    mom_out = None
+
+    corr_data = {}
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        for i in range(16):
+            name = file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['gamma'][0].decode('UTF-8')
+            if name not in corr_data:
+                corr_data[name] = []
+            raw_data = file['Bilinear/Bilinear_' + str(i) + '/corr'][0][0].view('complex')
+            corr_data[name].append(raw_data)
+            if mom_in is not None:
+                assert np.allclose(mom_in, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int))
+            else:
+                mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
+            if mom_out is not None:
+                assert np.allclose(mom_out, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int))
+            else:
+                mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int)
+
+        file.close()
+
+    result_dict = {}
+
+    for key, data in corr_data.items():
+        local_data = np.array(data)
+
+        rolled_array = np.rollaxis(local_data, 0, 5)
+
+        matrix = np.empty((rolled_array.shape[:-1]), dtype=object)
+        for si, sj, ci, cj in np.ndindex(rolled_array.shape[:-1]):
+            real = Obs([rolled_array[si, sj, ci, cj].real], [ens_id])
+            imag = Obs([rolled_array[si, sj, ci, cj].imag], [ens_id])
+            matrix[si, sj, ci, cj] = CObs(real, imag)
+            matrix[si, sj, ci, cj].gamma_method()
+
+        result_dict[key] = Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom_in, mom_out=mom_out)
+
+    return result_dict
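In `read_Bilinear_hd5` the `(si, sj, ci, cj)` array of observables is flattened to a 12x12 matrix by `swapaxes(1, 2).reshape((12, 12), order=order)`; the `order` flag only decides which index runs fastest. A shape-only sketch of the two orders (a plain integer array stands in for the `CObs` entries):

```python
import numpy as np

# stand-in for the 4x4 spin times 3x3 color array of observables
a = np.arange(4 * 4 * 3 * 3).reshape(4, 4, 3, 3)

# group indices as (si, ci, sj, cj) before flattening, as in the hunk above
b = a.swapaxes(1, 2)

m_f = b.reshape((12, 12), order='F')  # first index changes fastest
m_c = b.reshape((12, 12), order='C')  # last index changes fastest

print(m_f.shape, m_c.shape)       # (12, 12) (12, 12)
print(np.array_equal(m_f, m_c))   # False: same entries, different layout
```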

@@ -2,59 +2,174 @@
 # coding: utf-8

 import numpy as np
 from autograd import jacobian
 import autograd.numpy as anp  # Thinly-wrapped numpy
-from .pyerrors import derived_observable, CObs
+from .pyerrors import derived_observable, CObs, Obs


-# This code block is directly taken from the current master branch of autograd and remains
-# only until the new version is released on PyPi
 from functools import partial
 from autograd.extend import defvjp

-_dot = partial(anp.einsum, '...ij,...jk->...ik')
+
+def derived_array(func, data, **kwargs):
+    """Construct a derived Obs according to func(data, **kwargs) of matrix value data
+    using automatic differentiation.
+
+    Parameters
+    ----------
+    func -- arbitrary function of the form func(data, **kwargs). For the
+            automatic differentiation to work, all numpy functions have to have
+            the autograd wrapper (use 'import autograd.numpy as anp').
+    data -- list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    man_grad -- manually supply a list or an array which contains the jacobian
+                of func. Use cautiously, supplying the wrong derivative will
+                not be intercepted.
+    """
+
+    data = np.asarray(data)
+    raveled_data = data.ravel()
+
+    # Workaround for matrix operations containing non Obs data
+    for i_data in raveled_data:
+        if isinstance(i_data, Obs):
+            first_name = i_data.names[0]
+            first_shape = i_data.shape[first_name]
+            break
+
+    for i in range(len(raveled_data)):
+        if isinstance(raveled_data[i], (int, float)):
+            raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name])
+
+    n_obs = len(raveled_data)
+    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+
+    new_shape = {}
+    for i_data in raveled_data:
+        for name in new_names:
+            tmp = i_data.shape.get(name)
+            if tmp is not None:
+                if new_shape.get(name) is None:
+                    new_shape[name] = tmp
+                else:
+                    if new_shape[name] != tmp:
+                        raise Exception('Shapes of ensemble', name, 'do not match.')
+    if data.ndim == 1:
+        values = np.array([o.value for o in data])
+    else:
+        values = np.vectorize(lambda x: x.value)(data)
+
+    new_values = func(values, **kwargs)
+
+    new_r_values = {}
+    for name in new_names:
+        tmp_values = np.zeros(n_obs)
+        for i, item in enumerate(raveled_data):
+            tmp = item.r_values.get(name)
+            if tmp is None:
+                tmp = item.value
+            tmp_values[i] = tmp
+        tmp_values = np.array(tmp_values).reshape(data.shape)
+        new_r_values[name] = func(tmp_values, **kwargs)
+
+    if 'man_grad' in kwargs:
+        deriv = np.asarray(kwargs.get('man_grad'))
+        if new_values.shape + data.shape != deriv.shape:
+            raise Exception('Manual derivative does not have correct shape.')
+    elif kwargs.get('num_grad') is True:
+        raise Exception('Multi mode currently not supported for numerical derivative')
+    else:
+        deriv = jacobian(func)(values, **kwargs)
+
+    final_result = np.zeros(new_values.shape, dtype=object)
+
+    d_extracted = {}
+    for name in new_names:
+        d_extracted[name] = []
+        for i_dat, dat in enumerate(data):
+            ens_length = dat.ravel()[0].shape[name]
+            d_extracted[name].append(np.array([o.deltas[name] for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+
+    for i_val, new_val in np.ndenumerate(new_values):
+        new_deltas = {}
+        for name in new_names:
+            ens_length = d_extracted[name][0].shape[-1]
+            new_deltas[name] = np.zeros(ens_length)
+            for i_dat, dat in enumerate(d_extracted[name]):
+                new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+
+        new_samples = []
+        new_means = []
+        for name in new_names:
+            new_samples.append(new_deltas[name])
+            new_means.append(new_r_values[name][i_val])
+
+        final_result[i_val] = Obs(new_samples, new_names, means=new_means)
+        final_result[i_val]._value = new_val
+
+    return final_result
+
+
-# batched diag
-def _diag(a):
-    return anp.eye(a.shape[-1]) * a
+def matmul(*operands):
+    """Matrix multiply all operands.
+
+    Supports real and complex valued matrices and is faster compared to
+    standard multiplication via the @ operator.
+    """
+    if any(isinstance(o[0, 0], CObs) for o in operands):
+        extended_operands = []
+        for op in operands:
+            tmp = np.vectorize(lambda x: (np.real(x), np.imag(x)))(op)
+            extended_operands.append(tmp[0])
+            extended_operands.append(tmp[1])
+
+        def multi_dot(operands, part):
+            stack_r = operands[0]
+            stack_i = operands[1]
+            for op_r, op_i in zip(operands[2::2], operands[3::2]):
+                tmp_r = stack_r @ op_r - stack_i @ op_i
+                tmp_i = stack_r @ op_i + stack_i @ op_r
+
+                stack_r = tmp_r
+                stack_i = tmp_i
+
+            if part == 'Real':
+                return stack_r
+            else:
+                return stack_i
+
+        def multi_dot_r(operands):
+            return multi_dot(operands, 'Real')
+
+        def multi_dot_i(operands):
+            return multi_dot(operands, 'Imag')
+
+        Nr = derived_array(multi_dot_r, extended_operands)
+        Ni = derived_array(multi_dot_i, extended_operands)
+
+        res = np.empty_like(Nr)
+        for (n, m), entry in np.ndenumerate(Nr):
+            res[n, m] = CObs(Nr[n, m], Ni[n, m])
+
+        return res
+    else:
+        def multi_dot(operands):
+            stack = operands[0]
+            for op in operands[1:]:
+                stack = stack @ op
+            return stack
+        return derived_array(multi_dot, operands)
+
+
-# batched diagonal, similar to matrix_diag in tensorflow
-def _matrix_diag(a):
-    reps = anp.array(a.shape)
-    reps[:-1] = 1
-    reps[-1] = a.shape[-1]
-    newshape = list(a.shape) + [a.shape[-1]]
-    return _diag(anp.tile(a, reps).reshape(newshape))
-
-# https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
-# Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete
+def inv(x):
+    """Inverse of Obs or CObs valued matrices."""
+    return _mat_mat_op(anp.linalg.inv, x)
+
+
-def grad_eig(ans, x):
-    """Gradient of a general square (complex valued) matrix"""
-    e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
-    n = e.shape[-1]
-
-    def vjp(g):
-        ge, gu = g
-        ge = _matrix_diag(ge)
-        f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
-        f -= _diag(f)
-        ut = anp.swapaxes(u, -1, -2)
-        r1 = f * _dot(ut, gu)
-        r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n)))
-        r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut)
-        if not anp.iscomplexobj(x):
-            r = anp.real(r)
-            # the derivative is still complex for real input (imaginary delta is allowed), real output
-            # but the derivative should be real in real input case when imaginary delta is forbidden
-        return r
-    return vjp
-
-
-defvjp(anp.linalg.eig, grad_eig)
-# End of the code block from autograd.master
+def cholesky(x):
+    """Cholesky decomposition of Obs or CObs valued matrices."""
+    return _mat_mat_op(anp.linalg.cholesky, x)


 def scalar_mat_op(op, obs, **kwargs):
@@ -82,7 +197,7 @@ def scalar_mat_op(op, obs, **kwargs):
     return derived_observable(_mat, raveled_obs, **kwargs)


-def mat_mat_op(op, obs, **kwargs):
+def _mat_mat_op(op, obs, **kwargs):
     """Computes the matrix to matrix operation op to a given matrix of Obs."""
     # Use real representation to calculate matrix operations for complex matrices
     if isinstance(obs.ravel()[0], CObs):
@@ -99,15 +214,18 @@ def mat_mat_op(op, obs, **kwargs):
         if kwargs.get('num_grad') is True:
             op_big_matrix = _num_diff_mat_mat_op(op, big_matrix, **kwargs)
         else:
-            op_big_matrix = derived_observable(lambda x, **kwargs: op(x), big_matrix)
+            op_big_matrix = derived_array(lambda x, **kwargs: op(x), [big_matrix])[0]
         dim = op_big_matrix.shape[0]
         op_A = op_big_matrix[0: dim // 2, 0: dim // 2]
         op_B = op_big_matrix[dim // 2:, 0: dim // 2]
-        return (1 + 0j) * op_A + 1j * op_B
+        res = np.empty_like(op_A)
+        for (n, m), entry in np.ndenumerate(op_A):
+            res[n, m] = CObs(op_A[n, m], op_B[n, m])
+        return res
     else:
         if kwargs.get('num_grad') is True:
             return _num_diff_mat_mat_op(op, obs, **kwargs)
-        return derived_observable(lambda x, **kwargs: op(x), obs)
+        return derived_array(lambda x, **kwargs: op(x), [obs])[0]


 def eigh(obs, **kwargs):
@@ -146,7 +264,7 @@ def svd(obs, **kwargs):
     return (u, s, vh)


-def slog_det(obs, **kwargs):
+def slogdet(obs, **kwargs):
     """Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
     def _mat(x):
         dim = int(np.sqrt(len(x)))
@@ -375,3 +493,51 @@ def _num_diff_svd(obs, **kwargs):
         res_mat2.append(row)

     return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1]))
+
+
+# This code block is directly taken from the current master branch of autograd and remains
+# only until the new version is released on PyPi
+_dot = partial(anp.einsum, '...ij,...jk->...ik')
+
+
+# batched diag
+def _diag(a):
+    return anp.eye(a.shape[-1]) * a
+
+
+# batched diagonal, similar to matrix_diag in tensorflow
+def _matrix_diag(a):
+    reps = anp.array(a.shape)
+    reps[:-1] = 1
+    reps[-1] = a.shape[-1]
+    newshape = list(a.shape) + [a.shape[-1]]
+    return _diag(anp.tile(a, reps).reshape(newshape))
+
+
+# https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
+# Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete
+def grad_eig(ans, x):
+    """Gradient of a general square (complex valued) matrix"""
+    e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
+    n = e.shape[-1]
+
+    def vjp(g):
+        ge, gu = g
+        ge = _matrix_diag(ge)
+        f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
+        f -= _diag(f)
+        ut = anp.swapaxes(u, -1, -2)
+        r1 = f * _dot(ut, gu)
+        r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n)))
+        r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut)
+        if not anp.iscomplexobj(x):
+            r = anp.real(r)
+            # the derivative is still complex for real input (imaginary delta is allowed), real output
+            # but the derivative should be real in real input case when imaginary delta is forbidden
+        return r
+    return vjp
+
+
+defvjp(anp.linalg.eig, grad_eig)
+# End of the code block from autograd.master
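With this hunk `mat_mat_op` becomes the private helper `_mat_mat_op`, and `matmul`, `inv` and `cholesky` (all built on the array-valued `derived_array`) form the public interface. A short sketch of the resulting API, modelled on the tests added later in this commit (dimension, sample length and ensemble name are arbitrary):

```python
import numpy as np
import pyerrors as pe

dim, length = 4, 100

# a 4x4 matrix of real observables on a single ensemble
my_array = np.array([pe.Obs([np.random.rand(length)], ['t1'])
                     for _ in range(dim ** 2)]).reshape((dim, dim))

prod = pe.linalg.matmul(my_array, my_array)  # propagates errors in one pass

# matmul agrees entry by entry with the overloaded @ operator
diff = prod - my_array @ my_array
assert all(e.is_zero() for _, e in np.ndenumerate(diff))

sym = np.dot(my_array, my_array.T)           # symmetric, positive definite
chol = pe.linalg.cholesky(sym)               # Obs valued Cholesky factor
inverse = pe.linalg.inv(sym)                 # Obs valued matrix inverse
```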

@@ -1,7 +1,6 @@
 import warnings
 import numpy as np
-import autograd.numpy as anp
-from .linalg import mat_mat_op
+from .linalg import inv, matmul
 from .dirac import gamma, gamma5


@@ -27,10 +26,9 @@ class Npr_matrix(np.ndarray):
         if self.shape != (12, 12):
             raise Exception('g5H only works for 12x12 matrices.')
         extended_g5 = np.kron(np.eye(3, dtype=int), gamma5)
-        new_matrix = extended_g5 @ self.conj().T @ extended_g5
-        new_matrix.mom_in = self.mom_out
-        new_matrix.mom_out = self.mom_in
-        return new_matrix
+        return Npr_matrix(matmul(extended_g5, self.conj().T, extended_g5),
+                          mom_in=self.mom_out,
+                          mom_out=self.mom_in)

     def _propagate_mom(self, other, name):
         s_mom = getattr(self, name, None)
@@ -69,7 +67,7 @@ def inv_propagator(prop):
     """ Inverts a 12x12 quark propagator"""
     if prop.shape != (12, 12):
         raise Exception("Only 12x12 propagators can be inverted.")
-    return Npr_matrix(mat_mat_op(anp.linalg.inv, prop), prop.mom_in)
+    return Npr_matrix(inv(prop), prop.mom_in)


 def Zq(inv_prop, fermion='Wilson'):
@@ -87,10 +85,18 @@ def Zq(inv_prop, fermion='Wilson'):

     if fermion == 'Wilson':
         p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3]) / np.sum(sin_mom ** 2)
+    elif fermion == 'Continuum':
+        p_mom = 2 * np.pi / L * mom
+        p_slash = -1j * (p_mom[0] * gamma[0] + p_mom[1] * gamma[1] + p_mom[2] * gamma[2] + p_mom[3] * gamma[3]) / np.sum(p_mom ** 2)
+    elif fermion == 'DWF':
+        W = np.sum(1 - np.cos(2 * np.pi / L * mom))
+        s2 = np.sum(sin_mom ** 2)
+        p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3])
+        p_slash /= 2 * (W - 1 + np.sqrt((1 - W) ** 2 + s2))
     else:
         raise Exception("Fermion type '" + fermion + "' not implemented")

-    res = 1 / 12. * np.trace(inv_prop @ np.kron(np.eye(3, dtype=int), p_slash))
+    res = 1 / 12. * np.trace(matmul(inv_prop, np.kron(np.eye(3, dtype=int), p_slash)))
     res.gamma_method()

     if not res.imag.is_zero_within_error(5):
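Transcribed into formulas, the new `Zq` branches use (with lattice momenta $p_\mu = 2\pi n_\mu / L$ and $\bar p_\mu = \sin p_\mu$; this is merely a rendering of the code above, not an addition to it):

```latex
p\!\!\!/_{\,\mathrm{Continuum}} = -\,i\,\frac{\sum_\mu p_\mu \gamma_\mu}{\sum_\mu p_\mu^2},
\qquad
p\!\!\!/_{\,\mathrm{DWF}} = \frac{-\,i \sum_\mu \bar p_\mu \gamma_\mu}
  {2\left(W - 1 + \sqrt{(1 - W)^2 + \sum_\mu \bar p_\mu^2}\right)},
\qquad
W = \sum_\mu \left(1 - \cos p_\mu\right)
```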

@@ -34,11 +34,11 @@ class Obs:
        ensemble.
     N_sigma_global -- Standard value for N_sigma (default 1.0)
     """
-    # __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', 'value', 'dvalue',
-    #              'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 'e_names',
-    #              'e_content', 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
-    #              'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
-    #              'tag']
+    __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
+                 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', 'e_names',
+                 'e_content', 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
+                 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
+                 'tag', '__dict__']

     e_tag_global = 0
     S_global = 2.0
@@ -111,23 +111,6 @@ class Obs:
         self.ddvalue = 0.0
         self.reweighted = 0

-        self.S = {}
-        self.tau_exp = {}
-        self.N_sigma = 0
-
-        self.e_names = {}
-        self.e_content = {}
-
-        self.e_dvalue = {}
-        self.e_ddvalue = {}
-        self.e_tauint = {}
-        self.e_dtauint = {}
-        self.e_windowsize = {}
-        self.e_rho = {}
-        self.e_drho = {}
-        self.e_n_tauint = {}
-        self.e_n_dtauint = {}
-
         self.tag = None

     @property
@@ -392,33 +375,35 @@ class Obs:
         else:
             percentage = np.abs(self.dvalue / self.value) * 100
             print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue, self.ddvalue, percentage))
-            if len(self.e_names) > 1:
-                print(' Ensemble errors:')
-            for e_name in self.e_names:
-                if len(self.e_names) > 1:
-                    print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
-                if self.tau_exp[e_name] > 0:
-                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
-                else:
-                    print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
-            if level > 1:
-                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
-                for e_name in self.e_names:
-                    print(e_name, ':', self.e_content[e_name])
+            if hasattr(self, 'e_names'):
+                if len(self.e_names) > 1:
+                    print(' Ensemble errors:')
+                for e_name in self.e_names:
+                    if len(self.e_names) > 1:
+                        print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+                    if self.tau_exp[e_name] > 0:
+                        print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
+                    else:
+                        print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
+                if level > 1:
+                    print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+                    for e_name in self.e_names:
+                        print(e_name, ':', self.e_content[e_name])

     def is_zero_within_error(self, sigma=1):
-        """ Checks whether the observable is zero within 'sigma' standard errors.
+        """Checks whether the observable is zero within 'sigma' standard errors.

         Works only properly when the gamma method was run.
         """
-        return np.abs(self.value) <= sigma * self.dvalue
+        return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue
+
+    def is_zero(self):
+        """Checks whether the observable is zero within machine precision."""
+        return np.isclose(0.0, self.value) and all(np.allclose(0.0, delta) for delta in self.deltas.values())

     def plot_tauint(self, save=None):
         """Plot integrated autocorrelation time for each ensemble."""
-        if not self.e_names:
+        if not hasattr(self, 'e_names'):
             raise Exception('Run the gamma method first.')

         fig = plt.figure()
@@ -451,7 +436,7 @@ class Obs:

     def plot_rho(self):
         """Plot normalized autocorrelation function for each ensemble."""
-        if not self.e_names:
+        if not hasattr(self, 'e_names'):
             raise Exception('Run the gamma method first.')
         for e, e_name in enumerate(self.e_names):
             plt.xlabel('W')
@@ -473,7 +458,7 @@ class Obs:

     def plot_rep_dist(self):
         """Plot replica distribution for each ensemble with more than one replicum."""
-        if not self.e_names:
+        if not hasattr(self, 'e_names'):
             raise Exception('Run the gamma method first.')
         for e, e_name in enumerate(self.e_names):
             if len(self.e_content[e_name]) == 1:
@@ -495,7 +480,7 @@ class Obs:

     def plot_history(self, expand=True):
         """Plot derived Monte Carlo history for each ensemble."""
-        if not self.e_names:
+        if not hasattr(self, 'e_names'):
             raise Exception('Run the gamma method first.')

         for e, e_name in enumerate(self.e_names):
@@ -519,7 +504,7 @@ class Obs:
     def plot_piechart(self):
         """Plot piechart which shows the fractional contribution of each
         ensemble to the error and returns a dictionary containing the fractions."""
-        if not self.e_names:
+        if not hasattr(self, 'e_names'):
             raise Exception('Run the gamma method first.')
         if self.dvalue == 0.0:
             raise Exception('Error is 0.0')
@@ -737,19 +722,23 @@ class CObs:
         return self._imag

     def gamma_method(self, **kwargs):
         """Executes the gamma_method for the real and the imaginary part."""
         if isinstance(self.real, Obs):
             self.real.gamma_method(**kwargs)
         if isinstance(self.imag, Obs):
             self.imag.gamma_method(**kwargs)

     def is_zero(self):
         """Checks whether both real and imaginary part are zero within machine precision."""
         return self.real == 0.0 and self.imag == 0.0

     def conjugate(self):
         return CObs(self.real, -self.imag)

     def __add__(self, other):
-        if hasattr(other, 'real') and hasattr(other, 'imag'):
+        if isinstance(other, np.ndarray):
+            return other + self
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
             return CObs(self.real + other.real,
                         self.imag + other.imag)
         else:
@@ -759,7 +748,9 @@ class CObs:
         return self + y

     def __sub__(self, other):
-        if hasattr(other, 'real') and hasattr(other, 'imag'):
+        if isinstance(other, np.ndarray):
+            return -1 * (other - self)
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
             return CObs(self.real - other.real, self.imag - other.imag)
         else:
             return CObs(self.real - other, self.imag)
@@ -768,29 +759,43 @@ class CObs:
         return -1 * (self - other)

     def __mul__(self, other):
-        if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-            return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-                                           [self.real, other.real, self.imag, other.imag],
-                                           man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-                        derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-                                           [self.real, other.real, self.imag, other.imag],
-                                           man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-        elif hasattr(other, 'real') and getattr(other, 'imag', 0) != 0:
-            return CObs(self.real * other.real - self.imag * other.imag,
-                        self.imag * other.real + self.real * other.imag)
+        if isinstance(other, np.ndarray):
+            return other * self
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+                                               [self.real, other.real, self.imag, other.imag],
+                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+                                               [self.real, other.real, self.imag, other.imag],
+                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+            elif getattr(other, 'imag', 0) != 0:
+                return CObs(self.real * other.real - self.imag * other.imag,
+                            self.imag * other.real + self.real * other.imag)
+            else:
+                return CObs(self.real * other.real, self.imag * other.real)
         else:
-            return CObs(self.real * np.real(other), self.imag * np.real(other))
+            return CObs(self.real * other, self.imag * other)

     def __rmul__(self, other):
         return self * other

     def __truediv__(self, other):
-        if hasattr(other, 'real') and hasattr(other, 'imag'):
+        if isinstance(other, np.ndarray):
+            return 1 / (other / self)
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
             r = other.real ** 2 + other.imag ** 2
             return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
         else:
             return CObs(self.real / other, self.imag / other)

     def __rtruediv__(self, other):
         r = self.real ** 2 + self.imag ** 2
         if hasattr(other, 'real') and hasattr(other, 'imag'):
             return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
         else:
             return CObs(self.real * other / r, -self.imag * other / r)

     def __abs__(self):
         return np.sqrt(self.real**2 + self.imag**2)
@@ -1148,7 +1153,7 @@ def covariance(obs1, obs2, correlation=False, **kwargs):
         (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], merge_idx([obs1.idl[name], obs2.idl[name]])]])))):
         raise Exception('Shapes of ensemble', name, 'do not fit')

-    if obs1.e_names == {} or obs2.e_names == {}:
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
         raise Exception('The gamma method has to be applied to both Obs first.')

     dvalue = 0
@@ -1232,7 +1237,7 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):

     return gamma

-    if obs1.e_names == {} or obs2.e_names == {}:
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
         raise Exception('The gamma method has to be applied to both Obs first.')

     dvalue = 0
@@ -1322,7 +1327,7 @@ def covariance3(obs1, obs2, correlation=False, **kwargs):
         (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], merge_idx([obs1.idl[name], obs2.idl[name]])]])))):
         raise Exception('Shapes of ensemble', name, 'do not fit')

-    if obs1.e_names == {} or obs2.e_names == {}:
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
         raise Exception('The gamma method has to be applied to both Obs first.')

     tau_exp = []
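The new `np.ndarray` branches in the `CObs` dunders defer mixed expressions to numpy so that elementwise operations commute, and `is_zero` gives a machine-precision check for both parts. A small sketch in the spirit of `test_cobs` further down (the values are arbitrary):

```python
import pyerrors as pe

obs1 = pe.pseudo_Obs(1.0, 0.1, 't')
obs2 = pe.pseudo_Obs(-0.2, 0.03, 't')
my_cobs = pe.CObs(obs1, obs2)

# complex scalars and CObs multiply commutatively ...
diff = my_cobs * (1.1 - 0.2j) - (1.1 - 0.2j) * my_cobs
assert diff.is_zero()

# ... and division round-trips to machine precision
assert (my_cobs / (2.3 + 0j) * (2.3 + 0j) - my_cobs).is_zero()
```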

setup.py (2 changes)

@@ -3,7 +3,7 @@
 from setuptools import setup, find_packages

 setup(name='pyerrors',
-      version='1.1.0',
+      version='2.0.0',
       description='Error analysis for lattice QCD',
       author='Fabian Joswig',
       author_email='fabian.joswig@ed.ac.uk',

tests/benchmark_test.py (new file, 49 lines)

@@ -0,0 +1,49 @@
+import numpy as np
+import pyerrors as pe
+import pytest
+import time
+
+np.random.seed(0)
+
+length = 1000
+
+
+def mul(x, y):
+    return x * y
+
+
+def test_b_mul(benchmark):
+    my_obs = pe.Obs([np.random.rand(length)], ['t1'])
+
+    benchmark(mul, my_obs, my_obs)
+
+
+def test_b_cmul(benchmark):
+    my_obs = pe.CObs(pe.Obs([np.random.rand(length)], ['t1']),
+                     pe.Obs([np.random.rand(length)], ['t1']))
+
+    benchmark(mul, my_obs, my_obs)
+
+
+def test_b_matmul(benchmark):
+    dim = 4
+    my_list = []
+    for i in range(dim ** 2):
+        my_list.append(pe.Obs([np.random.rand(length)], ['t1']))
+    my_array = np.array(my_list).reshape((dim, dim))
+
+    benchmark(pe.linalg.matmul, my_array, my_array)
+
+
+def test_b_cmatmul(benchmark):
+    dim = 4
+    my_list = []
+    for i in range(dim ** 2):
+        my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']),
+                               pe.Obs([np.random.rand(length)], ['t1'])))
+    my_array = np.array(my_list).reshape((dim, dim))
+
+    benchmark(pe.linalg.matmul, my_array, my_array)
+
+
+def test_b_gamma(benchmark):
+    my_obs = pe.Obs([np.random.rand(length)], ['t1'])
+    benchmark(my_obs.gamma_method)
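This file relies on the `benchmark` fixture from `pytest-benchmark`, so the benchmarks run inside an ordinary pytest invocation once the plugin is installed; for example (the exact flags are a suggestion, not part of the commit):

```
pip install pytest-benchmark
pytest tests/benchmark_test.py -v
```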

tests/linalg_test.py (new file, 164 lines)

@@ -0,0 +1,164 @@
+import numpy as np
+import autograd.numpy as anp
+import math
+import pyerrors as pe
+import pytest
+
+np.random.seed(0)
+
+
+def test_matmul():
+    for dim in [4, 8]:
+        my_list = []
+        length = 1000 + np.random.randint(200)
+        for i in range(dim ** 2):
+            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
+        my_array = np.array(my_list).reshape((dim, dim))
+        tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
+        for t, e in np.ndenumerate(tt):
+            assert e.is_zero(), t
+
+        my_list = []
+        length = 1000 + np.random.randint(200)
+        for i in range(dim ** 2):
+            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
+                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
+        my_array = np.array(my_list).reshape((dim, dim))
+        tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
+        for t, e in np.ndenumerate(tt):
+            assert e.is_zero(), t
+
+
+def test_multi_dot():
+    for dim in [4, 8]:
+        my_list = []
+        length = 1000 + np.random.randint(200)
+        for i in range(dim ** 2):
+            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
+        my_array = np.array(my_list).reshape((dim, dim))
+        tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
+        for t, e in np.ndenumerate(tt):
+            assert e.is_zero(), t
+
+        my_list = []
+        length = 1000 + np.random.randint(200)
+        for i in range(dim ** 2):
+            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
+                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
+        my_array = np.array(my_list).reshape((dim, dim))
+        tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
+        for t, e in np.ndenumerate(tt):
+            assert e.is_zero(), t
+
+
+def test_matrix_inverse():
+    content = []
+    for t in range(9):
+        exponent = np.random.normal(3, 5)
+        content.append(pe.pseudo_Obs(2 + 10 ** exponent, 10 ** (exponent - 1), 't'))
+
+    content.append(1.0)  # Add 1.0 as a float
+    matrix = np.diag(content)
+    inverse_matrix = pe.linalg.inv(matrix)
+    assert all([o.is_zero() for o in np.diag(matrix) * np.diag(inverse_matrix) - 1])
+
+
+def test_complex_matrix_inverse():
+    dimension = 6
+    base_matrix = np.empty((dimension, dimension), dtype=object)
+    matrix = np.empty((dimension, dimension), dtype=complex)
+    for (n, m), entry in np.ndenumerate(base_matrix):
+        exponent_real = np.random.normal(3, 5)
+        exponent_imag = np.random.normal(3, 5)
+        base_matrix[n, m] = pe.CObs(pe.pseudo_Obs(2 + 10 ** exponent_real, 10 ** (exponent_real - 1), 't'),
+                                    pe.pseudo_Obs(2 + 10 ** exponent_imag, 10 ** (exponent_imag - 1), 't'))
+
+    # Construct invertible matrix
+    obs_matrix = np.identity(dimension) + base_matrix @ base_matrix.T
+
+    for (n, m), entry in np.ndenumerate(obs_matrix):
+        matrix[n, m] = entry.real.value + 1j * entry.imag.value
+
+    inverse_matrix = np.linalg.inv(matrix)
+    inverse_obs_matrix = pe.linalg.inv(obs_matrix)
+    for (n, m), entry in np.ndenumerate(inverse_matrix):
+        assert np.isclose(inverse_matrix[n, m].real, inverse_obs_matrix[n, m].real.value)
+        assert np.isclose(inverse_matrix[n, m].imag, inverse_obs_matrix[n, m].imag.value)
+
+
+def test_matrix_functions():
+    dim = 4
+    matrix = []
+    for i in range(dim):
+        row = []
+        for j in range(dim):
+            row.append(pe.pseudo_Obs(np.random.rand(), 0.2 + 0.1 * np.random.rand(), 'e1'))
+        matrix.append(row)
+    matrix = np.array(matrix) @ np.identity(dim)
+
+    # Check inverse of matrix
+    inv = pe.linalg.inv(matrix)
+    check_inv = matrix @ inv
+
+    for (i, j), entry in np.ndenumerate(check_inv):
+        entry.gamma_method()
+        if(i == j):
+            assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
+        else:
+            assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
+        assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
+
+    # Check Cholesky decomposition
+    sym = np.dot(matrix, matrix.T)
+    cholesky = pe.linalg.cholesky(sym)
+    check = cholesky @ cholesky.T
+
+    for (i, j), entry in np.ndenumerate(check):
+        diff = entry - sym[i, j]
+        assert diff.is_zero()
+
+    # Check eigh
+    e, v = pe.linalg.eigh(sym)
+    for i in range(dim):
+        tmp = sym @ v[:, i] - v[:, i] * e[i]
+        for j in range(dim):
+            assert tmp[j].is_zero()
+
+    # Check svd
+    u, v, vh = pe.linalg.svd(sym)
+    diff = sym - u @ np.diag(v) @ vh
+
+    for (i, j), entry in np.ndenumerate(diff):
+        assert entry.is_zero()
+
+
+def test_complex_matrix_operations():
+    dimension = 4
+    base_matrix = np.empty((dimension, dimension), dtype=object)
+    for (n, m), entry in np.ndenumerate(base_matrix):
+        exponent_real = np.random.normal(3, 5)
+        exponent_imag = np.random.normal(3, 5)
+        base_matrix[n, m] = pe.CObs(pe.pseudo_Obs(2 + 10 ** exponent_real, 10 ** (exponent_real - 1), 't'),
+                                    pe.pseudo_Obs(2 + 10 ** exponent_imag, 10 ** (exponent_imag - 1), 't'))
+
+    for other in [2, 2.3, (1 - 0.1j), (0 + 2.1j)]:
+        ta = base_matrix * other
+        tb = other * base_matrix
+        diff = ta - tb
+        for (i, j), entry in np.ndenumerate(diff):
+            assert entry.is_zero()
+        ta = base_matrix + other
+        tb = other + base_matrix
+        diff = ta - tb
+        for (i, j), entry in np.ndenumerate(diff):
+            assert entry.is_zero()
+        ta = base_matrix - other
+        tb = other - base_matrix
+        diff = ta + tb
+        for (i, j), entry in np.ndenumerate(diff):
+            assert entry.is_zero()
+        ta = base_matrix / other
+        tb = other / base_matrix
+        diff = ta * tb - 1
+        for (i, j), entry in np.ndenumerate(diff):
+            assert entry.is_zero()

@@ -54,8 +54,12 @@ def test_function_overloading():
         t1 = f([a, b])
         t2 = pe.derived_observable(f, [a, b])
         c = t2 - t1
-        assert c.value == 0.0, str(i)
-        assert np.all(np.abs(c.deltas['e1']) < 1e-14), str(i)
+        assert c.is_zero()
+
+    assert np.log(np.exp(b)) == b
+    assert np.exp(np.log(b)) == b
+    assert np.sqrt(b ** 2) == b
+    assert np.sqrt(b) ** 2 == b


 def test_overloading_vectorization():
@@ -91,6 +95,28 @@ def test_gamma_method():
     assert test_obs.e_tauint['t'] - 10.5 <= test_obs.e_dtauint['t']


+def test_gamma_method_persistance():
+    my_obs = pe.Obs([np.random.rand(730)], ['t'])
+    my_obs.gamma_method()
+    value = my_obs.value
+    dvalue = my_obs.dvalue
+    ddvalue = my_obs.ddvalue
+    my_obs = 1.0 * my_obs
+    my_obs.gamma_method()
+    assert value == my_obs.value
+    assert dvalue == my_obs.dvalue
+    assert ddvalue == my_obs.ddvalue
+    my_obs.gamma_method()
+    assert value == my_obs.value
+    assert dvalue == my_obs.dvalue
+    assert ddvalue == my_obs.ddvalue
+    my_obs.gamma_method(S=3.7)
+    my_obs.gamma_method()
+    assert value == my_obs.value
+    assert dvalue == my_obs.dvalue
+    assert ddvalue == my_obs.ddvalue
+
+
 def test_covariance_is_variance():
     value = np.random.normal(5, 10)
     dvalue = np.abs(np.random.normal(0, 1))
@@ -197,6 +223,7 @@ def test_overloaded_functions():
         assert np.abs((ad_obs.value - item(val)) / ad_obs.value) < 1e-10, item.__name__
         assert np.abs(ad_obs.dvalue - dval * np.abs(deriv[i](val))) < 1e-6, item.__name__

+
 def test_utils():
     my_obs = pe.pseudo_Obs(1.0, 0.5, 't')
     my_obs.print(0)
@@ -211,6 +238,7 @@ def test_utils():
     assert my_obs > (my_obs - 1)
     assert my_obs < (my_obs + 1)

+
 def test_cobs():
     obs1 = pe.pseudo_Obs(1.0, 0.1, 't')
     obs2 = pe.pseudo_Obs(-0.2, 0.03, 't')
@@ -227,22 +255,22 @@ def test_cobs():
     fs = [[lambda x: x[0] + x[1], lambda x: x[1] + x[0]],
           [lambda x: x[0] * x[1], lambda x: x[1] * x[0]]]
-    for other in [1, 1.1, (1.1-0.2j), pe.CObs(obs1), pe.CObs(obs1, obs2)]:
+    for other in [3, 1.1, (1.1 - 0.2j), (2.3 + 0j), (0.0 + 7.7j), pe.CObs(obs1), pe.CObs(obs1, obs2)]:
         for funcs in fs:
             ta = funcs[0]([my_cobs, other])
             tb = funcs[1]([my_cobs, other])
             diff = ta - tb
-            assert np.isclose(0.0, float(diff.real))
-            assert np.isclose(0.0, float(diff.imag))
-            assert np.allclose(0.0, diff.real.deltas['t'])
-            assert np.allclose(0.0, diff.imag.deltas['t'])
+            assert diff.is_zero()

         ta = my_cobs - other
         tb = other - my_cobs
         diff = ta + tb
-        assert np.isclose(0.0, float(diff.real))
-        assert np.isclose(0.0, float(diff.imag))
-        assert np.allclose(0.0, diff.real.deltas['t'])
-        assert np.allclose(0.0, diff.imag.deltas['t'])
+        assert diff.is_zero()

-        div = my_cobs / other
+        ta = my_cobs / other
+        tb = other / my_cobs
+        diff = ta * tb - 1
+        assert diff.is_zero()
+
+        assert (my_cobs / other * other - my_cobs).is_zero()
+        assert (other / my_cobs * my_cobs - other).is_zero()

@@ -16,4 +16,4 @@ def test_root_linear():

     assert np.isclose(my_root.value, value)
     difference = my_obs - my_root
-    assert np.allclose(0.0, difference.deltas['t'])
+    assert difference.is_zero()

@@ -1,85 +0,0 @@
-import autograd.numpy as np
-import math
-import pyerrors as pe
-import pytest
-
-np.random.seed(0)
-
-
-def test_matrix_inverse():
-    content = []
-    for t in range(9):
-        exponent = np.random.normal(3, 5)
-        content.append(pe.pseudo_Obs(2 + 10 ** exponent, 10 ** (exponent - 1), 't'))
-
-    content.append(1.0)  # Add 1.0 as a float
-    matrix = np.diag(content)
-    inverse_matrix = pe.linalg.mat_mat_op(np.linalg.inv, matrix)
-    assert all([o.is_zero() for o in np.diag(matrix) * np.diag(inverse_matrix) - 1])
-
-
-def test_complex_matrix_inverse():
-    dimension = 6
-    base_matrix = np.empty((dimension, dimension), dtype=object)
-    matrix = np.empty((dimension, dimension), dtype=complex)
-    for (n, m), entry in np.ndenumerate(base_matrix):
-        exponent_real = np.random.normal(3, 5)
-        exponent_imag = np.random.normal(3, 5)
-        base_matrix[n, m] = pe.CObs(pe.pseudo_Obs(2 + 10 ** exponent_real, 10 ** (exponent_real - 1), 't'),
-                                    pe.pseudo_Obs(2 + 10 ** exponent_imag, 10 ** (exponent_imag - 1), 't'))
-
-    # Construct invertible matrix
-    obs_matrix = np.identity(dimension) + base_matrix @ base_matrix.T
-
-    for (n, m), entry in np.ndenumerate(obs_matrix):
-        matrix[n, m] = entry.real.value + 1j * entry.imag.value
-
-    inverse_matrix = np.linalg.inv(matrix)
-    inverse_obs_matrix = pe.linalg.mat_mat_op(np.linalg.inv, obs_matrix)
-    for (n, m), entry in np.ndenumerate(inverse_matrix):
-        assert np.isclose(inverse_matrix[n, m].real, inverse_obs_matrix[n, m].real.value)
-        assert np.isclose(inverse_matrix[n, m].imag, inverse_obs_matrix[n, m].imag.value)
-
-
-def test_matrix_functions():
-    dim = 3 + int(4 * np.random.rand())
-    print(dim)
-    matrix = []
-    for i in range(dim):
-        row = []
-        for j in range(dim):
-            row.append(pe.pseudo_Obs(np.random.rand(), 0.2 + 0.1 * np.random.rand(), 'e1'))
-        matrix.append(row)
-    matrix = np.array(matrix) @ np.identity(dim)
-
-    # Check inverse of matrix
-    inv = pe.linalg.mat_mat_op(np.linalg.inv, matrix)
-    check_inv = matrix @ inv
-
-    for (i, j), entry in np.ndenumerate(check_inv):
-        entry.gamma_method()
-        if(i == j):
-            assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
-        else:
-            assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
-        assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
-
-    # Check Cholesky decomposition
-    sym = np.dot(matrix, matrix.T)
-    cholesky = pe.linalg.mat_mat_op(np.linalg.cholesky, sym)
-    check = cholesky @ cholesky.T
-
-    for (i, j), entry in np.ndenumerate(check):
-        diff = entry - sym[i, j]
-        diff.gamma_method()
-        assert math.isclose(diff.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j)
-        assert math.isclose(diff.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j)
-
-    # Check eigh
-    e, v = pe.linalg.eigh(sym)
-    for i in range(dim):
-        tmp = sym @ v[:, i] - v[:, i] * e[i]
-        for j in range(dim):
-            tmp[j].gamma_method()
-            assert math.isclose(tmp[j].value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j)
-            assert math.isclose(tmp[j].dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j)