Compare commits

develop .. v2.12.0

No commits in common. "develop" and "v2.12.0" have entirely different histories.

28 changed files with 298 additions and 1050 deletions

.github/workflows/examples.yml

@@ -27,17 +27,17 @@ jobs:
       uses: actions/setup-python@v5
       with:
         python-version: ${{ matrix.python-version }}
-    - name: uv
-      uses: astral-sh/setup-uv@v5
     - name: Install
       run: |
         sudo apt-get update
         sudo apt-get install dvipng texlive-latex-extra texlive-fonts-recommended cm-super
-        uv pip install wheel --system
-        uv pip install . --system
-        uv pip install pytest nbmake --system
-        uv pip install -U matplotlib!=3.7.0 --system  # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
+        python -m pip install --upgrade pip
+        pip install wheel
+        pip install .
+        pip install pytest
+        pip install nbmake
+        pip install -U matplotlib!=3.7.0  # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
     - name: Run tests
       run: pytest -vv --nbmake examples/*.ipynb

.github/workflows/pytest.yml

@@ -17,12 +17,10 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest]
-        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
         include:
           - os: macos-latest
-            python-version: "3.12"
-          - os: ubuntu-24.04-arm
-            python-version: "3.12"
+            python-version: "3.10"

     steps:
       - name: Checkout source
@@ -32,15 +30,19 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
-      - name: uv
-        uses: astral-sh/setup-uv@v5
      - name: Install
        run: |
-          uv pip install wheel --system
-          uv pip install . --system
-          uv pip install pytest pytest-cov pytest-benchmark hypothesis --system
-          uv pip freeze --system
+          python -m pip install --upgrade pip
+          pip install wheel
+          pip install .
+          pip install pytest
+          pip install pytest-cov
+          pip install pytest-benchmark
+          pip install hypothesis
+          pip install py
+          pip install pyarrow
+          pip freeze
      - name: Run tests
        run: pytest --cov=pyerrors -vv

.github/workflows/release.yml

@@ -1,58 +0,0 @@
-name: Release
-
-on:
-  workflow_dispatch:
-  release:
-    types: [published]
-
-jobs:
-  build:
-    name: Build sdist and wheel
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        name: Checkout repository
-      - uses: actions/setup-python@v5
-        with:
-          python-version: "3.12"
-      - name: Install pypa/build
-        run: >-
-          python3 -m
-          pip install
-          build
-          --user
-      - name: Build wheel and source tarball
-        run: python3 -m build
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: python-package-distributions
-          path: dist/
-          if-no-files-found: error
-
-  publish:
-    needs: [build]
-    name: Upload to PyPI
-    runs-on: ubuntu-latest
-    environment:
-      name: pypi
-      url: https://pypi.org/p/pyerrors
-    permissions:
-      id-token: write
-    steps:
-      - name: Download artifacts
-        uses: actions/download-artifact@v4
-        with:
-          name: python-package-distributions
-          path: dist/
-      - name: Sanity check
-        run: ls -la dist/
-      - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1

.github/workflows/ruff.yml

@@ -1,15 +0,0 @@
-name: ruff
-on:
-  push:
-    branches:
-      - master
-      - develop
-  pull_request:
-jobs:
-  ruff:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: astral-sh/ruff-action@v2
-        with:
-          src: "./pyerrors"

CHANGELOG.md

@@ -2,29 +2,6 @@
 All notable changes to this project will be documented in this file.

-## [2.14.0] - 2025-03-09
-
-### Added
-- Explicit checks of the provided inverse matrix for correlated fits #259
-
-### Changed
-- Compute derivative for pow explicitly instead of relying on autograd. This results in a ~4x speedup for pow operations #246
-- More explicit exception types #248
-
-### Fixed
-- Removed the possibility to create an Obs from data on several replica #258
-- Fix range in `set_prange` #247
-- Fix ensemble name handling in sfcf input modules #253
-- Correct error message for fit shape mismatch #257
-
-## [2.13.0] - 2024-11-03
-
-### Added
-- Allow providing lower triangular matrix constructed from a Cholesky decomposition in least squares function for correlated fits.
-
-### Fixed
-- Corrected bug that prevented combined fits with multiple x-obs in some cases.
-
 ## [2.12.0] - 2024-08-22
 ### Changed
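The 2.14.0 entry "Compute derivative for pow explicitly instead of relying on autograd" (#246) is worth a gloss: for a power both partial derivatives are known in closed form, so the generic autograd machinery can be skipped during error propagation. A hypothetical sketch of the idea, with names not taken from pyerrors:

```python
import numpy as np

def pow_value_and_grads(a, b):
    """Return a**b together with its hand-written partial derivatives."""
    value = a ** b
    grad_a = b * a ** (b - 1)     # d(a**b)/da
    grad_b = value * np.log(a)    # d(a**b)/db, only valid for a > 0
    return value, (grad_a, grad_b)

print(pow_value_and_grads(2.0, 3.0))   # (8.0, (12.0, 8.0 * ln 2))
```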

README.md

@@ -1,4 +1,4 @@
-[![](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![arXiv](https://img.shields.io/badge/arXiv-2209.14371-b31b1b.svg)](https://arxiv.org/abs/2209.14371) [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2023.108750-blue)](https://doi.org/10.1016/j.cpc.2023.108750) [![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml)
+[![](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![arXiv](https://img.shields.io/badge/arXiv-2209.14371-b31b1b.svg)](https://arxiv.org/abs/2209.14371) [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2023.108750-blue)](https://doi.org/10.1016/j.cpc.2023.108750)
 # pyerrors
 `pyerrors` is a python framework for error computation and propagation of Markov chain Monte Carlo data from lattice field theory and statistical mechanics simulations.
@@ -14,6 +14,11 @@ Install the most recent release using pip and [pypi](https://pypi.org/project/pyerrors):
 python -m pip install pyerrors     # Fresh install
 python -m pip install -U pyerrors  # Update
 ```
+Install the most recent release using conda and [conda-forge](https://anaconda.org/conda-forge/pyerrors):
+```bash
+conda install -c conda-forge pyerrors  # Fresh install
+conda update -c conda-forge pyerrors   # Update
+```
 ## Contributing
 We appreciate all contributions to the code, the documentation and the examples. If you want to get involved please have a look at our [contribution guideline](https://github.com/fjosw/pyerrors/blob/develop/CONTRIBUTING.md).
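For orientation, a minimal example of the error-propagation workflow the README describes (the `Obs`/`gamma_method` API is pyerrors' own; the sample data here is invented):

```python
import numpy as np
import pyerrors as pe

samples = np.random.normal(1.0, 0.1, 1000)   # invented Monte Carlo history
my_obs = pe.Obs([samples], ['ensemble1'])    # one ensemble, one replicum
derived = np.log(my_obs) + my_obs ** 2       # errors propagate automatically
derived.gamma_method()                       # Gamma-method error estimate
print(derived)
```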

File diff suppressed because one or more lines are too long

pyerrors/__init__.py

@@ -481,12 +481,12 @@ from .obs import *
 from .correlators import *
 from .fits import *
 from .misc import *
-from . import dirac as dirac
-from . import input as input
-from . import linalg as linalg
-from . import mpm as mpm
-from . import roots as roots
-from . import integrate as integrate
-from . import special as special
+from . import dirac
+from . import input
+from . import linalg
+from . import mpm
+from . import roots
+from . import integrate
+from . import special

-from .version import __version__ as __version__
+from .version import __version__
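On develop every submodule import carries a redundant `as` alias. That is the PEP 484 re-export convention: strict type checkers only consider a name re-exported from an `__init__.py` if it is aliased to itself (or listed in `__all__`). A hypothetical package illustrating the difference, not pyerrors source:

```python
# mypackage/__init__.py (hypothetical excerpt)
from . import utils as utils   # redundant alias = explicit re-export;
                               # `mypackage.utils` is declared public API
from . import helpers          # bare form: mypy with no_implicit_reexport
                               # (and pyright in strict mode) treats this
                               # name as private to the package
```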

pyerrors/correlators.py

@@ -101,7 +101,7 @@ class Corr:
             self.N = 1
         elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
             self.content = data_input
-            noNull = [a for a in self.content if a is not None]  # To check if the matrices are correct for all undefined elements
+            noNull = [a for a in self.content if not (a is None)]  # To check if the matrices are correct for all undefined elements
             self.N = noNull[0].shape[0]
             if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
                 raise ValueError("Smearing matrices are not NxN.")
@@ -141,7 +141,7 @@
     def gamma_method(self, **kwargs):
         """Apply the gamma method to the content of the Corr."""
         for item in self.content:
-            if item is not None:
+            if not (item is None):
                 if self.N == 1:
                     item[0].gamma_method(**kwargs)
                 else:
@@ -159,7 +159,7 @@
         By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
         """
         if self.N == 1:
-            raise ValueError("Trying to project a Corr, that already has N=1.")
+            raise Exception("Trying to project a Corr, that already has N=1.")
         if vector_l is None:
             vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
@@ -167,16 +167,16 @@
             vector_r = vector_l
         if isinstance(vector_l, list) and not isinstance(vector_r, list):
             if len(vector_l) != self.T:
-                raise ValueError("Length of vector list must be equal to T")
+                raise Exception("Length of vector list must be equal to T")
             vector_r = [vector_r] * self.T
         if isinstance(vector_r, list) and not isinstance(vector_l, list):
             if len(vector_r) != self.T:
-                raise ValueError("Length of vector list must be equal to T")
+                raise Exception("Length of vector list must be equal to T")
             vector_l = [vector_l] * self.T
         if not isinstance(vector_l, list):
             if not vector_l.shape == vector_r.shape == (self.N,):
-                raise ValueError("Vectors are of wrong shape!")
+                raise Exception("Vectors are of wrong shape!")
             if normalize:
                 vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
             newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
@@ -201,7 +201,7 @@
             Second index to be picked.
         """
         if self.N == 1:
-            raise ValueError("Trying to pick item from projected Corr")
+            raise Exception("Trying to pick item from projected Corr")
         newcontent = [None if (item is None) else item[i, j] for item in self.content]
         return Corr(newcontent)
@@ -212,8 +212,8 @@
         timeslice and the error on each timeslice.
         """
         if self.N != 1:
-            raise ValueError("Can only make Corr[N=1] plottable")
-        x_list = [x for x in range(self.T) if self.content[x] is not None]
+            raise Exception("Can only make Corr[N=1] plottable")
+        x_list = [x for x in range(self.T) if not self.content[x] is None]
         y_list = [y[0].value for y in self.content if y is not None]
         y_err_list = [y[0].dvalue for y in self.content if y is not None]
@@ -222,9 +222,9 @@
     def symmetric(self):
         """ Symmetrize the correlator around x0=0."""
         if self.N != 1:
-            raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
+            raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
-            raise ValueError("Can not symmetrize odd T")
+            raise Exception("Can not symmetrize odd T")

         if self.content[0] is not None:
             if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:
@@ -237,7 +237,7 @@
             else:
                 newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
         if (all([x is None for x in newcontent])):
-            raise ValueError("Corr could not be symmetrized: No redundant values")
+            raise Exception("Corr could not be symmetrized: No redundant values")
         return Corr(newcontent, prange=self.prange)

     def anti_symmetric(self):
@@ -245,7 +245,7 @@
         if self.N != 1:
             raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
-            raise ValueError("Can not symmetrize odd T")
+            raise Exception("Can not symmetrize odd T")

         test = 1 * self
         test.gamma_method()
@@ -259,7 +259,7 @@
             else:
                 newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
         if (all([x is None for x in newcontent])):
-            raise ValueError("Corr could not be symmetrized: No redundant values")
+            raise Exception("Corr could not be symmetrized: No redundant values")
         return Corr(newcontent, prange=self.prange)

     def is_matrix_symmetric(self):
@@ -292,7 +292,7 @@
     def matrix_symmetric(self):
         """Symmetrizes the correlator matrices on every timeslice."""
         if self.N == 1:
-            raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
+            raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.")
         if self.is_matrix_symmetric():
             return 1.0 * self
         else:
@@ -336,10 +336,10 @@
         '''
         if self.N == 1:
-            raise ValueError("GEVP methods only works on correlator matrices and not single correlators.")
+            raise Exception("GEVP methods only works on correlator matrices and not single correlators.")
         if ts is not None:
             if (ts <= t0):
-                raise ValueError("ts has to be larger than t0.")
+                raise Exception("ts has to be larger than t0.")

         if "sorted_list" in kwargs:
             warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)
@@ -371,9 +371,9 @@
         if sort is None:
             if (ts is None):
-                raise ValueError("ts is required if sort=None.")
+                raise Exception("ts is required if sort=None.")
             if (self.content[t0] is None) or (self.content[ts] is None):
-                raise ValueError("Corr not defined at t0/ts.")
+                raise Exception("Corr not defined at t0/ts.")
             Gt = _get_mat_at_t(ts)
             reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv)
             if kwargs.get('auto_gamma', False) and vector_obs:
@@ -391,14 +391,14 @@
                     all_vecs.append(None)
             if sort == "Eigenvector":
                 if ts is None:
-                    raise ValueError("ts is required for the Eigenvector sorting method.")
+                    raise Exception("ts is required for the Eigenvector sorting method.")
                 all_vecs = _sort_vectors(all_vecs, ts)

             reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)]
             if kwargs.get('auto_gamma', False) and vector_obs:
                 [[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs]
         else:
-            raise ValueError("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")
+            raise Exception("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")

         if "state" in kwargs:
             return reordered_vecs[kwargs.get("state")]
@@ -435,7 +435,7 @@
         """
         if self.N != 1:
-            raise NotImplementedError("Multi-operator Prony not implemented!")
+            raise Exception("Multi-operator Prony not implemented!")

         array = np.empty([N, N], dtype="object")
         new_content = []
@@ -502,7 +502,7 @@
             correlator or a Corr of same length.
         """
         if self.N != 1:
-            raise ValueError("Only one-dimensional correlators can be safely correlated.")
+            raise Exception("Only one-dimensional correlators can be safely correlated.")
         new_content = []
         for x0, t_slice in enumerate(self.content):
             if _check_for_none(self, t_slice):
@@ -516,7 +516,7 @@
             elif isinstance(partner, Obs):  # Should this include CObs?
                 new_content.append(np.array([correlate(o, partner) for o in t_slice]))
             else:
-                raise TypeError("Can only correlate with an Obs or a Corr.")
+                raise Exception("Can only correlate with an Obs or a Corr.")

         return Corr(new_content)
@@ -583,7 +583,7 @@
             Available choice: symmetric, forward, backward, improved, log, default: symmetric
         """
         if self.N != 1:
-            raise ValueError("deriv only implemented for one-dimensional correlators.")
+            raise Exception("deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
@@ -592,7 +592,7 @@
                 else:
                     newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
             if (all([x is None for x in newcontent])):
-                raise ValueError('Derivative is undefined at all timeslices')
+                raise Exception('Derivative is undefined at all timeslices')
             return Corr(newcontent, padding=[1, 1])
         elif variant == "forward":
             newcontent = []
@@ -602,7 +602,7 @@
                 else:
                     newcontent.append(self.content[t + 1] - self.content[t])
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[0, 1])
         elif variant == "backward":
             newcontent = []
@@ -612,7 +612,7 @@
                 else:
                     newcontent.append(self.content[t] - self.content[t - 1])
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 0])
         elif variant == "improved":
             newcontent = []
@@ -622,7 +622,7 @@
                 else:
                     newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
             if (all([x is None for x in newcontent])):
-                raise ValueError('Derivative is undefined at all timeslices')
+                raise Exception('Derivative is undefined at all timeslices')
             return Corr(newcontent, padding=[2, 2])
         elif variant == 'log':
             newcontent = []
@@ -632,11 +632,11 @@
                 else:
                     newcontent.append(np.log(self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Log is undefined at all timeslices")
+                raise Exception("Log is undefined at all timeslices")
             logcorr = Corr(newcontent)
             return self * logcorr.deriv('symmetric')
         else:
-            raise ValueError("Unknown variant.")
+            raise Exception("Unknown variant.")

     def second_deriv(self, variant="symmetric"):
         r"""Return the second derivative of the correlator with respect to x0.
@@ -656,7 +656,7 @@
         $$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$
         """
         if self.N != 1:
-            raise ValueError("second_deriv only implemented for one-dimensional correlators.")
+            raise Exception("second_deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
@@ -665,7 +665,7 @@
                 else:
                     newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 1])
         elif variant == "big_symmetric":
             newcontent = []
@@ -675,7 +675,7 @@
                 else:
                     newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4)
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[2, 2])
         elif variant == "improved":
             newcontent = []
@@ -685,7 +685,7 @@
                 else:
                     newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[2, 2])
         elif variant == 'log':
             newcontent = []
@@ -695,11 +695,11 @@
                 else:
                     newcontent.append(np.log(self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Log is undefined at all timeslices")
+                raise Exception("Log is undefined at all timeslices")
             logcorr = Corr(newcontent)
             return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2)
         else:
-            raise ValueError("Unknown variant.")
+            raise Exception("Unknown variant.")

     def m_eff(self, variant='log', guess=1.0):
         """Returns the effective mass of the correlator as correlator object
@@ -728,7 +728,7 @@
                 else:
                     newcontent.append(self.content[t] / self.content[t + 1])
             if (all([x is None for x in newcontent])):
-                raise ValueError('m_eff is undefined at all timeslices')
+                raise Exception('m_eff is undefined at all timeslices')

             return np.log(Corr(newcontent, padding=[0, 1]))
@@ -742,7 +742,7 @@
                 else:
                     newcontent.append(self.content[t - 1] / self.content[t + 1])
             if (all([x is None for x in newcontent])):
-                raise ValueError('m_eff is undefined at all timeslices')
+                raise Exception('m_eff is undefined at all timeslices')

             return np.log(Corr(newcontent, padding=[1, 1])) / 2
@@ -767,7 +767,7 @@
                 else:
                     newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
             if (all([x is None for x in newcontent])):
-                raise ValueError('m_eff is undefined at all timeslices')
+                raise Exception('m_eff is undefined at all timeslices')

             return Corr(newcontent, padding=[0, 1])
@@ -779,11 +779,11 @@
                 else:
                     newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("m_eff is undefined at all timeslices")
+                raise Exception("m_eff is undefined at all timeslices")
             return np.arccosh(Corr(newcontent, padding=[1, 1]))
         else:
-            raise ValueError('Unknown variant.')
+            raise Exception('Unknown variant.')

     def fit(self, function, fitrange=None, silent=False, **kwargs):
         r'''Fits function to the data
@@ -801,7 +801,7 @@
             Decides whether output is printed to the standard output.
         '''
         if self.N != 1:
-            raise ValueError("Correlator must be projected before fitting")
+            raise Exception("Correlator must be projected before fitting")

         if fitrange is None:
             if self.prange:
@@ -810,12 +810,12 @@
                 fitrange = [0, self.T - 1]
         else:
             if not isinstance(fitrange, list):
-                raise TypeError("fitrange has to be a list with two elements")
+                raise Exception("fitrange has to be a list with two elements")
             if len(fitrange) != 2:
-                raise ValueError("fitrange has to have exactly two elements [fit_start, fit_stop]")
+                raise Exception("fitrange has to have exactly two elements [fit_start, fit_stop]")

-        xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
-        ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
+        xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
+        ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
         result = least_squares(xs, ys, function, silent=silent, **kwargs)
         return result
@@ -840,9 +840,9 @@
             else:
                 raise Exception("no plateau range provided")
         if self.N != 1:
-            raise ValueError("Correlator must be projected before getting a plateau.")
+            raise Exception("Correlator must be projected before getting a plateau.")
         if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
-            raise ValueError("plateau is undefined at all timeslices in plateaurange.")
+            raise Exception("plateau is undefined at all timeslices in plateaurange.")
         if auto_gamma:
             self.gamma_method()
         if method == "fit":
@@ -854,16 +854,16 @@
             return returnvalue
         else:
-            raise ValueError("Unsupported plateau method: " + method)
+            raise Exception("Unsupported plateau method: " + method)

     def set_prange(self, prange):
         """Sets the attribute prange of the Corr object."""
         if not len(prange) == 2:
-            raise ValueError("prange must be a list or array with two values")
+            raise Exception("prange must be a list or array with two values")
         if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
-            raise TypeError("Start and end point must be integers")
-        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]):
-            raise ValueError("Start and end point must define a range in the interval 0,T")
+            raise Exception("Start and end point must be integers")
+        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
+            raise Exception("Start and end point must define a range in the interval 0,T")

         self.prange = prange
         return
@@ -900,7 +900,7 @@
             Optional title of the figure.
         """
         if self.N != 1:
-            raise ValueError("Correlator must be projected before plotting")
+            raise Exception("Correlator must be projected before plotting")

         if auto_gamma:
             self.gamma_method()
@@ -941,7 +941,7 @@
                         hide_from = None
                     ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
             else:
-                raise TypeError("'comp' must be a correlator or a list of correlators.")
+                raise Exception("'comp' must be a correlator or a list of correlators.")

         if plateau:
             if isinstance(plateau, Obs):
@@ -950,14 +950,14 @@
                 ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
                 ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
             else:
-                raise TypeError("'plateau' must be an Obs")
+                raise Exception("'plateau' must be an Obs")

         if references:
             if isinstance(references, list):
                 for ref in references:
                     ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--')
             else:
-                raise TypeError("'references' must be a list of floating pint values.")
+                raise Exception("'references' must be a list of floating pint values.")

         if self.prange:
             ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0)
@@ -991,7 +991,7 @@
             if isinstance(save, str):
                 fig.savefig(save, bbox_inches='tight')
             else:
-                raise TypeError("'save' has to be a string.")
+                raise Exception("'save' has to be a string.")

     def spaghetti_plot(self, logscale=True):
         """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
@@ -1002,7 +1002,7 @@
             Determines whether the scale of the y-axis is logarithmic or standard.
         """
         if self.N != 1:
-            raise ValueError("Correlator needs to be projected first.")
+            raise Exception("Correlator needs to be projected first.")

         mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
         x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
@@ -1044,7 +1044,7 @@
         elif datatype == "pickle":
             dump_object(self, filename, **kwargs)
         else:
-            raise ValueError("Unknown datatype " + str(datatype))
+            raise Exception("Unknown datatype " + str(datatype))

     def print(self, print_range=None):
         print(self.__repr__(print_range))
@@ -1094,7 +1094,7 @@
     def __add__(self, y):
         if isinstance(y, Corr):
             if ((self.N != y.N) or (self.T != y.T)):
-                raise ValueError("Addition of Corrs with different shape")
+                raise Exception("Addition of Corrs with different shape")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@@ -1122,7 +1122,7 @@
     def __mul__(self, y):
         if isinstance(y, Corr):
             if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
-                raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@@ -1193,7 +1193,7 @@
     def __truediv__(self, y):
         if isinstance(y, Corr):
             if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
-                raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@@ -1207,16 +1207,16 @@
                     newcontent[t] = None

             if all([item is None for item in newcontent]):
-                raise ValueError("Division returns completely undefined correlator")
+                raise Exception("Division returns completely undefined correlator")
             return Corr(newcontent)

         elif isinstance(y, (Obs, CObs)):
             if isinstance(y, Obs):
                 if y.value == 0:
-                    raise ValueError('Division by zero will return undefined correlator')
+                    raise Exception('Division by zero will return undefined correlator')
             if isinstance(y, CObs):
                 if y.is_zero():
-                    raise ValueError('Division by zero will return undefined correlator')
+                    raise Exception('Division by zero will return undefined correlator')

             newcontent = []
             for t in range(self.T):
@@ -1228,7 +1228,7 @@
         elif isinstance(y, (int, float)):
             if y == 0:
-                raise ValueError('Division by zero will return undefined correlator')
+                raise Exception('Division by zero will return undefined correlator')
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]):
@@ -1284,7 +1284,7 @@
                 if np.isnan(tmp_sum.value):
                     newcontent[t] = None
         if all([item is None for item in newcontent]):
-            raise ValueError('Operation returns undefined correlator')
+            raise Exception('Operation returns undefined correlator')
         return Corr(newcontent)

     def sin(self):
@@ -1392,13 +1392,13 @@
         '''
         if self.N == 1:
-            raise ValueError('Method cannot be applied to one-dimensional correlators.')
+            raise Exception('Method cannot be applied to one-dimensional correlators.')
         if basematrix is None:
             basematrix = self
         if Ntrunc >= basematrix.N:
-            raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
+            raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
         if basematrix.N != self.N:
-            raise ValueError('basematrix and targetmatrix have to be of the same size.')
+            raise Exception('basematrix and targetmatrix have to be of the same size.')
         evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
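Most hunks in this file are the same one-line change: develop raises specific `ValueError`/`TypeError`s where v2.12.0 raised bare `Exception`s (changelog item #248). The payoff, in a generic sketch that is not pyerrors code:

```python
# With specific exception types, callers can handle bad input precisely
# instead of catching Exception and masking unrelated bugs.
def check_prange(prange, T=32):
    if len(prange) != 2:
        raise ValueError("prange must be a list or array with two values")
    if not all(isinstance(p, int) for p in prange):
        raise TypeError("Start and end point must be integers")

try:
    check_prange([3, "7"])
except TypeError as e:   # catchable on its own only because it is a TypeError
    print("bad types:", e)
```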

pyerrors/dirac.py

@@ -34,7 +34,7 @@ def epsilon_tensor(i, j, k):
     """
     test_set = set((i, j, k))
     if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
-        raise ValueError("Unexpected input", i, j, k)
+        raise Exception("Unexpected input", i, j, k)

     return (i - j) * (j - k) * (k - i) / 2
@@ -52,7 +52,7 @@ def epsilon_tensor_rank4(i, j, k, o):
     """
     test_set = set((i, j, k, o))
     if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
-        raise ValueError("Unexpected input", i, j, k, o)
+        raise Exception("Unexpected input", i, j, k, o)

     return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
@@ -92,5 +92,5 @@ def Grid_gamma(gamma_tag):
     elif gamma_tag == 'SigmaZT':
         g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
     else:
-        raise ValueError('Unkown gamma structure', gamma_tag)
+        raise Exception('Unkown gamma structure', gamma_tag)
     return g
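Aside from the exception types, `epsilon_tensor` is unchanged, and its closed form can be checked directly against the rank-3 Levi-Civita symbol:

```python
# Standalone check of the formula used above: +1 on even permutations,
# -1 on odd permutations, 0 whenever an index repeats.
from itertools import permutations

def eps(i, j, k):
    return (i - j) * (j - k) * (k - i) / 2

for perm in permutations((0, 1, 2)):
    print(perm, eps(*perm))
print((0, 0, 1), eps(0, 0, 1))   # repeated index -> 0
```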

pyerrors/fits.py

@@ -14,7 +14,7 @@ from autograd import hessian as auto_hessian
 from autograd import elementwise_grad as egrad
 from numdifftools import Jacobian as num_jacobian
 from numdifftools import Hessian as num_hessian
-from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky
+from .obs import Obs, derived_observable, covariance, cov_Obs


 class Fit_result(Sequence):
@@ -151,14 +151,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
         In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
         This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
-    inv_chol_cov_matrix [array,list], optional
-        array: shape = (no of y values) X (no of y values)
-        list: for an uncombined fit: [""]
-              for a combined fit: list of keys belonging to the corr_matrix saved in the array, must be the same as the keys of the y dict in alphabetical order
-        If correlated_fit=True is set as well, can provide an inverse covariance matrix (y errors, dy_f included!) of your own choosing for a correlated fit.
-        The matrix must be a lower triangular matrix constructed from a Cholesky decomposition: The function invert_corr_cov_cholesky(corr, inverrdiag) can be
-        used to construct it from a correlation matrix (corr) and the errors dy_f of the data points (inverrdiag = np.diag(1 / np.asarray(dy_f))). For the correct
-        ordering the correlation matrix (corr) can be sorted via the function sort_corr(corr, kl, yd) where kl is the list of keys and yd the y dict.
     expected_chisquare : bool
         If True estimates the expected chisquare which is
         corrected by effects caused by correlated input data (default False).
@@ -173,57 +165,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
     -------
     output : Fit_result
         Parameters and information on the fitted result.
-
-    Examples
-    ------
-    >>> # Example of a correlated (correlated_fit = True, inv_chol_cov_matrix handed over) combined fit, based on a randomly generated data set
-    >>> import numpy as np
-    >>> from scipy.stats import norm
-    >>> from scipy.linalg import cholesky
-    >>> import pyerrors as pe
-    >>> # generating the random data set
-    >>> num_samples = 400
-    >>> N = 3
-    >>> x = np.arange(N)
-    >>> x1 = norm.rvs(size=(N, num_samples))  # generate random numbers
-    >>> x2 = norm.rvs(size=(N, num_samples))  # generate random numbers
-    >>> r = r1 = r2 = np.zeros((N, N))
-    >>> y = {}
-    >>> for i in range(N):
-    >>>    for j in range(N):
-    >>>        r[i, j] = np.exp(-0.8 * np.fabs(i - j))  # element in correlation matrix
-    >>> errl = np.sqrt([3.4, 2.5, 3.6])  # set y errors
-    >>> for i in range(N):
-    >>>    for j in range(N):
-    >>>        r[i, j] *= errl[i] * errl[j]  # element in covariance matrix
-    >>> c = cholesky(r, lower=True)
-    >>> y = {'a': np.dot(c, x1), 'b': np.dot(c, x2)}  # generate y data with the covariance matrix defined
-    >>> # random data set has been generated, now the dictionaries and the inverse covariance matrix to be handed over are built
-    >>> x_dict = {}
-    >>> y_dict = {}
-    >>> chol_inv_dict = {}
-    >>> data = []
-    >>> for key in y.keys():
-    >>>    x_dict[key] = x
-    >>>    for i in range(N):
-    >>>       data.append(pe.Obs([[i + 1 + o for o in y[key][i]]], ['ens']))  # generate y Obs from the y data
-    >>> [o.gamma_method() for o in data]
-    >>> corr = pe.covariance(data, correlation=True)
-    >>> inverrdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
-    >>> chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag)  # gives form of the inverse covariance matrix needed for the combined correlated fit below
-    >>> y_dict = {'a': data[:3], 'b': data[3:]}
-    >>> # common fit parameter p[0] in combined fit
-    >>> def fit1(p, x):
-    >>>    return p[0] + p[1] * x
-    >>> def fit2(p, x):
-    >>>    return p[0] + p[2] * x
-    >>> fitf_dict = {'a': fit1, 'b': fit2}
-    >>> fitp_inv_cov_combined_fit = pe.least_squares(x_dict, y_dict, fitf_dict, correlated_fit=True, inv_chol_cov_matrix=[chol_inv, ['a', 'b']])
-    Fit with 3 parameters
-    Method: Levenberg-Marquardt
-    `ftol` termination condition is satisfied.
-    chisquare/d.o.f.: 0.5388013574561786 # random
-    fit parameters [1.11897846 0.96361162 0.92325319] # random
     '''
     output = Fit_result()
@@ -256,7 +197,7 @@
     if sorted(list(funcd.keys())) != key_ls:
         raise ValueError('x and func dictionaries do not contain the same keys.')

-    x_all = np.concatenate([np.array(xd[key]).transpose() for key in key_ls]).transpose()
+    x_all = np.concatenate([np.array(xd[key]) for key in key_ls])
     y_all = np.concatenate([np.array(yd[key]) for key in key_ls])

     y_f = [o.value for o in y_all]
@@ -293,7 +234,7 @@
     if len(key_ls) > 1:
         for key in key_ls:
             if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape:
-                raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {np.asarray(yd[key]).shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
+                raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")

     if not silent:
         print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
@@ -356,21 +297,15 @@
         return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)

     if kwargs.get('correlated_fit') is True:
-        if 'inv_chol_cov_matrix' in kwargs:
-            chol_inv = kwargs.get('inv_chol_cov_matrix')
-            if (chol_inv[0].shape[0] != len(dy_f)):
-                raise TypeError('The number of columns of the inverse covariance matrix handed over needs to be equal to the number of y errors.')
-            if (chol_inv[0].shape[0] != chol_inv[0].shape[1]):
-                raise TypeError('The inverse covariance matrix handed over needs to have the same number of rows as columns.')
-            if (chol_inv[1] != key_ls):
-                raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.')
-            chol_inv = chol_inv[0]
-            if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))):
-                raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.')
-        else:
-            corr = covariance(y_all, correlation=True, **kwargs)
-            inverrdiag = np.diag(1 / np.asarray(dy_f))
-            chol_inv = invert_corr_cov_cholesky(corr, inverrdiag)
+        corr = covariance(y_all, correlation=True, **kwargs)
+        covdiag = np.diag(1 / np.asarray(dy_f))
+        condn = np.linalg.cond(corr)
+        if condn > 0.1 / np.finfo(float).eps:
+            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
+        if condn > 1e13:
+            warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
+        chol = np.linalg.cholesky(corr)
+        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)

     def general_chisqfunc(p, ivars, pr):
         model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
@@ -415,6 +350,7 @@
         fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)

     if kwargs.get('correlated_fit') is True:
+
         def chisqfunc_residuals(p):
             return general_chisqfunc(p, y_f, p_f)
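Both sides construct the same object for correlated fits: the inverse Cholesky factor of the correlation matrix times the inverse errors (on develop the construction is factored out into `invert_corr_cov_cholesky`, and a user-supplied matrix is accepted via `inv_chol_cov_matrix`). A standalone sketch, with invented numbers, of why this whitening turns the correlated chi-square into a plain sum of squares:

```python
import numpy as np
import scipy.linalg

corr = np.array([[1.0, 0.6],
                 [0.6, 1.0]])          # correlation matrix of the y data
dy = np.array([0.1, 0.2])              # errors of the y data
chol = np.linalg.cholesky(corr)        # corr = chol @ chol.T
chol_inv = scipy.linalg.solve_triangular(chol, np.diag(1 / dy), lower=True)

# chi^2 = r^T C^{-1} r with C = diag(dy) @ corr @ diag(dy) equals the
# squared norm of the whitened residuals:
r = np.array([0.05, -0.10])
C = np.diag(dy) @ corr @ np.diag(dy)
print(r @ np.linalg.inv(C) @ r)        # direct evaluation
print(np.sum((chol_inv @ r) ** 2))     # whitened form, same number
```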

pyerrors/input/__init__.py

@@ -5,11 +5,11 @@ r'''
 For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
 See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
 '''
-from . import bdio as bdio
-from . import dobs as dobs
-from . import hadrons as hadrons
-from . import json as json
-from . import misc as misc
-from . import openQCD as openQCD
-from . import pandas as pandas
-from . import sfcf as sfcf
+from . import bdio
+from . import dobs
+from . import hadrons
+from . import json
+from . import misc
+from . import openQCD
+from . import pandas
+from . import sfcf

pyerrors/input/dobs.py

@@ -79,7 +79,7 @@ def _dict_to_xmlstring_spaces(d, space='  '):
             o += space
         o += li + '\n'
         if li.startswith('<') and not cm:
-            if '<%s' % ('/') not in li:
+            if not '<%s' % ('/') in li:
                 c += 1
         cm = False
     return o
@@ -529,8 +529,7 @@ def import_dobs_string(content, full_output=False, separator_insertion=True):
                     deltas.append(repdeltas)
                     idl.append(repidl)

-            obsmeans = [np.average(deltas[j]) for j in range(len(deltas))]
-            res.append(Obs([np.array(deltas[j]) - obsmeans[j] for j in range(len(obsmeans))], obs_names, idl=idl, means=obsmeans))
+            res.append(Obs(deltas, obs_names, idl=idl))
             res[-1]._value = mean[i]
     _check(len(e_names) == ne)
@@ -672,7 +671,7 @@ def _dobsdict_to_xmlstring_spaces(d, space='  '):
             o += space
         o += li + '\n'
         if li.startswith('<') and not cm:
-            if '<%s' % ('/') not in li:
+            if not '<%s' % ('/') in li:
                 c += 1
         cm = False
     return o
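The `import_dobs_string` hunk shows the substantive difference on develop: imported samples are centered per observable and the averages are handed to `Obs` via the explicit `means` argument, instead of passing the raw deltas. The centering step in isolation, on invented numbers:

```python
import numpy as np

repdeltas = np.array([1.2, 0.9, 1.1, 0.8])   # invented per-replicum samples
obsmean = np.average(repdeltas)
centered = repdeltas - obsmean               # develop passes these with means=[obsmean]
print(obsmean, centered, centered.sum())     # centered deltas sum to zero
```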

pyerrors/input/hadrons.py

@@ -113,7 +113,7 @@ def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
     infos = []
     for hd5_file in files:
         h5file = h5py.File(path + '/' + hd5_file, "r")
-        if group + '/' + entry not in h5file:
+        if not group + '/' + entry in h5file:
             raise Exception("Entry '" + entry + "' not contained in the files.")
         raw_data = h5file[group + '/' + entry + '/corr']
         real_data = raw_data[:].view("complex")
@@ -186,7 +186,7 @@ def _extract_real_arrays(path, files, tree, keys):
     for hd5_file in files:
         h5file = h5py.File(path + '/' + hd5_file, "r")
         for key in keys:
-            if tree + '/' + key not in h5file:
+            if not tree + '/' + key in h5file:
                 raise Exception("Entry '" + key + "' not contained in the files.")
             raw_data = h5file[tree + '/' + key + '/data']
             real_data = raw_data[:].astype(np.double)

pyerrors/input/json.py

@@ -133,11 +133,10 @@ def create_json_string(ol, description='', indent=1):
             names = []
             idl = []
             for key, value in obs.idl.items():
-                samples.append(np.array([np.nan] * len(value)))
+                samples.append([np.nan] * len(value))
                 names.append(key)
                 idl.append(value)
-            my_obs = Obs(samples, names, idl, means=[np.nan for n in names])
-            my_obs._value = np.nan
+            my_obs = Obs(samples, names, idl)
             my_obs._covobs = obs._covobs
             for name in obs._covobs:
                 my_obs.names.append(name)
@@ -332,8 +331,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
         cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))

         if od:
-            r_offsets = [np.average([ddi[0] for ddi in di]) for di in od['deltas']]
-            ret = Obs([np.array([ddi[0] for ddi in od['deltas'][i]]) - r_offsets[i] for i in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[0] for ro in r_offsets])
+            ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
             ret._value = values[0]
         else:
             ret = Obs([], [], means=[])
@@ -358,8 +356,7 @@
         taglist = o.get('tag', layout * [None])
         for i in range(layout):
             if od:
-                r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
-                ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
+                ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
                 ret[-1]._value = values[i]
             else:
                 ret.append(Obs([], [], means=[]))
@@ -386,8 +383,7 @@
         taglist = o.get('tag', N * [None])
         for i in range(N):
             if od:
-                r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
-                ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
+                ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
                 ret[-1]._value = values[i]
             else:
                 ret.append(Obs([], [], means=[]))

pyerrors/input/openQCD.py

@@ -47,7 +47,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
         Reweighting factors read
     """
     known_oqcd_versions = ['1.4', '1.6', '2.0']
-    if version not in known_oqcd_versions:
+    if not (version in known_oqcd_versions):
         raise Exception('Unknown openQCD version defined!')
     print("Working with openQCD version " + version)
     if 'postfix' in kwargs:

View file

@ -127,8 +127,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
check_configs: list[list[int]] check_configs: list[list[int]]
list of lists of expected configs, e.g. [range(1, 1000)] list of lists of expected configs, e.g. [range(1, 1000)]
for one replicum with 1000 configs for one replicum with 1000 configs
rep_string: str
Separator between the ensemble name and the replicum. Example: in "ensAr0", "r" is the separator string.
Returns Returns
------- -------
result: dict[list[Obs]] result: dict[list[Obs]]
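A hedged usage sketch of the new rep_string keyword (path, prefix, correlator and ensemble names are hypothetical):
import pyerrors as pe

# replica directories named like "ensAq0", "ensAq1" are split at "q"
result = pe.input.sfcf.read_sfcf_multi("./raw_data", "ensA", ["f_A"],
                                       ens_name="ensA", rep_string="q")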
@ -200,9 +199,9 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
else: else:
ens_name = kwargs.get("ens_name") ens_name = kwargs.get("ens_name")
if not appended: if not appended:
new_names = _get_rep_names(ls, ens_name, rep_sep=(kwargs.get('rep_string', 'r'))) new_names = _get_rep_names(ls, ens_name)
else: else:
new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name, rep_sep=(kwargs.get('rep_string', 'r'))) new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name)
new_names = sort_names(new_names) new_names = sort_names(new_names)
idl = [] idl = []
@ -647,22 +646,22 @@ def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single):
return T, rep_idl, data return T, rep_idl, data
def _get_rep_names(ls, ens_name=None, rep_sep='r'): def _get_rep_names(ls, ens_name=None):
new_names = [] new_names = []
for entry in ls: for entry in ls:
try: try:
idx = entry.index(rep_sep) idx = entry.index('r')
except Exception: except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if ens_name: if ens_name:
new_names.append(ens_name + '|' + entry[idx:]) new_names.append('ens_name' + '|' + entry[idx:])
else: else:
new_names.append(entry[:idx] + '|' + entry[idx:]) new_names.append(entry[:idx] + '|' + entry[idx:])
return new_names return new_names
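To illustrate the rep_sep logic just above (entry names are made up):
_get_rep_names(['ensAr0', 'ensAr1'])               # ['ensA|r0', 'ensA|r1']
_get_rep_names(['ensAq0', 'ensAq1'], rep_sep='q')  # ['ensA|q0', 'ensA|q1']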
def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'): def _get_appended_rep_names(ls, prefix, name, ens_name=None):
new_names = [] new_names = []
for exc in ls: for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name): if not fnmatch.fnmatch(exc, prefix + '*.' + name):
@ -671,12 +670,12 @@ def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
for entry in ls: for entry in ls:
myentry = entry[:-len(name) - 1] myentry = entry[:-len(name) - 1]
try: try:
idx = myentry.index(rep_sep) idx = myentry.index('r')
except Exception: except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.") raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if ens_name: if ens_name:
new_names.append(ens_name + '|' + entry[idx:]) new_names.append('ens_name' + '|' + entry[idx:])
else: else:
new_names.append(myentry[:idx] + '|' + myentry[idx:]) new_names.append(myentry[:idx] + '|' + myentry[idx:])
return new_names return new_names

View file

@ -82,8 +82,6 @@ class Obs:
raise ValueError('Names are not unique.') raise ValueError('Names are not unique.')
if not all(isinstance(x, str) for x in names): if not all(isinstance(x, str) for x in names):
raise TypeError('All names have to be strings.') raise TypeError('All names have to be strings.')
if len(set([o.split('|')[0] for o in names])) > 1:
raise ValueError('Cannot initialize Obs based on multiple ensembles. Please average separate Obs from each ensemble.')
else: else:
if not isinstance(names[0], str): if not isinstance(names[0], str):
raise TypeError('All names have to be strings.') raise TypeError('All names have to be strings.')
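With the new check in place, data from several ensembles is combined by adding (or averaging) single-ensemble Obs; a minimal sketch with made-up ensemble names:
import numpy as np
import pyerrors as pe

# pe.Obs([d1, d2], ['A0', 'A1|r001']) now raises a ValueError; instead:
obs = pe.Obs([np.random.rand(50)], ['A0']) + pe.Obs([np.random.rand(50)], ['A1|r001'])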
@ -224,7 +222,7 @@ class Obs:
tmp = kwargs.get(kwarg_name) tmp = kwargs.get(kwarg_name)
if isinstance(tmp, (int, float)): if isinstance(tmp, (int, float)):
if tmp < 0: if tmp < 0:
raise ValueError(kwarg_name + ' has to be larger than or equal to 0.') raise Exception(kwarg_name + ' has to be larger than or equal to 0.')
for e, e_name in enumerate(self.e_names): for e, e_name in enumerate(self.e_names):
getattr(self, kwarg_name)[e_name] = tmp getattr(self, kwarg_name)[e_name] = tmp
else: else:
@ -293,7 +291,7 @@ class Obs:
texp = self.tau_exp[e_name] texp = self.tau_exp[e_name]
# Critical slowing down analysis # Critical slowing down analysis
if w_max // 2 <= 1: if w_max // 2 <= 1:
raise ValueError("Need at least 8 samples for tau_exp error analysis") raise Exception("Need at least 8 samples for tau_exp error analysis")
for n in range(1, w_max // 2): for n in range(1, w_max // 2):
_compute_drho(n + 1) _compute_drho(n + 1)
if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
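Both parameters validated above are set per call of the gamma method; a minimal usage sketch (the ensemble name is made up):
import pyerrors as pe

o = pe.pseudo_Obs(1.0, 0.1, 'ensA')
o.gamma_method(S=2.0)        # automatic windowing controlled by S
o.gamma_method(tau_exp=4.0)  # attaches the exponential tail used in the analysis above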
@ -622,7 +620,7 @@ class Obs:
if not hasattr(self, 'e_dvalue'): if not hasattr(self, 'e_dvalue'):
raise Exception('Run the gamma method first.') raise Exception('Run the gamma method first.')
if np.isclose(0.0, self._dvalue, atol=1e-15): if np.isclose(0.0, self._dvalue, atol=1e-15):
raise ValueError('Error is 0.0') raise Exception('Error is 0.0')
labels = self.e_names labels = self.e_names
sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
fig1, ax1 = plt.subplots() fig1, ax1 = plt.subplots()
@ -661,7 +659,7 @@ class Obs:
with open(file_name + '.p', 'wb') as fb: with open(file_name + '.p', 'wb') as fb:
pickle.dump(self, fb) pickle.dump(self, fb)
else: else:
raise TypeError("Unknown datatype " + str(datatype)) raise Exception("Unknown datatype " + str(datatype))
def export_jackknife(self): def export_jackknife(self):
"""Export jackknife samples from the Obs """Export jackknife samples from the Obs
@ -678,7 +676,7 @@ class Obs:
""" """
if len(self.names) != 1: if len(self.names) != 1:
raise ValueError("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
name = self.names[0] name = self.names[0]
full_data = self.deltas[name] + self.r_values[name] full_data = self.deltas[name] + self.r_values[name]
@ -713,7 +711,7 @@ class Obs:
should agree with samples from a full bootstrap analysis up to O(1/N). should agree with samples from a full bootstrap analysis up to O(1/N).
""" """
if len(self.names) != 1: if len(self.names) != 1:
raise ValueError("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
name = self.names[0] name = self.names[0]
length = self.N length = self.N
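A hedged sketch of the jackknife export; per the pyerrors documentation the leading entry of the returned array is the central value, followed by the N jackknife estimates:
import numpy as np
import pyerrors as pe

o = pe.Obs([np.random.rand(100)], ['ensA'])   # hypothetical single-ensemble Obs
jacks = o.export_jackknife()                  # shape (N + 1,)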
@ -858,12 +856,15 @@ class Obs:
def __pow__(self, y): def __pow__(self, y):
if isinstance(y, Obs): if isinstance(y, Obs):
return derived_observable(lambda x, **kwargs: x[0] ** x[1], [self, y], man_grad=[y.value * self.value ** (y.value - 1), self.value ** y.value * np.log(self.value)]) return derived_observable(lambda x: x[0] ** x[1], [self, y])
else: else:
return derived_observable(lambda x, **kwargs: x[0] ** y, [self], man_grad=[y * self.value ** (y - 1)]) return derived_observable(lambda x: x[0] ** y, [self])
def __rpow__(self, y): def __rpow__(self, y):
return derived_observable(lambda x, **kwargs: y ** x[0], [self], man_grad=[y ** self.value * np.log(y)]) if isinstance(y, Obs):
return derived_observable(lambda x: x[0] ** x[1], [y, self])
else:
return derived_observable(lambda x: y ** x[0], [self])
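The manual gradients supplied above are the textbook partial derivatives of the power function,
d(x**y)/dx = y * x**(y - 1),  d(x**y)/dy = x**y * log(x),  d(y**x)/dx = y**x * log(y),
and passing them via man_grad lets derived_observable use them directly instead of differentiating the lambda automatically.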
def __abs__(self): def __abs__(self):
return derived_observable(lambda x: anp.abs(x[0]), [self]) return derived_observable(lambda x: anp.abs(x[0]), [self])
@ -1269,7 +1270,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
if 'man_grad' in kwargs: if 'man_grad' in kwargs:
deriv = np.asarray(kwargs.get('man_grad')) deriv = np.asarray(kwargs.get('man_grad'))
if new_values.shape + data.shape != deriv.shape: if new_values.shape + data.shape != deriv.shape:
raise ValueError('Manual derivative does not have correct shape.') raise Exception('Manual derivative does not have correct shape.')
elif kwargs.get('num_grad') is True: elif kwargs.get('num_grad') is True:
if multi > 0: if multi > 0:
raise Exception('Multi mode currently not supported for numerical derivative') raise Exception('Multi mode currently not supported for numerical derivative')
@ -1335,7 +1336,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
raise ValueError('The same name has been used for deltas and covobs!') raise Exception('The same name has been used for deltas and covobs!')
new_samples = [] new_samples = []
new_means = [] new_means = []
new_idl = [] new_idl = []
@ -1376,7 +1377,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
Has to be a subset of idx_old. Has to be a subset of idx_old.
""" """
if not len(deltas) == len(idx_old): if not len(deltas) == len(idx_old):
raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
if type(idx_old) is range and type(idx_new) is range: if type(idx_old) is range and type(idx_new) is range:
if idx_old == idx_new: if idx_old == idx_new:
return deltas return deltas
@ -1384,7 +1385,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
return deltas return deltas
indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
if len(indices) < len(idx_new): if len(indices) < len(idx_new):
raise ValueError('Error in _reduce_deltas: Config of idx_new not in idx_old') raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old')
return np.array(deltas)[indices] return np.array(deltas)[indices]
@ -1406,14 +1407,12 @@ def reweight(weight, obs, **kwargs):
result = [] result = []
for i in range(len(obs)): for i in range(len(obs)):
if len(obs[i].cov_names): if len(obs[i].cov_names):
raise ValueError('Error: Not possible to reweight an Obs that contains covobs!') raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
if not set(obs[i].names).issubset(weight.names): if not set(obs[i].names).issubset(weight.names):
raise ValueError('Error: Ensembles do not fit') raise Exception('Error: Ensembles do not fit')
if len(obs[i].mc_names) > 1 or len(weight.mc_names) > 1:
raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.')
for name in obs[i].names: for name in obs[i].names:
if not set(obs[i].idl[name]).issubset(weight.idl[name]): if not set(obs[i].idl[name]).issubset(weight.idl[name]):
raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
new_samples = [] new_samples = []
w_deltas = {} w_deltas = {}
for name in sorted(obs[i].names): for name in sorted(obs[i].names):
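A hedged usage sketch of reweight (ensemble name and data are made up):
import numpy as np
import pyerrors as pe

w = pe.Obs([np.random.rand(1000)], ['t'])   # hypothetical reweighting factor
o = pe.Obs([np.random.rand(1000)], ['t'])   # observable on the same configurations
o_rw = pe.reweight(w, [o])[0]               # returns a list with one Obs per input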
@ -1446,21 +1445,18 @@ def correlate(obs_a, obs_b):
----- -----
Keep in mind to only correlate primary observables which have not been reweighted Keep in mind to only correlate primary observables which have not been reweighted
yet. The reweighting has to be applied after correlating the observables. yet. The reweighting has to be applied after correlating the observables.
Only works if a single ensemble is present in the Obs. Currently only works if ensembles are identical (this is not strictly necessary).
Currently only works if ensemble content is identical (this is not strictly necessary).
""" """
if len(obs_a.mc_names) > 1 or len(obs_b.mc_names) > 1:
raise ValueError('Error: Cannot correlate Obs that contain multiple ensembles.')
if sorted(obs_a.names) != sorted(obs_b.names): if sorted(obs_a.names) != sorted(obs_b.names):
raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
if len(obs_a.cov_names) or len(obs_b.cov_names): if len(obs_a.cov_names) or len(obs_b.cov_names):
raise ValueError('Error: Not possible to correlate Obs that contain covobs!') raise Exception('Error: Not possible to correlate Obs that contain covobs!')
for name in obs_a.names: for name in obs_a.names:
if obs_a.shape[name] != obs_b.shape[name]: if obs_a.shape[name] != obs_b.shape[name]:
raise ValueError('Shapes of ensemble', name, 'do not fit') raise Exception('Shapes of ensemble', name, 'do not fit')
if obs_a.idl[name] != obs_b.idl[name]: if obs_a.idl[name] != obs_b.idl[name]:
raise ValueError('idl of ensemble', name, 'do not fit') raise Exception('idl of ensemble', name, 'do not fit')
if obs_a.reweighted is True: if obs_a.reweighted is True:
warnings.warn("The first observable is already reweighted.", RuntimeWarning) warnings.warn("The first observable is already reweighted.", RuntimeWarning)
@ -1548,92 +1544,6 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
return cov return cov
def invert_corr_cov_cholesky(corr, inverrdiag):
"""Constructs a lower triangular matrix `chol` via the Cholesky decomposition of the correlation matrix `corr`
and then returns the inverse covariance matrix `chol_inv` as a lower triangular matrix by solving `chol * x = inverrdiag`.
Parameters
----------
corr : np.ndarray
correlation matrix
inverrdiag : np.ndarray
diagonal matrix whose entries are the inverse errors of the data points considered
"""
condn = np.linalg.cond(corr)
if condn > 0.1 / np.finfo(float).eps:
raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
if condn > 1e13:
warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
chol = np.linalg.cholesky(corr)
chol_inv = scipy.linalg.solve_triangular(chol, inverrdiag, lower=True)
return chol_inv
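A minimal sketch of how the returned factor relates to the inverse covariance matrix (all numbers are made up):
import numpy as np
import scipy.linalg

corr = np.array([[1.0, 0.3], [0.3, 1.0]])   # hypothetical correlation matrix
errs = np.array([0.1, 0.2])                 # hypothetical errors of the data points
inverrdiag = np.diag(1 / errs)
chol_inv = scipy.linalg.solve_triangular(np.linalg.cholesky(corr), inverrdiag, lower=True)
cov = np.diag(errs) @ corr @ np.diag(errs)
assert np.allclose(chol_inv.T @ chol_inv, np.linalg.inv(cov))   # chol_inv factors the inverse covariance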
def sort_corr(corr, kl, yd):
""" Reorders a correlation matrix to match the alphabetical order of its underlying y data.
The ordering of the input correlation matrix `corr` is given by the list of keys `kl`.
The input dictionary `yd` (with the same keys `kl`) must contain the corresponding y data
that the correlation matrix is based on.
This function sorts the list of keys `kl` alphabetically and reorders the matrix `corr`
accordingly, such that the sorted matrix `corr_sorted` corresponds to the y data `yd`
arranged alphabetically by its keys.
Parameters
----------
corr : np.ndarray
A square correlation matrix constructed using the order of the y data specified by `kl`.
The dimensions of `corr` should match the total number of y data points in `yd` combined.
kl : list of str
A list of keys that denotes the order in which the y data from `yd` was used to build the
input correlation matrix `corr`.
yd : dict of list
A dictionary where each key corresponds to a unique identifier, and its value is a list of
y data points. The total number of y data points across all keys must match the dimensions
of `corr`. The lists in the dictionary can be lists of Obs.
Returns
-------
np.ndarray
A new, sorted correlation matrix that corresponds to the y data from `yd` when arranged alphabetically by its keys.
Example
-------
>>> import numpy as np
>>> import pyerrors as pe
>>> corr = np.array([[1, 0.2, 0.3], [0.2, 1, 0.4], [0.3, 0.4, 1]])
>>> kl = ['b', 'a']
>>> yd = {'a': [1, 2], 'b': [3]}
>>> sorted_corr = pe.obs.sort_corr(corr, kl, yd)
>>> print(sorted_corr)
array([[1. , 0.3, 0.4],
[0.3, 1. , 0.2],
[0.4, 0.2, 1. ]])
"""
kl_sorted = sorted(kl)
posd = {}
ofs = 0
for ki, k in enumerate(kl):
posd[k] = [i + ofs for i in range(len(yd[k]))]
ofs += len(posd[k])
mapping = []
for k in kl_sorted:
for i in range(len(yd[k])):
mapping.append(posd[k][i])
corr_sorted = np.zeros_like(corr)
for i in range(corr.shape[0]):
for j in range(corr.shape[0]):
corr_sorted[i][j] = corr[mapping[i]][mapping[j]]
return corr_sorted
def _smooth_eigenvalues(corr, E): def _smooth_eigenvalues(corr, E):
"""Eigenvalue smoothing as described in hep-lat/9412087 """Eigenvalue smoothing as described in hep-lat/9412087
@ -1643,7 +1553,7 @@ def _smooth_eigenvalues(corr, E):
Number of eigenvalues to be left substantially unchanged Number of eigenvalues to be left substantially unchanged
""" """
if not (2 < E < corr.shape[0] - 1): if not (2 < E < corr.shape[0] - 1):
raise ValueError(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
vals, vec = np.linalg.eigh(corr) vals, vec = np.linalg.eigh(corr)
lambda_min = np.mean(vals[:-E]) lambda_min = np.mean(vals[:-E])
vals[vals < lambda_min] = lambda_min vals[vals < lambda_min] = lambda_min
@ -1762,11 +1672,7 @@ def import_bootstrap(boots, name, random_numbers):
def merge_obs(list_of_obs): def merge_obs(list_of_obs):
"""Combine all observables in list_of_obs into one new observable. """Combine all observables in list_of_obs into one new observable
This makes it possible to merge Obs that have been computed on multiple replica
of the same ensemble.
If you would like to merge Obs that are based on several ensembles, please
average them yourself.
Parameters Parameters
---------- ----------
@ -1779,9 +1685,9 @@ def merge_obs(list_of_obs):
""" """
replist = [item for obs in list_of_obs for item in obs.names] replist = [item for obs in list_of_obs for item in obs.names]
if (len(replist) == len(set(replist))) is False: if (len(replist) == len(set(replist))) is False:
raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist))) raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
if any([len(o.cov_names) for o in list_of_obs]): if any([len(o.cov_names) for o in list_of_obs]):
raise ValueError('Not possible to merge data that contains covobs!') raise Exception('Not possible to merge data that contains covobs!')
new_dict = {} new_dict = {}
idl_dict = {} idl_dict = {}
for o in list_of_obs: for o in list_of_obs:
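Under the clarified semantics a hedged usage sketch looks like this (ensemble and replica names are made up):
import numpy as np
import pyerrors as pe

r1 = pe.Obs([np.random.normal(1, .1, 100)], ['ensA|r1'])
r2 = pe.Obs([np.random.normal(1, .1, 100)], ['ensA|r2'])
merged = pe.merge_obs([r1, r2])   # one Obs defined on both replica of ensA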
@ -1832,7 +1738,7 @@ def cov_Obs(means, cov, name, grad=None):
for i in range(len(means)): for i in range(len(means)):
ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
if ol[0].covobs[name].N != len(means): if ol[0].covobs[name].N != len(means):
raise ValueError('You have to provide %d mean values!' % (ol[0].N)) raise Exception('You have to provide %d mean values!' % (ol[0].N))
if len(ol) == 1: if len(ol) == 1:
return ol[0] return ol[0]
return ol return ol
@ -1848,7 +1754,7 @@ def _determine_gap(o, e_content, e_name):
gap = min(gaps) gap = min(gaps)
if not np.all([gi % gap == 0 for gi in gaps]): if not np.all([gi % gap == 0 for gi in gaps]):
raise ValueError(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
return gap return gap

View file

@ -1 +1 @@
__version__ = "2.15.0-dev" __version__ = "2.12.0"

View file

@ -1,6 +1,3 @@
[build-system] [build-system]
requires = ["setuptools >= 63.0.0", "wheel"] requires = ["setuptools >= 63.0.0", "wheel"]
build-backend = "setuptools.build_meta" build-backend = "setuptools.build_meta"
[tool.ruff.lint]
ignore = ["F403"]

View file

@ -36,7 +36,6 @@ setup(name='pyerrors',
'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: 3.13',
'Topic :: Scientific/Engineering :: Physics' 'Topic :: Scientific/Engineering :: Physics'
], ],
) )

View file

@ -129,7 +129,7 @@ def test_m_eff():
with pytest.warns(RuntimeWarning): with pytest.warns(RuntimeWarning):
my_corr.m_eff('sinh') my_corr.m_eff('sinh')
with pytest.raises(ValueError): with pytest.raises(Exception):
my_corr.m_eff('unkown_variant') my_corr.m_eff('unkown_variant')
@ -140,7 +140,7 @@ def test_m_eff_negative_values():
assert m_eff_log[padding + 1] is None assert m_eff_log[padding + 1] is None
m_eff_cosh = my_corr.m_eff('cosh') m_eff_cosh = my_corr.m_eff('cosh')
assert m_eff_cosh[padding + 1] is None assert m_eff_cosh[padding + 1] is None
with pytest.raises(ValueError): with pytest.raises(Exception):
my_corr.m_eff('logsym') my_corr.m_eff('logsym')
@ -155,7 +155,7 @@ def test_correlate():
my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')]) my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
corr1 = my_corr.correlate(my_corr) corr1 = my_corr.correlate(my_corr)
corr2 = my_corr.correlate(my_corr[0]) corr2 = my_corr.correlate(my_corr[0])
with pytest.raises(TypeError): with pytest.raises(Exception):
corr3 = my_corr.correlate(7.3) corr3 = my_corr.correlate(7.3)
@ -176,9 +176,9 @@ def test_fit_correlator():
assert fit_res[0] == my_corr[0] assert fit_res[0] == my_corr[0]
assert fit_res[1] == my_corr[1] - my_corr[0] assert fit_res[1] == my_corr[1] - my_corr[0]
with pytest.raises(TypeError): with pytest.raises(Exception):
my_corr.fit(f, "from 0 to 3") my_corr.fit(f, "from 0 to 3")
with pytest.raises(ValueError): with pytest.raises(Exception):
my_corr.fit(f, [0, 2, 3]) my_corr.fit(f, [0, 2, 3])
@ -256,11 +256,11 @@ def test_prange():
corr = pe.correlators.Corr(corr_content) corr = pe.correlators.Corr(corr_content)
corr.set_prange([2, 4]) corr.set_prange([2, 4])
with pytest.raises(ValueError): with pytest.raises(Exception):
corr.set_prange([2]) corr.set_prange([2])
with pytest.raises(TypeError): with pytest.raises(Exception):
corr.set_prange([2, 2.3]) corr.set_prange([2, 2.3])
with pytest.raises(ValueError): with pytest.raises(Exception):
corr.set_prange([4, 1]) corr.set_prange([4, 1])

View file

@ -30,7 +30,7 @@ def test_grid_dirac():
'SigmaYZ', 'SigmaYZ',
'SigmaZT']: 'SigmaZT']:
pe.dirac.Grid_gamma(gamma) pe.dirac.Grid_gamma(gamma)
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.dirac.Grid_gamma('Not a gamma matrix') pe.dirac.Grid_gamma('Not a gamma matrix')
@ -44,7 +44,7 @@ def test_epsilon_tensor():
(1, 1, 3) : 0.0} (1, 1, 3) : 0.0}
for key, value in check.items(): for key, value in check.items():
assert pe.dirac.epsilon_tensor(*key) == value assert pe.dirac.epsilon_tensor(*key) == value
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.dirac.epsilon_tensor(0, 1, 3) pe.dirac.epsilon_tensor(0, 1, 3)
@ -59,5 +59,5 @@ def test_epsilon_tensor_rank4():
(1, 2, 3, 1) : 0.0} (1, 2, 3, 1) : 0.0}
for key, value in check.items(): for key, value in check.items():
assert pe.dirac.epsilon_tensor_rank4(*key) == value assert pe.dirac.epsilon_tensor_rank4(*key) == value
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4) pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)

View file

@ -152,127 +152,6 @@ def test_alternative_solvers():
chisquare_values = np.array(chisquare_values) chisquare_values = np.array(chisquare_values)
assert np.all(np.isclose(chisquare_values, chisquare_values[0])) assert np.all(np.isclose(chisquare_values, chisquare_values[0]))
def test_inv_cov_matrix_input_least_squares():
num_samples = 400
N = 10
x = norm.rvs(size=(N, num_samples)) # generate random numbers
r = np.zeros((N, N))
for i in range(N):
for j in range(N):
r[i, j] = np.exp(-0.8 * np.fabs(i - j)) # element in correlation matrix
errl = np.sqrt([3.4, 2.5, 3.6, 2.8, 4.2, 4.7, 4.9, 5.1, 3.2, 4.2]) # set y errors
for i in range(N):
for j in range(N):
r[i, j] *= errl[i] * errl[j] # element in covariance matrix
c = cholesky(r, lower=True)
y = np.dot(c, x)
x = np.arange(N)
x_dict = {}
y_dict = {}
for i,item in enumerate(x):
x_dict[str(item)] = [x[i]]
for linear in [True, False]:
data = []
for i in range(N):
if linear:
data.append(pe.Obs([[i + 1 + o for o in y[i]]], ['ens']))
else:
data.append(pe.Obs([[np.exp(-(i + 1)) + np.exp(-(i + 1)) * o for o in y[i]]], ['ens']))
[o.gamma_method() for o in data]
data_dict = {}
for i,item in enumerate(x):
data_dict[str(item)] = [data[i]]
corr = pe.covariance(data, correlation=True)
chol = np.linalg.cholesky(corr)
covdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
chol_inv_keys = [""]
chol_inv_keys_combined_fit = [str(item) for i,item in enumerate(x)]
if linear:
def fitf(p, x):
return p[1] + p[0] * x
fitf_dict = {}
for i,item in enumerate(x):
fitf_dict[str(item)] = fitf
else:
def fitf(p, x):
return p[1] * anp.exp(-p[0] * x)
fitf_dict = {}
for i,item in enumerate(x):
fitf_dict[str(item)] = fitf
fitpc = pe.least_squares(x, data, fitf, correlated_fit=True)
fitp_inv_cov = pe.least_squares(x, data, fitf, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,chol_inv_keys])
fitp_inv_cov_combined_fit = pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,chol_inv_keys_combined_fit])
for i in range(2):
diff_inv_cov = fitp_inv_cov[i] - fitpc[i]
diff_inv_cov.gamma_method()
assert(diff_inv_cov.is_zero(atol=0.0))
diff_inv_cov_combined_fit = fitp_inv_cov_combined_fit[i] - fitpc[i]
diff_inv_cov_combined_fit.gamma_method()
assert(diff_inv_cov_combined_fit.is_zero(atol=1e-12))
with pytest.raises(ValueError):
pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [corr,chol_inv_keys_combined_fit])
def test_least_squares_invalid_inv_cov_matrix_input():
xvals = []
yvals = []
err = 0.1
def func_valid(a,x):
return a[0] + a[1] * x
for x in range(1, 8, 2):
xvals.append(x)
yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
[o.gamma_method() for o in yvals]
#dictionaries for a combined fit
xvals_dict = { }
yvals_dict = { }
for i,item in enumerate(np.arange(1, 8, 2)):
xvals_dict[str(item)] = [xvals[i]]
yvals_dict[str(item)] = [yvals[i]]
chol_inv_keys_combined_fit = ['1', '3', '5', '7']
chol_inv_keys_combined_fit_invalid = ['2', '7', '100', '8']
func_dict_valid = {"1": func_valid,"3": func_valid,"5": func_valid,"7": func_valid}
corr_valid = pe.covariance(yvals, correlation = True)
chol = np.linalg.cholesky(corr_valid)
covdiag = np.diag(1 / np.asarray([o.dvalue for o in yvals]))
chol_inv_valid = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
chol_inv_keys = [""]
pe.least_squares(xvals, yvals,func_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys])
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit])
chol_inv_invalid_shape1 = np.zeros((len(yvals),len(yvals)-1))
chol_inv_invalid_shape2 = np.zeros((len(yvals)+2,len(yvals)))
# for an uncombined fit
with pytest.raises(TypeError):
pe.least_squares(xvals, yvals, func_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape1,chol_inv_keys])
with pytest.raises(TypeError):
pe.least_squares(xvals, yvals, func_valid,correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape2,chol_inv_keys])
with pytest.raises(ValueError):
pe.least_squares(xvals, yvals, func_valid,correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit_invalid])
#repeat for a combined fit
with pytest.raises(TypeError):
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape1,chol_inv_keys_combined_fit])
with pytest.raises(TypeError):
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape2,chol_inv_keys_combined_fit])
with pytest.raises(ValueError):
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit_invalid])
def test_correlated_fit(): def test_correlated_fit():
num_samples = 400 num_samples = 400
@ -1085,20 +964,6 @@ def test_combined_resplot_qqplot():
fr = pe.least_squares(xd, yd, fd, resplot=True, qqplot=True) fr = pe.least_squares(xd, yd, fd, resplot=True, qqplot=True)
plt.close('all') plt.close('all')
def test_combined_fit_xerr():
fitd = {
'a' : lambda p, x: p[0] * x[0] + p[1] * x[1],
'b' : lambda p, x: p[0] * x[0] + p[2] * x[1],
'c' : lambda p, x: p[0] * x[0] + p[3] * x[1],
}
yd = {
'a': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'a' + str(i)) for i in range(5)],
'b': [pe.cov_Obs(1 + .1 * np.random.uniform(), .1**2, 'b' + str(i)) for i in range(6)],
'c': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'c' + str(i)) for i in range(3)],
}
xd = {k: np.transpose([[1 + .01 * np.random.uniform(), 2] for i in range(len(yd[k]))]) for k in fitd}
pe.fits.least_squares(xd, yd, fitd)
def test_x_multidim_fit(): def test_x_multidim_fit():
x1 = np.arange(1, 10) x1 = np.arange(1, 10)

View file

@ -12,7 +12,7 @@ def test_jsonio():
o = pe.pseudo_Obs(1.0, .2, 'one') o = pe.pseudo_Obs(1.0, .2, 'one')
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1') o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2') o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)]) o4 = pe.merge_obs([o2, o3])
otag = 'This has been merged!' otag = 'This has been merged!'
o4.tag = otag o4.tag = otag
do = o - .2 * o4 do = o - .2 * o4
@ -101,8 +101,8 @@ def test_json_string_reconstruction():
def test_json_corr_io(): def test_json_corr_io():
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]) for o in range(8)] my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]), my_list) rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']), my_list)
for obs_list in [my_list, rw_list]: for obs_list in [my_list, rw_list]:
for tag in [None, "test"]: for tag in [None, "test"]:
@ -111,8 +111,7 @@ def test_json_corr_io():
for corr_tag in [None, 'my_Corr_tag']: for corr_tag in [None, 'my_Corr_tag']:
for prange in [None, [3, 6]]: for prange in [None, [3, 6]]:
for gap in [False, True]: for gap in [False, True]:
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]: my_corr = pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr = mult * pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr.tag = corr_tag my_corr.tag = corr_tag
if gap: if gap:
my_corr.content[4] = None my_corr.content[4] = None
@ -129,23 +128,13 @@ def test_json_corr_io():
def test_json_corr_2d_io(): def test_json_corr_2d_io():
obs_list = [np.array([ obs_list = [np.array([[pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'), pe.pseudo_Obs(0.0, 0.1 * i, 'test')], [pe.pseudo_Obs(0.0, 0.1 * i, 'test'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')]]) for i in range(4)]
[
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321)]),
],
[
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321),]),
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
],
]) for i in range(4)]
for tag in [None, "test"]: for tag in [None, "test"]:
obs_list[3][0, 1].tag = tag obs_list[3][0, 1].tag = tag
for padding in [0, 1]: for padding in [0, 1]:
for prange in [None, [3, 6]]: for prange in [None, [3, 6]]:
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]: my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr = mult * pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr.tag = tag my_corr.tag = tag
pe.input.json.dump_to_json(my_corr, 'corr') pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr') recover = pe.input.json.load_json('corr')
@ -222,7 +211,6 @@ def test_json_dict_io():
'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'), 'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'),
'se': None, 'se': None,
'sf': 1.2, 'sf': 1.2,
'k': pe.cov_Obs(.1, .001**2, 'cov') * pe.merge_obs([pe.pseudo_Obs(1.0, 0.1, 'test|r2'), pe.pseudo_Obs(1.0, 0.1, 'test|r1', samples=321)]),
} }
} }
@ -326,7 +314,7 @@ def test_dobsio():
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1') o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2') o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)]) o4 = pe.merge_obs([o2, o3])
otag = 'This has been merged!' otag = 'This has been merged!'
o4.tag = otag o4.tag = otag
do = o - .2 * o4 do = o - .2 * o4
@ -340,7 +328,7 @@ def test_dobsio():
o5 /= co2[0] o5 /= co2[0]
o5.tag = 2 * otag o5.tag = 2 * otag
tt1 = pe.Obs([np.random.rand(100), np.random.rand(102)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 226, 2)]) tt1 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 222, 2)])
tt3 = pe.Obs([np.random.rand(102)], ['qe|r1']) tt3 = pe.Obs([np.random.rand(102)], ['qe|r1'])
tt = tt1 + tt3 tt = tt1 + tt3
@ -349,7 +337,7 @@ def test_dobsio():
tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)]) tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)])
ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt), o4.reweight(o4)] ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt)]
print(ol) print(ol)
fname = 'test_rw' fname = 'test_rw'
@ -374,12 +362,9 @@ def test_dobsio():
def test_reconstruct_non_linear_r_obs(tmp_path): def test_reconstruct_non_linear_r_obs(tmp_path):
to = ( to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
pe.Obs([np.random.rand(500), np.random.rand(1200)], ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
["e|r1", "e|r2", ], idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = np.log(to ** 2) / to to = np.log(to ** 2) / to
to.dump((tmp_path / "test_equality").as_posix()) to.dump((tmp_path / "test_equality").as_posix())
ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix()) ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())
@ -387,12 +372,9 @@ def test_reconstruct_non_linear_r_obs(tmp_path):
def test_reconstruct_non_linear_r_obs_list(tmp_path): def test_reconstruct_non_linear_r_obs_list(tmp_path):
to = ( to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
pe.Obs([np.random.rand(500), np.random.rand(1200)], ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
["e|r1", "e|r2", ], idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = np.log(to ** 2) / to to = np.log(to ** 2) / to
for to_list in [[to, to, to], np.array([to, to, to])]: for to_list in [[to, to, to], np.array([to, to, to])]:
pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix()) pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())

View file

@ -34,7 +34,7 @@ def test_matmul():
my_list = [] my_list = []
length = 100 + np.random.randint(200) length = 100 + np.random.randint(200)
for i in range(dim ** 2): for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])) my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_array = const * np.array(my_list).reshape((dim, dim)) my_array = const * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt): for t, e in np.ndenumerate(tt):
@ -43,8 +43,8 @@ def test_matmul():
my_list = [] my_list = []
length = 100 + np.random.randint(200) length = 100 + np.random.randint(200)
for i in range(dim ** 2): for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']), my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))) pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_array = np.array(my_list).reshape((dim, dim)) * const my_array = np.array(my_list).reshape((dim, dim)) * const
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt): for t, e in np.ndenumerate(tt):
@ -151,7 +151,7 @@ def test_multi_dot():
my_list = [] my_list = []
length = 1000 + np.random.randint(200) length = 1000 + np.random.randint(200)
for i in range(dim ** 2): for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])) my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim)) my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt): for t, e in np.ndenumerate(tt):
@ -160,8 +160,8 @@ def test_multi_dot():
my_list = [] my_list = []
length = 1000 + np.random.randint(200) length = 1000 + np.random.randint(200)
for i in range(dim ** 2): for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']), my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))) pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt): for t, e in np.ndenumerate(tt):
@ -209,7 +209,7 @@ def test_irregular_matrix_inverse():
for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]: for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]:
irregular_array = [] irregular_array = []
for i in range(dim ** 2): for i in range(dim ** 2):
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]) + pe.Obs([np.random.normal(0.25, 0.1, 10)], ['ens2'], idl=[range(1, 11)])) irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23') irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')
invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T
@ -276,10 +276,10 @@ def test_matrix_functions():
for (i, j), entry in np.ndenumerate(check_inv): for (i, j), entry in np.ndenumerate(check_inv):
entry.gamma_method() entry.gamma_method()
if(i == j): if(i == j):
assert math.isclose(entry.value, 1.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value) assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
else: else:
assert math.isclose(entry.value, 0.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value) assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.dvalue, 0.0, abs_tol=2e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue) assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
# Check Cholesky decomposition # Check Cholesky decomposition
sym = np.dot(matrix, matrix.T) sym = np.dot(matrix, matrix.T)

View file

@ -61,9 +61,9 @@ def test_Obs_exceptions():
my_obs.plot_rep_dist() my_obs.plot_rep_dist()
with pytest.raises(Exception): with pytest.raises(Exception):
my_obs.plot_piechart() my_obs.plot_piechart()
with pytest.raises(TypeError): with pytest.raises(Exception):
my_obs.gamma_method(S='2.3') my_obs.gamma_method(S='2.3')
with pytest.raises(ValueError): with pytest.raises(Exception):
my_obs.gamma_method(tau_exp=2.3) my_obs.gamma_method(tau_exp=2.3)
my_obs.gamma_method() my_obs.gamma_method()
my_obs.details() my_obs.details()
@ -199,7 +199,7 @@ def test_gamma_method_no_windowing():
assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue) assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
obs.gamma_method(S=1.1) obs.gamma_method(S=1.1)
assert obs.e_tauint['ens'] > 0.5 assert obs.e_tauint['ens'] > 0.5
with pytest.raises(ValueError): with pytest.raises(Exception):
obs.gamma_method(S=-0.2) obs.gamma_method(S=-0.2)
@ -333,7 +333,7 @@ def test_derived_observables():
def test_multi_ens(): def test_multi_ens():
names = ['A0', 'A1|r001', 'A1|r002'] names = ['A0', 'A1|r001', 'A1|r002']
test_obs = pe.Obs([np.random.rand(50)], names[:1]) + pe.Obs([np.random.rand(50), np.random.rand(50)], names[1:]) test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
assert test_obs.e_names == ['A0', 'A1'] assert test_obs.e_names == ['A0', 'A1']
assert test_obs.e_content['A0'] == ['A0'] assert test_obs.e_content['A0'] == ['A0']
assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002'] assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002']
@ -345,9 +345,6 @@ def test_multi_ens():
ensembles.append(str(i)) ensembles.append(str(i))
assert my_sum.e_names == sorted(ensembles) assert my_sum.e_names == sorted(ensembles)
with pytest.raises(ValueError):
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
def test_multi_ens2(): def test_multi_ens2():
names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh'] names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh']
@ -464,18 +461,6 @@ def test_cobs_overloading():
obs / cobs obs / cobs
def test_pow():
data = [1, 2.341, pe.pseudo_Obs(4.8, 0.48, "test_obs"), pe.cov_Obs(1.1, 0.3 ** 2, "test_cov_obs")]
for d in data:
assert d * d == d ** 2
assert d * d * d == d ** 3
for d2 in data:
assert np.log(d ** d2) == d2 * np.log(d)
assert (d ** d2) ** (1 / d2) == d
def test_reweighting(): def test_reweighting():
my_obs = pe.Obs([np.random.rand(1000)], ['t']) my_obs = pe.Obs([np.random.rand(1000)], ['t'])
assert not my_obs.reweighted assert not my_obs.reweighted
@ -493,33 +478,26 @@ def test_reweighting():
r_obs2 = r_obs[0] * my_obs r_obs2 = r_obs[0] * my_obs
assert r_obs2.reweighted assert r_obs2.reweighted
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov') my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.reweight(my_obs, [my_covobs]) pe.reweight(my_obs, [my_covobs])
my_obs2 = pe.Obs([np.random.rand(1000)], ['t2']) my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.reweight(my_obs, [my_obs + my_obs2]) pe.reweight(my_obs, [my_obs + my_obs2])
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.reweight(my_irregular_obs, [my_obs]) pe.reweight(my_irregular_obs, [my_obs])
my_merged_obs = my_obs + pe.Obs([np.random.rand(1000)], ['q'])
with pytest.raises(ValueError):
pe.reweight(my_merged_obs, [my_merged_obs])
def test_merge_obs(): def test_merge_obs():
my_obs1 = pe.Obs([np.random.normal(1, .1, 100)], ['t|1']) my_obs1 = pe.Obs([np.random.rand(100)], ['t'])
my_obs2 = pe.Obs([np.random.normal(1, .1, 100)], ['t|2'], idl=[range(1, 200, 2)]) my_obs2 = pe.Obs([np.random.rand(100)], ['q'], idl=[range(1, 200, 2)])
merged = pe.merge_obs([my_obs1, my_obs2]) merged = pe.merge_obs([my_obs1, my_obs2])
diff = merged - (my_obs2 + my_obs1) / 2 diff = merged - my_obs2 - my_obs1
assert np.isclose(0, diff.value, atol=1e-16) assert diff == -(my_obs1.value + my_obs2.value) / 2
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.merge_obs([my_obs1, my_obs1]) pe.merge_obs([my_obs1, my_obs1])
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov') my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.merge_obs([my_obs1, my_covobs]) pe.merge_obs([my_obs1, my_covobs])
my_obs3 = pe.Obs([np.random.rand(100)], ['q|2'], idl=[range(1, 200, 2)])
with pytest.raises(ValueError):
pe.merge_obs([my_obs1, my_obs3])
@ -541,26 +519,23 @@ def test_correlate():
assert corr1 == corr2 assert corr1 == corr2
my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)]) my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.correlate(my_obs1, my_obs3) pe.correlate(my_obs1, my_obs3)
my_obs4 = pe.Obs([np.random.rand(99)], ['t']) my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.correlate(my_obs1, my_obs4) pe.correlate(my_obs1, my_obs4)
my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)]) my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)]) my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
corr3 = pe.correlate(my_obs5, my_obs6) corr3 = pe.correlate(my_obs5, my_obs6)
assert my_obs5.idl == corr3.idl assert my_obs5.idl == corr3.idl
my_obs7 = pe.Obs([np.random.rand(99)], ['q'])
with pytest.raises(ValueError):
pe.correlate(my_obs1, my_obs7)
my_new_obs = pe.Obs([np.random.rand(100)], ['q3']) my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.correlate(my_obs1, my_new_obs) pe.correlate(my_obs1, my_new_obs)
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov') my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError): with pytest.raises(Exception):
pe.correlate(my_covobs, my_covobs) pe.correlate(my_covobs, my_covobs)
r_obs = pe.reweight(my_obs1, [my_obs1])[0] r_obs = pe.reweight(my_obs1, [my_obs1])[0]
with pytest.warns(RuntimeWarning): with pytest.warns(RuntimeWarning):
@ -579,11 +554,11 @@ def test_merge_idx():
for j in range(5): for j in range(5):
idll = [range(1, int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)] idll = [range(1, int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll)) assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))
for j in range(5): for j in range(5):
idll = [range(int(round(np.random.uniform(1, 28))), int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)] idll = [range(int(round(np.random.uniform(1, 28))), int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll)) assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))
idl = [list(np.arange(1, 14)) + list(range(16, 100, 4)), range(4, 604, 4), [2, 4, 5, 6, 8, 9, 12, 24], range(1, 20, 1), range(50, 789, 7)] idl = [list(np.arange(1, 14)) + list(range(16, 100, 4)), range(4, 604, 4), [2, 4, 5, 6, 8, 9, 12, 24], range(1, 20, 1), range(50, 789, 7)]
new_idx = pe.obs._merge_idx(idl) new_idx = pe.obs._merge_idx(idl)
@ -694,14 +669,14 @@ def test_gamma_method_irregular():
assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue) assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue)
arr2 = np.random.normal(1, .2, size=N) arr2 = np.random.normal(1, .2, size=N)
afull = pe.Obs([arr], ['a1']) + pe.Obs([arr2], ['a2']) afull = pe.Obs([arr, arr2], ['a1', 'a2'])
configs = np.ones_like(arr2) configs = np.ones_like(arr2)
for i in np.random.uniform(0, len(arr2), size=int(.8*N)): for i in np.random.uniform(0, len(arr2), size=int(.8*N)):
configs[int(i)] = 0 configs[int(i)] = 0
zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0] zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0]
idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1] idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1]
a = pe.Obs([zero_arr], ['a1'], idl=[idx]) + pe.Obs([zero_arr2], ['a2'], idl=[idx2]) a = pe.Obs([zero_arr, zero_arr2], ['a1', 'a2'], idl=[idx, idx2])
afull.gamma_method() afull.gamma_method()
a.gamma_method() a.gamma_method()
@ -787,7 +762,7 @@ def test_gamma_method_irregular():
my_obs.gm() my_obs.gm()
idl += [range(1, 400, 4)] idl += [range(1, 400, 4)]
my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl) my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
with pytest.raises(ValueError): with pytest.raises(Exception):
my_obs.gm() my_obs.gm()
# check cases where tau is large compared to the chain length # check cases where tau is large compared to the chain length
@ -1035,7 +1010,7 @@ def test_correlation_intersection_of_idls():
def test_covariance_non_identical_objects(): def test_covariance_non_identical_objects():
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000)], ["ens|r1", "ens|r2"]) + pe.Obs([np.random.normal(1.0, 0.1, 732)], ['ens2']) obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
obs1.gamma_method() obs1.gamma_method()
obs2 = obs1 + 1e-18 obs2 = obs1 + 1e-18
obs2.gamma_method() obs2.gamma_method()
@ -1088,27 +1063,6 @@ def test_covariance_reorder_non_overlapping_data():
assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14) assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)
def test_sort_corr():
xd = {
'b': [1, 2, 3],
'a': [2.2, 4.4],
'c': [3.7, 5.1]
}
yd = {k : pe.cov_Obs(xd[k], [.2 * o for o in xd[k]], k) for k in xd}
key_orig = list(yd.keys())
y_all = np.concatenate([np.array(yd[key]) for key in key_orig])
[o.gm() for o in y_all]
cov = pe.covariance(y_all)
key_ls = key_sorted = sorted(key_orig)
y_sorted = np.concatenate([np.array(yd[key]) for key in key_sorted])
[o.gm() for o in y_sorted]
cov_sorted = pe.covariance(y_sorted)
retcov = pe.obs.sort_corr(cov, key_orig, yd)
assert np.sum(retcov - cov_sorted) == 0
def test_empty_obs(): def test_empty_obs():
o = pe.Obs([np.random.rand(100)], ['test']) o = pe.Obs([np.random.rand(100)], ['test'])
q = o + pe.Obs([], [], means=[]) q = o + pe.Obs([], [], means=[])
@ -1119,9 +1073,6 @@ def test_reweight_method():
obs1 = pe.pseudo_Obs(0.2, 0.01, 'test') obs1 = pe.pseudo_Obs(0.2, 0.01, 'test')
rw = pe.pseudo_Obs(0.999, 0.001, 'test') rw = pe.pseudo_Obs(0.999, 0.001, 'test')
assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0] assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0]
rw2 = pe.pseudo_Obs(0.999, 0.001, 'test2')
with pytest.raises(ValueError):
obs1.reweight(rw2)
def test_jackknife(): def test_jackknife():
@ -1138,7 +1089,7 @@ def test_jackknife():
assert np.allclose(tmp_jacks, my_obs.export_jackknife()) assert np.allclose(tmp_jacks, my_obs.export_jackknife())
my_new_obs = my_obs + pe.Obs([full_data], ['test2']) my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
with pytest.raises(ValueError): with pytest.raises(Exception):
my_new_obs.export_jackknife() my_new_obs.export_jackknife()

View file

@ -387,33 +387,3 @@ def test_find_correlator():
found_start, found_T = sfin._find_correlator(file, "2.0", "name f_A\nquarks lquark lquark\noffset 0\nwf 0", False, False) found_start, found_T = sfin._find_correlator(file, "2.0", "name f_A\nquarks lquark lquark\noffset 0\nwf 0", False, False)
assert found_start == 21 assert found_start == 21
assert found_T == 3 assert found_T == 3
def test_get_rep_name():
names = ['data_r0', 'data_r1', 'data_r2']
new_names = sfin._get_rep_names(names)
assert len(new_names) == 3
assert new_names[0] == 'data_|r0'
assert new_names[1] == 'data_|r1'
assert new_names[2] == 'data_|r2'
names = ['data_q0', 'data_q1', 'data_q2']
new_names = sfin._get_rep_names(names, rep_sep='q')
assert len(new_names) == 3
assert new_names[0] == 'data_|q0'
assert new_names[1] == 'data_|q1'
assert new_names[2] == 'data_|q2'
def test_get_appended_rep_name():
names = ['data_r0.f_1', 'data_r1.f_1', 'data_r2.f_1']
new_names = sfin._get_appended_rep_names(names, 'data', 'f_1')
assert len(new_names) == 3
assert new_names[0] == 'data_|r0'
assert new_names[1] == 'data_|r1'
assert new_names[2] == 'data_|r2'
names = ['data_q0.f_1', 'data_q1.f_1', 'data_q2.f_1']
new_names = sfin._get_appended_rep_names(names, 'data', 'f_1', rep_sep='q')
assert len(new_names) == 3
assert new_names[0] == 'data_|q0'
assert new_names[1] == 'data_|q1'
assert new_names[2] == 'data_|q2'