Compare commits


No commits in common. "develop" and "v2.9.0" have entirely different histories.

43 changed files with 543 additions and 2190 deletions

.github/CODEOWNERS

@ -2,5 +2,3 @@
/pyerrors/covobs.py @s-kuberski
/pyerrors/input/json.py @s-kuberski
/pyerrors/input/dobs.py @s-kuberski
/pyerrors/input/sfcf.py @jkuhl-uni
/tests/sfcf_in_test.py @jkuhl-uni

.github/workflows/codeql.yml

@ -0,0 +1,39 @@
name: "CodeQL"
on:
push:
branches:
- master
- develop
pull_request:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"


@ -11,10 +11,10 @@ jobs:
steps:
- name: Set up Python environment
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: "3.10"
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Updated documentation
run: |
git config --global user.email "${{ github.actor }}@users.noreply.github.com"


@ -6,7 +6,6 @@ on:
- master
- develop
pull_request:
workflow_dispatch:
schedule:
- cron: '0 4 1 * *'
@ -17,27 +16,27 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.10", "3.12"]
python-version: ["3.8", "3.10"]
steps:
- name: Checkout source
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: uv
uses: astral-sh/setup-uv@v5
- name: Install
run: |
sudo apt-get update
sudo apt-get install dvipng texlive-latex-extra texlive-fonts-recommended cm-super
uv pip install wheel --system
uv pip install . --system
uv pip install pytest nbmake --system
uv pip install -U matplotlib!=3.7.0 --system # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
python -m pip install --upgrade pip
pip install wheel
pip install .
pip install pytest
pip install nbmake
pip install -U matplotlib!=3.7.0 # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
- name: Run tests
run: pytest -vv --nbmake examples/*.ipynb


@ -13,9 +13,9 @@ jobs:
name: Lint
steps:
- name: Check out source repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Python environment
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: flake8 Lint


@ -6,7 +6,6 @@ on:
- master
- develop
pull_request:
workflow_dispatch:
schedule:
- cron: '0 4 1 * *'
@ -17,30 +16,31 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
python-version: ["3.8", "3.9", "3.10", "3.11"]
include:
- os: macos-latest
python-version: "3.12"
- os: ubuntu-24.04-arm
python-version: "3.12"
python-version: "3.10"
steps:
- name: Checkout source
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: uv
uses: astral-sh/setup-uv@v5
- name: Install
run: |
uv pip install wheel --system
uv pip install . --system
uv pip install pytest pytest-cov pytest-benchmark hypothesis --system
uv pip freeze --system
python -m pip install --upgrade pip
pip install wheel
pip install .
pip install pytest
pip install pytest-cov
pip install pytest-benchmark
pip install hypothesis
pip install py
pip freeze
- name: Run tests
run: pytest --cov=pyerrors -vv
run: pytest --cov=pyerrors -vv -Werror


@ -1,58 +0,0 @@
name: Release
on:
workflow_dispatch:
release:
types: [published]
jobs:
build:
name: Build sdist and wheel
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
name: Checkout repository
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install pypa/build
run: >-
python3 -m
pip install
build
--user
- name: Build wheel and source tarball
run: python3 -m build
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: python-package-distributions
path: dist/
if-no-files-found: error
publish:
needs: [build]
name: Upload to PyPI
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/pyerrors
permissions:
id-token: write
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Sanity check
run: ls -la dist/
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1


@ -1,15 +0,0 @@
name: ruff
on:
push:
branches:
- master
- develop
pull_request:
jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: astral-sh/ruff-action@v2
with:
src: "./pyerrors"


@ -2,64 +2,6 @@
All notable changes to this project will be documented in this file.
## [2.14.0] - 2025-03-09
### Added
- Explicit checks of the provided inverse matrix for correlated fits #259
### Changed
- Compute derivative for pow explicitly instead of relying on autograd. This results in a ~4x speedup for pow operations #246
- More explicit exception types #248
### Fixed
- Removed the possibility to create an Obs from data on several replica #258
- Fix range in `set_prange` #247
- Fix ensemble name handling in sfcf input modules #253
- Correct error message for fit shape mismatch #257
## [2.13.0] - 2024-11-03
### Added
- Allow providing lower triangular matrix constructed from a Cholesky decomposition in least squares function for correlated fits.
### Fixed
- Corrected bug that prevented combined fits with multiple x-obs in some cases.
## [2.12.0] - 2024-08-22
### Changed
- Support for numpy 2 was added via a new autograd release
- Support for python<3.9 was dropped and dependencies were updated.
### Fixed
- Minor bug fixes in input.sfcf
## [2.11.1] - 2024-04-25
### Fixed
- Fixed a bug in error computation when combining two Obs from the same ensemble and fluctuations on one replicum are not part of one of the Obs.
## [2.11.0] - 2024-04-01
### Added
- New special function module.
### Fixed
- Various bug fixes in input module.
## [2.10.0] - 2023-11-24
### Added
- More efficient implementation of read_sfcf
- added support for addition and multiplication of complex numbers to Corr objects
- the Corr.GEVP method can now also propagate the errors for the eigenvectors
### Fixed
- Fixed bug in combined fit with multiple independent variables
- Check for invalid set of configuration numbers added when initializing an Obs object.
- Fixed a bug in hadrons.read_hdf5
## [2.9.0] - 2023-07-20
### Added
- Vectorized `gamma_method` added which can be applied to lists or arrays of pyerrors objects.


@ -1,4 +1,4 @@
[![](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![arXiv](https://img.shields.io/badge/arXiv-2209.14371-b31b1b.svg)](https://arxiv.org/abs/2209.14371) [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2023.108750-blue)](https://doi.org/10.1016/j.cpc.2023.108750)
[![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![arXiv](https://img.shields.io/badge/arXiv-2209.14371-b31b1b.svg)](https://arxiv.org/abs/2209.14371) [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2023.108750-blue)](https://doi.org/10.1016/j.cpc.2023.108750)
# pyerrors
`pyerrors` is a python framework for error computation and propagation of Markov chain Monte Carlo data from lattice field theory and statistical mechanics simulations.
@ -14,6 +14,11 @@ Install the most recent release using pip and [pypi](https://pypi.org/project/py
python -m pip install pyerrors # Fresh install
python -m pip install -U pyerrors # Update
```
Install the most recent release using conda and [conda-forge](https://anaconda.org/conda-forge/pyerrors):
```bash
conda install -c conda-forge pyerrors # Fresh install
conda update -c conda-forge pyerrors # Update
```
## Contributing
We appreciate all contributions to the code, the documentation and the examples. If you want to get involved please have a look at our [contribution guideline](https://github.com/fjosw/pyerrors/blob/develop/CONTRIBUTING.md).
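The README introduction above describes the basic error-propagation workflow. As a point of reference (not part of the diff), a minimal sketch using the documented `Obs` constructor and `gamma_method`; the ensemble name and sample values are made up for illustration:

```python
import numpy as np
import pyerrors as pe

# Synthetic Monte Carlo history for a single, hypothetical ensemble.
rng = np.random.default_rng(1)
samples = rng.normal(1.0, 0.1, 1000)

obs = pe.Obs([samples], ['ensemble_A'])  # one replicum of the made-up ensemble 'ensemble_A'
obs.gamma_method()                       # automatic error estimation via the Gamma method
print(obs)
```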


@ -34,7 +34,7 @@
"source": [
"plt.style.use('./base_style.mplstyle')\n",
"import shutil\n",
"usetex = shutil.which('latex') not in ('', None)\n",
"usetex = shutil.which('latex') != ''\n",
"plt.rc('text', usetex=usetex)"
]
},


@ -22,7 +22,7 @@
"source": [
"plt.style.use('./base_style.mplstyle')\n",
"import shutil\n",
"usetex = shutil.which('latex') not in ('', None)\n",
"usetex = shutil.which('latex') != ''\n",
"plt.rc('text', usetex=usetex)"
]
},


@ -20,7 +20,7 @@
"source": [
"plt.style.use('./base_style.mplstyle')\n",
"import shutil\n",
"usetex = shutil.which('latex') not in ('', None)\n",
"usetex = shutil.which('latex') != ''\n",
"plt.rc('text', usetex=usetex)"
]
},

File diff suppressed because one or more lines are too long


@ -190,7 +190,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[7.2(1.7) -1.00(46)]\n"
"[7.2(1.7) -1.00(45)]\n"
]
}
],
@ -243,7 +243,7 @@
"output_type": "stream",
"text": [
"[[2.025(49) 0.0]\n",
" [-0.494(51) 0.870(29)]]\n"
" [-0.494(50) 0.870(29)]]\n"
]
}
],
@ -296,7 +296,7 @@
"output_type": "stream",
"text": [
"[[0.494(12) 0.0]\n",
" [0.280(40) 1.150(39)]]\n",
" [0.280(40) 1.150(38)]]\n",
"Check:\n",
"[[1.0 0.0]\n",
" [0.0 1.0]]\n"
@ -330,10 +330,10 @@
"output_type": "stream",
"text": [
"Eigenvalues:\n",
"[0.705(57) 4.39(19)]\n",
"[0.705(56) 4.39(20)]\n",
"Eigenvectors:\n",
"[[-0.283(26) -0.9592(76)]\n",
" [-0.9592(76) 0.283(26)]]\n"
"[[-0.283(26) -0.9592(75)]\n",
" [-0.9592(75) 0.283(26)]]\n"
]
}
],
@ -363,13 +363,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[[ True True]\n",
" [ True True]]\n"
"Check eigenvector 1\n",
"[-5.551115123125783e-17 0.0]\n",
"Check eigenvector 2\n",
"[0.0 -2.220446049250313e-16]\n"
]
}
],
"source": [
"print(matrix @ v == e * v)"
"for i in range(2):\n",
" print('Check eigenvector', i + 1)\n",
" print(matrix @ v[:, i] - v[:, i] * e[i])"
]
},
{


@ -21,7 +21,7 @@
"source": [
"plt.style.use('./base_style.mplstyle')\n",
"import shutil\n",
"usetex = shutil.which('latex') not in ('', None)\n",
"usetex = shutil.which('latex') != ''\n",
"plt.rc('text', usetex=usetex)"
]
},


@ -481,12 +481,11 @@ from .obs import *
from .correlators import *
from .fits import *
from .misc import *
from . import dirac as dirac
from . import input as input
from . import linalg as linalg
from . import mpm as mpm
from . import roots as roots
from . import integrate as integrate
from . import special as special
from . import dirac
from . import input
from . import linalg
from . import mpm
from . import roots
from . import integrate
from .version import __version__ as __version__
from .version import __version__


@ -8,7 +8,6 @@ from .obs import Obs, reweight, correlate, CObs
from .misc import dump_object, _assert_equal_properties
from .fits import least_squares
from .roots import find_root
from . import linalg
class Corr:
@ -101,7 +100,7 @@ class Corr:
self.N = 1
elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
self.content = data_input
noNull = [a for a in self.content if a is not None] # To check if the matrices are correct for all undefined elements
noNull = [a for a in self.content if not (a is None)] # To check if the matrices are correct for all undefined elements
self.N = noNull[0].shape[0]
if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
raise ValueError("Smearing matrices are not NxN.")
@ -141,7 +140,7 @@ class Corr:
def gamma_method(self, **kwargs):
"""Apply the gamma method to the content of the Corr."""
for item in self.content:
if item is not None:
if not (item is None):
if self.N == 1:
item[0].gamma_method(**kwargs)
else:
@ -159,7 +158,7 @@ class Corr:
By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
"""
if self.N == 1:
raise ValueError("Trying to project a Corr, that already has N=1.")
raise Exception("Trying to project a Corr, that already has N=1.")
if vector_l is None:
vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
@ -167,16 +166,16 @@ class Corr:
vector_r = vector_l
if isinstance(vector_l, list) and not isinstance(vector_r, list):
if len(vector_l) != self.T:
raise ValueError("Length of vector list must be equal to T")
raise Exception("Length of vector list must be equal to T")
vector_r = [vector_r] * self.T
if isinstance(vector_r, list) and not isinstance(vector_l, list):
if len(vector_r) != self.T:
raise ValueError("Length of vector list must be equal to T")
raise Exception("Length of vector list must be equal to T")
vector_l = [vector_l] * self.T
if not isinstance(vector_l, list):
if not vector_l.shape == vector_r.shape == (self.N,):
raise ValueError("Vectors are of wrong shape!")
raise Exception("Vectors are of wrong shape!")
if normalize:
vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
@ -201,7 +200,7 @@ class Corr:
Second index to be picked.
"""
if self.N == 1:
raise ValueError("Trying to pick item from projected Corr")
raise Exception("Trying to pick item from projected Corr")
newcontent = [None if (item is None) else item[i, j] for item in self.content]
return Corr(newcontent)
@ -212,8 +211,8 @@ class Corr:
timeslice and the error on each timeslice.
"""
if self.N != 1:
raise ValueError("Can only make Corr[N=1] plottable")
x_list = [x for x in range(self.T) if self.content[x] is not None]
raise Exception("Can only make Corr[N=1] plottable")
x_list = [x for x in range(self.T) if not self.content[x] is None]
y_list = [y[0].value for y in self.content if y is not None]
y_err_list = [y[0].dvalue for y in self.content if y is not None]
@ -222,9 +221,9 @@ class Corr:
def symmetric(self):
""" Symmetrize the correlator around x0=0."""
if self.N != 1:
raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.')
if self.T % 2 != 0:
raise ValueError("Can not symmetrize odd T")
raise Exception("Can not symmetrize odd T")
if self.content[0] is not None:
if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:
@ -237,7 +236,7 @@ class Corr:
else:
newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
if (all([x is None for x in newcontent])):
raise ValueError("Corr could not be symmetrized: No redundant values")
raise Exception("Corr could not be symmetrized: No redundant values")
return Corr(newcontent, prange=self.prange)
def anti_symmetric(self):
@ -245,7 +244,7 @@ class Corr:
if self.N != 1:
raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
if self.T % 2 != 0:
raise ValueError("Can not symmetrize odd T")
raise Exception("Can not symmetrize odd T")
test = 1 * self
test.gamma_method()
@ -259,7 +258,7 @@ class Corr:
else:
newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
if (all([x is None for x in newcontent])):
raise ValueError("Corr could not be symmetrized: No redundant values")
raise Exception("Corr could not be symmetrized: No redundant values")
return Corr(newcontent, prange=self.prange)
def is_matrix_symmetric(self):
@ -292,14 +291,14 @@ class Corr:
def matrix_symmetric(self):
"""Symmetrizes the correlator matrices on every timeslice."""
if self.N == 1:
raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.")
if self.is_matrix_symmetric():
return 1.0 * self
else:
transposed = [None if _check_for_none(self, G) else G.T for G in self.content]
return 0.5 * (Corr(transposed) + self)
def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
def GEVP(self, t0, ts=None, sort="Eigenvalue", **kwargs):
r'''Solve the generalized eigenvalue problem on the correlator matrix and returns the corresponding eigenvectors.
The eigenvectors are sorted according to the descending eigenvalues, the zeroth eigenvector(s) correspond to the
@ -318,28 +317,21 @@ class Corr:
If sort="Eigenvector" it gives a reference point for the sorting method.
sort : string
If this argument is set, a list of self.T vectors per state is returned. If it is set to None, only one vector is returned.
- "Eigenvalue": The eigenvector is chosen according to which eigenvalue it belongs individually on every timeslice. (default)
- "Eigenvalue": The eigenvector is chosen according to which eigenvalue it belongs individually on every timeslice.
- "Eigenvector": Use the method described in arXiv:2004.10472 to find the set of v(t) belonging to the state.
The reference state is identified by its eigenvalue at $t=t_s$.
- None: The GEVP is solved only at ts, no sorting is necessary
vector_obs : bool
If True, uncertainties are propagated in the eigenvector computation (default False).
Other Parameters
----------------
state : int
Returns only the vector(s) for a specified state. The lowest state is zero.
method : str
Method used to solve the GEVP.
- "eigh": Use scipy.linalg.eigh to solve the GEVP. (default for vector_obs=False)
- "cholesky": Use manually implemented solution via the Cholesky decomposition. Automatically chosen if vector_obs==True.
'''
if self.N == 1:
raise ValueError("GEVP methods only works on correlator matrices and not single correlators.")
raise Exception("GEVP methods only works on correlator matrices and not single correlators.")
if ts is not None:
if (ts <= t0):
raise ValueError("ts has to be larger than t0.")
raise Exception("ts has to be larger than t0.")
if "sorted_list" in kwargs:
warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)
@ -350,34 +342,16 @@ class Corr:
else:
symmetric_corr = self.matrix_symmetric()
def _get_mat_at_t(t, vector_obs=vector_obs):
if vector_obs:
return symmetric_corr[t]
else:
return np.vectorize(lambda x: x.value)(symmetric_corr[t])
G0 = _get_mat_at_t(t0)
method = kwargs.get('method', 'eigh')
if vector_obs:
chol = linalg.cholesky(G0)
chol_inv = linalg.inv(chol)
method = 'cholesky'
else:
chol = np.linalg.cholesky(_get_mat_at_t(t0, vector_obs=False)) # Check if matrix G0 is positive-semidefinite.
if method == 'cholesky':
chol_inv = np.linalg.inv(chol)
else:
chol_inv = None
G0 = np.vectorize(lambda x: x.value)(symmetric_corr[t0])
np.linalg.cholesky(G0) # Check if matrix G0 is positive-semidefinite.
if sort is None:
if (ts is None):
raise ValueError("ts is required if sort=None.")
raise Exception("ts is required if sort=None.")
if (self.content[t0] is None) or (self.content[ts] is None):
raise ValueError("Corr not defined at t0/ts.")
Gt = _get_mat_at_t(ts)
reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv)
if kwargs.get('auto_gamma', False) and vector_obs:
[[o.gm() for o in ev if isinstance(o, Obs)] for ev in reordered_vecs]
raise Exception("Corr not defined at t0/ts.")
Gt = np.vectorize(lambda x: x.value)(symmetric_corr[ts])
reordered_vecs = _GEVP_solver(Gt, G0)
elif sort in ["Eigenvalue", "Eigenvector"]:
if sort == "Eigenvalue" and ts is not None:
@ -385,27 +359,25 @@ class Corr:
all_vecs = [None] * (t0 + 1)
for t in range(t0 + 1, self.T):
try:
Gt = _get_mat_at_t(t)
all_vecs.append(_GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv))
Gt = np.vectorize(lambda x: x.value)(symmetric_corr[t])
all_vecs.append(_GEVP_solver(Gt, G0))
except Exception:
all_vecs.append(None)
if sort == "Eigenvector":
if ts is None:
raise ValueError("ts is required for the Eigenvector sorting method.")
raise Exception("ts is required for the Eigenvector sorting method.")
all_vecs = _sort_vectors(all_vecs, ts)
reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)]
if kwargs.get('auto_gamma', False) and vector_obs:
[[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs]
else:
raise ValueError("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")
raise Exception("Unkown value for 'sort'.")
if "state" in kwargs:
return reordered_vecs[kwargs.get("state")]
else:
return reordered_vecs
def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue"):
"""Determines the eigenvalue of the GEVP by solving and projecting the correlator
Parameters
@ -415,7 +387,7 @@ class Corr:
All other parameters are identical to the ones of Corr.GEVP.
"""
vec = self.GEVP(t0, ts=ts, sort=sort, **kwargs)[state]
vec = self.GEVP(t0, ts=ts, sort=sort)[state]
return self.projected(vec)
def Hankel(self, N, periodic=False):
@ -435,7 +407,7 @@ class Corr:
"""
if self.N != 1:
raise NotImplementedError("Multi-operator Prony not implemented!")
raise Exception("Multi-operator Prony not implemented!")
array = np.empty([N, N], dtype="object")
new_content = []
@ -502,7 +474,7 @@ class Corr:
correlator or a Corr of same length.
"""
if self.N != 1:
raise ValueError("Only one-dimensional correlators can be safely correlated.")
raise Exception("Only one-dimensional correlators can be safely correlated.")
new_content = []
for x0, t_slice in enumerate(self.content):
if _check_for_none(self, t_slice):
@ -516,7 +488,7 @@ class Corr:
elif isinstance(partner, Obs): # Should this include CObs?
new_content.append(np.array([correlate(o, partner) for o in t_slice]))
else:
raise TypeError("Can only correlate with an Obs or a Corr.")
raise Exception("Can only correlate with an Obs or a Corr.")
return Corr(new_content)
@ -583,7 +555,7 @@ class Corr:
Available choice: symmetric, forward, backward, improved, log, default: symmetric
"""
if self.N != 1:
raise ValueError("deriv only implemented for one-dimensional correlators.")
raise Exception("deriv only implemented for one-dimensional correlators.")
if variant == "symmetric":
newcontent = []
for t in range(1, self.T - 1):
@ -592,7 +564,7 @@ class Corr:
else:
newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
if (all([x is None for x in newcontent])):
raise ValueError('Derivative is undefined at all timeslices')
raise Exception('Derivative is undefined at all timeslices')
return Corr(newcontent, padding=[1, 1])
elif variant == "forward":
newcontent = []
@ -602,7 +574,7 @@ class Corr:
else:
newcontent.append(self.content[t + 1] - self.content[t])
if (all([x is None for x in newcontent])):
raise ValueError("Derivative is undefined at all timeslices")
raise Exception("Derivative is undefined at all timeslices")
return Corr(newcontent, padding=[0, 1])
elif variant == "backward":
newcontent = []
@ -612,7 +584,7 @@ class Corr:
else:
newcontent.append(self.content[t] - self.content[t - 1])
if (all([x is None for x in newcontent])):
raise ValueError("Derivative is undefined at all timeslices")
raise Exception("Derivative is undefined at all timeslices")
return Corr(newcontent, padding=[1, 0])
elif variant == "improved":
newcontent = []
@ -622,7 +594,7 @@ class Corr:
else:
newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
if (all([x is None for x in newcontent])):
raise ValueError('Derivative is undefined at all timeslices')
raise Exception('Derivative is undefined at all timeslices')
return Corr(newcontent, padding=[2, 2])
elif variant == 'log':
newcontent = []
@ -632,11 +604,11 @@ class Corr:
else:
newcontent.append(np.log(self.content[t]))
if (all([x is None for x in newcontent])):
raise ValueError("Log is undefined at all timeslices")
raise Exception("Log is undefined at all timeslices")
logcorr = Corr(newcontent)
return self * logcorr.deriv('symmetric')
else:
raise ValueError("Unknown variant.")
raise Exception("Unknown variant.")
def second_deriv(self, variant="symmetric"):
r"""Return the second derivative of the correlator with respect to x0.
@ -656,7 +628,7 @@ class Corr:
$$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$
"""
if self.N != 1:
raise ValueError("second_deriv only implemented for one-dimensional correlators.")
raise Exception("second_deriv only implemented for one-dimensional correlators.")
if variant == "symmetric":
newcontent = []
for t in range(1, self.T - 1):
@ -665,7 +637,7 @@ class Corr:
else:
newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
if (all([x is None for x in newcontent])):
raise ValueError("Derivative is undefined at all timeslices")
raise Exception("Derivative is undefined at all timeslices")
return Corr(newcontent, padding=[1, 1])
elif variant == "big_symmetric":
newcontent = []
@ -675,7 +647,7 @@ class Corr:
else:
newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4)
if (all([x is None for x in newcontent])):
raise ValueError("Derivative is undefined at all timeslices")
raise Exception("Derivative is undefined at all timeslices")
return Corr(newcontent, padding=[2, 2])
elif variant == "improved":
newcontent = []
@ -685,7 +657,7 @@ class Corr:
else:
newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
if (all([x is None for x in newcontent])):
raise ValueError("Derivative is undefined at all timeslices")
raise Exception("Derivative is undefined at all timeslices")
return Corr(newcontent, padding=[2, 2])
elif variant == 'log':
newcontent = []
@ -695,11 +667,11 @@ class Corr:
else:
newcontent.append(np.log(self.content[t]))
if (all([x is None for x in newcontent])):
raise ValueError("Log is undefined at all timeslices")
raise Exception("Log is undefined at all timeslices")
logcorr = Corr(newcontent)
return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2)
else:
raise ValueError("Unknown variant.")
raise Exception("Unknown variant.")
def m_eff(self, variant='log', guess=1.0):
"""Returns the effective mass of the correlator as correlator object
@ -728,7 +700,7 @@ class Corr:
else:
newcontent.append(self.content[t] / self.content[t + 1])
if (all([x is None for x in newcontent])):
raise ValueError('m_eff is undefined at all timeslices')
raise Exception('m_eff is undefined at all timeslices')
return np.log(Corr(newcontent, padding=[0, 1]))
@ -742,7 +714,7 @@ class Corr:
else:
newcontent.append(self.content[t - 1] / self.content[t + 1])
if (all([x is None for x in newcontent])):
raise ValueError('m_eff is undefined at all timeslices')
raise Exception('m_eff is undefined at all timeslices')
return np.log(Corr(newcontent, padding=[1, 1])) / 2
@ -767,7 +739,7 @@ class Corr:
else:
newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
if (all([x is None for x in newcontent])):
raise ValueError('m_eff is undefined at all timeslices')
raise Exception('m_eff is undefined at all timeslices')
return Corr(newcontent, padding=[0, 1])
@ -779,11 +751,11 @@ class Corr:
else:
newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
if (all([x is None for x in newcontent])):
raise ValueError("m_eff is undefined at all timeslices")
raise Exception("m_eff is undefined at all timeslices")
return np.arccosh(Corr(newcontent, padding=[1, 1]))
else:
raise ValueError('Unknown variant.')
raise Exception('Unknown variant.')
def fit(self, function, fitrange=None, silent=False, **kwargs):
r'''Fits function to the data
@ -801,7 +773,7 @@ class Corr:
Decides whether output is printed to the standard output.
'''
if self.N != 1:
raise ValueError("Correlator must be projected before fitting")
raise Exception("Correlator must be projected before fitting")
if fitrange is None:
if self.prange:
@ -810,12 +782,12 @@ class Corr:
fitrange = [0, self.T - 1]
else:
if not isinstance(fitrange, list):
raise TypeError("fitrange has to be a list with two elements")
raise Exception("fitrange has to be a list with two elements")
if len(fitrange) != 2:
raise ValueError("fitrange has to have exactly two elements [fit_start, fit_stop]")
raise Exception("fitrange has to have exactly two elements [fit_start, fit_stop]")
xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
result = least_squares(xs, ys, function, silent=silent, **kwargs)
return result
@ -840,9 +812,9 @@ class Corr:
else:
raise Exception("no plateau range provided")
if self.N != 1:
raise ValueError("Correlator must be projected before getting a plateau.")
raise Exception("Correlator must be projected before getting a plateau.")
if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
raise ValueError("plateau is undefined at all timeslices in plateaurange.")
raise Exception("plateau is undefined at all timeslices in plateaurange.")
if auto_gamma:
self.gamma_method()
if method == "fit":
@ -854,16 +826,16 @@ class Corr:
return returnvalue
else:
raise ValueError("Unsupported plateau method: " + method)
raise Exception("Unsupported plateau method: " + method)
def set_prange(self, prange):
"""Sets the attribute prange of the Corr object."""
if not len(prange) == 2:
raise ValueError("prange must be a list or array with two values")
raise Exception("prange must be a list or array with two values")
if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
raise TypeError("Start and end point must be integers")
if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]):
raise ValueError("Start and end point must define a range in the interval 0,T")
raise Exception("Start and end point must be integers")
if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
raise Exception("Start and end point must define a range in the interval 0,T")
self.prange = prange
return
@ -900,7 +872,7 @@ class Corr:
Optional title of the figure.
"""
if self.N != 1:
raise ValueError("Correlator must be projected before plotting")
raise Exception("Correlator must be projected before plotting")
if auto_gamma:
self.gamma_method()
@ -941,7 +913,7 @@ class Corr:
hide_from = None
ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
else:
raise TypeError("'comp' must be a correlator or a list of correlators.")
raise Exception("'comp' must be a correlator or a list of correlators.")
if plateau:
if isinstance(plateau, Obs):
@ -950,14 +922,14 @@ class Corr:
ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
else:
raise TypeError("'plateau' must be an Obs")
raise Exception("'plateau' must be an Obs")
if references:
if isinstance(references, list):
for ref in references:
ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--')
else:
raise TypeError("'references' must be a list of floating pint values.")
raise Exception("'references' must be a list of floating pint values.")
if self.prange:
ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0)
@ -991,7 +963,7 @@ class Corr:
if isinstance(save, str):
fig.savefig(save, bbox_inches='tight')
else:
raise TypeError("'save' has to be a string.")
raise Exception("'save' has to be a string.")
def spaghetti_plot(self, logscale=True):
"""Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
@ -1002,7 +974,7 @@ class Corr:
Determines whether the scale of the y-axis is logarithmic or standard.
"""
if self.N != 1:
raise ValueError("Correlator needs to be projected first.")
raise Exception("Correlator needs to be projected first.")
mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
@ -1044,7 +1016,7 @@ class Corr:
elif datatype == "pickle":
dump_object(self, filename, **kwargs)
else:
raise ValueError("Unknown datatype " + str(datatype))
raise Exception("Unknown datatype " + str(datatype))
def print(self, print_range=None):
print(self.__repr__(print_range))
@ -1094,7 +1066,7 @@ class Corr:
def __add__(self, y):
if isinstance(y, Corr):
if ((self.N != y.N) or (self.T != y.T)):
raise ValueError("Addition of Corrs with different shape")
raise Exception("Addition of Corrs with different shape")
newcontent = []
for t in range(self.T):
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@ -1103,7 +1075,7 @@ class Corr:
newcontent.append(self.content[t] + y.content[t])
return Corr(newcontent)
elif isinstance(y, (Obs, int, float, CObs, complex)):
elif isinstance(y, (Obs, int, float, CObs)):
newcontent = []
for t in range(self.T):
if _check_for_none(self, self.content[t]):
@ -1122,7 +1094,7 @@ class Corr:
def __mul__(self, y):
if isinstance(y, Corr):
if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
newcontent = []
for t in range(self.T):
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@ -1131,7 +1103,7 @@ class Corr:
newcontent.append(self.content[t] * y.content[t])
return Corr(newcontent)
elif isinstance(y, (Obs, int, float, CObs, complex)):
elif isinstance(y, (Obs, int, float, CObs)):
newcontent = []
for t in range(self.T):
if _check_for_none(self, self.content[t]):
@ -1193,7 +1165,7 @@ class Corr:
def __truediv__(self, y):
if isinstance(y, Corr):
if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
newcontent = []
for t in range(self.T):
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@ -1207,16 +1179,16 @@ class Corr:
newcontent[t] = None
if all([item is None for item in newcontent]):
raise ValueError("Division returns completely undefined correlator")
raise Exception("Division returns completely undefined correlator")
return Corr(newcontent)
elif isinstance(y, (Obs, CObs)):
if isinstance(y, Obs):
if y.value == 0:
raise ValueError('Division by zero will return undefined correlator')
raise Exception('Division by zero will return undefined correlator')
if isinstance(y, CObs):
if y.is_zero():
raise ValueError('Division by zero will return undefined correlator')
raise Exception('Division by zero will return undefined correlator')
newcontent = []
for t in range(self.T):
@ -1228,7 +1200,7 @@ class Corr:
elif isinstance(y, (int, float)):
if y == 0:
raise ValueError('Division by zero will return undefined correlator')
raise Exception('Division by zero will return undefined correlator')
newcontent = []
for t in range(self.T):
if _check_for_none(self, self.content[t]):
@ -1284,7 +1256,7 @@ class Corr:
if np.isnan(tmp_sum.value):
newcontent[t] = None
if all([item is None for item in newcontent]):
raise ValueError('Operation returns undefined correlator')
raise Exception('Operation returns undefined correlator')
return Corr(newcontent)
def sin(self):
@ -1392,13 +1364,13 @@ class Corr:
'''
if self.N == 1:
raise ValueError('Method cannot be applied to one-dimensional correlators.')
raise Exception('Method cannot be applied to one-dimensional correlators.')
if basematrix is None:
basematrix = self
if Ntrunc >= basematrix.N:
raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
if basematrix.N != self.N:
raise ValueError('basematrix and targetmatrix have to be of the same size.')
raise Exception('basematrix and targetmatrix have to be of the same size.')
evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
@ -1414,13 +1386,8 @@ class Corr:
return Corr(newcontent)
def _sort_vectors(vec_set_in, ts):
def _sort_vectors(vec_set, ts):
"""Helper function used to find a set of Eigenvectors consistent over all timeslices"""
if isinstance(vec_set_in[ts][0][0], Obs):
vec_set = [anp.vectorize(lambda x: float(x))(vi) if vi is not None else vi for vi in vec_set_in]
else:
vec_set = vec_set_in
reference_sorting = np.array(vec_set[ts])
N = reference_sorting.shape[0]
sorted_vec_set = []
@ -1439,9 +1406,9 @@ def _sort_vectors(vec_set_in, ts):
if current_score > best_score:
best_score = current_score
best_perm = perm
sorted_vec_set.append([vec_set_in[t][k] for k in best_perm])
sorted_vec_set.append([vec_set[t][k] for k in best_perm])
else:
sorted_vec_set.append(vec_set_in[t])
sorted_vec_set.append(vec_set[t])
return sorted_vec_set
@ -1451,63 +1418,10 @@ def _check_for_none(corr, entry):
return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2
def _GEVP_solver(Gt, G0, method='eigh', chol_inv=None):
r"""Helper function for solving the GEVP and sorting the eigenvectors.
Solves $G(t)v_i=\lambda_i G(t_0)v_i$ and returns the eigenvectors v_i
def _GEVP_solver(Gt, G0):
"""Helper function for solving the GEVP and sorting the eigenvectors.
The helper function assumes that both provided matrices are symmetric and
only processes the lower triangular part of both matrices. In case the matrices
are not symmetric the upper triangular parts are effectively discarded.
Parameters
----------
Gt : array
The correlator at time t for the left hand side of the GEVP
G0 : array
The correlator at time t0 for the right hand side of the GEVP
Method used to solve the GEVP.
- "eigh": Use scipy.linalg.eigh to solve the GEVP.
- "cholesky": Use manually implemented solution via the Cholesky decomposition.
chol_inv : array, optional
Inverse of the Cholesky decomposition of G0. May be provided to
speed up the computation in the case of method=='cholesky'
"""
if isinstance(G0[0][0], Obs):
vector_obs = True
else:
vector_obs = False
if method == 'cholesky':
if vector_obs:
cholesky = linalg.cholesky
inv = linalg.inv
eigv = linalg.eigv
matmul = linalg.matmul
else:
cholesky = np.linalg.cholesky
inv = np.linalg.inv
def eigv(x, **kwargs):
return np.linalg.eigh(x)[1]
def matmul(*operands):
return np.linalg.multi_dot(operands)
N = Gt.shape[0]
output = [[] for j in range(N)]
if chol_inv is None:
chol = cholesky(G0) # This will automatically report if the matrix is not pos-def
chol_inv = inv(chol)
try:
new_matrix = matmul(chol_inv, Gt, chol_inv.T)
ev = eigv(new_matrix)
ev = matmul(chol_inv.T, ev)
output = np.flip(ev, axis=1).T
except (np.linalg.LinAlgError, TypeError, ValueError): # The above code can fail because of linalg-errors or because the entry of the corr is None
for s in range(N):
output[s] = None
return output
elif method == 'eigh':
return scipy.linalg.eigh(Gt, G0, lower=True)[1].T[::-1]
are not symmetric the upper triangular parts are effectively discarded."""
return scipy.linalg.eigh(Gt, G0, lower=True)[1].T[::-1]
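The `_GEVP_solver` hunk above adds an optional Cholesky-based solution next to the plain `scipy.linalg.eigh` call. As a standalone illustration of what that float branch computes, here is a numpy-only sketch with invented matrices (independent of the `pyerrors` internals):

```python
import numpy as np

def gevp_cholesky(Gt, G0):
    """Solve G(t) v = lambda * G(t0) v via a Cholesky decomposition of G(t0)."""
    chol = np.linalg.cholesky(G0)        # fails if G(t0) is not positive definite
    chol_inv = np.linalg.inv(chol)
    # Whiten with L^{-1}, diagonalise the resulting symmetric matrix, then map back.
    transformed = chol_inv @ Gt @ chol_inv.T
    _, eigenvectors = np.linalg.eigh(transformed)
    vecs = chol_inv.T @ eigenvectors
    return np.flip(vecs, axis=1).T       # one eigenvector per row, descending eigenvalue

# Toy 2x2 example with made-up numbers.
G0 = np.array([[2.0, 0.5], [0.5, 1.0]])
Gt = np.array([[1.0, 0.3], [0.3, 0.4]])
print(gevp_cholesky(Gt, G0))
```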


@ -34,7 +34,7 @@ def epsilon_tensor(i, j, k):
"""
test_set = set((i, j, k))
if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
raise ValueError("Unexpected input", i, j, k)
raise Exception("Unexpected input", i, j, k)
return (i - j) * (j - k) * (k - i) / 2
@ -52,7 +52,7 @@ def epsilon_tensor_rank4(i, j, k, o):
"""
test_set = set((i, j, k, o))
if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
raise ValueError("Unexpected input", i, j, k, o)
raise Exception("Unexpected input", i, j, k, o)
return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
@ -92,5 +92,5 @@ def Grid_gamma(gamma_tag):
elif gamma_tag == 'SigmaZT':
g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
else:
raise ValueError('Unkown gamma structure', gamma_tag)
raise Exception('Unkown gamma structure', gamma_tag)
return g
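As a quick sanity check of the closed-form Levi-Civita expression shown in the `epsilon_tensor` hunk above (an illustration, not repository code):

```python
def epsilon_tensor(i, j, k):
    # Same closed form as in pyerrors.dirac above.
    return (i - j) * (j - k) * (k - i) / 2

assert epsilon_tensor(1, 2, 3) == 1.0   # even permutation
assert epsilon_tensor(2, 1, 3) == -1.0  # odd permutation
assert epsilon_tensor(1, 1, 3) == 0.0   # repeated index vanishes
```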


@ -14,7 +14,7 @@ from autograd import hessian as auto_hessian
from autograd import elementwise_grad as egrad
from numdifftools import Jacobian as num_jacobian
from numdifftools import Hessian as num_hessian
from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky
from .obs import Obs, derived_observable, covariance, cov_Obs
class Fit_result(Sequence):
@ -151,14 +151,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
inv_chol_cov_matrix [array,list], optional
array: shape = (no of y values) X (no of y values)
list: for an uncombined fit: [""]
for a combined fit: list of keys belonging to the corr_matrix saved in the array, must be the same as the keys of the y dict in alphabetical order
If correlated_fit=True is set as well, can provide an inverse covariance matrix (y errors, dy_f included!) of your own choosing for a correlated fit.
The matrix must be a lower triangular matrix constructed from a Cholesky decomposition: The function invert_corr_cov_cholesky(corr, inverrdiag) can be
used to construct it from a correlation matrix (corr) and the errors dy_f of the data points (inverrdiag = np.diag(1 / np.asarray(dy_f))). For the correct
ordering the correlation matrix (corr) can be sorted via the function sort_corr(corr, kl, yd) where kl is the list of keys and yd the y dict.
expected_chisquare : bool
If True estimates the expected chisquare which is
corrected by effects caused by correlated input data (default False).
@ -173,66 +165,15 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
-------
output : Fit_result
Parameters and information on the fitted result.
Examples
------
>>> # Example of a correlated (correlated_fit = True, inv_chol_cov_matrix handed over) combined fit, based on a randomly generated data set
>>> import numpy as np
>>> from scipy.stats import norm
>>> from scipy.linalg import cholesky
>>> import pyerrors as pe
>>> # generating the random data set
>>> num_samples = 400
>>> N = 3
>>> x = np.arange(N)
>>> x1 = norm.rvs(size=(N, num_samples)) # generate random numbers
>>> x2 = norm.rvs(size=(N, num_samples)) # generate random numbers
>>> r = r1 = r2 = np.zeros((N, N))
>>> y = {}
>>> for i in range(N):
>>> for j in range(N):
>>> r[i, j] = np.exp(-0.8 * np.fabs(i - j)) # element in correlation matrix
>>> errl = np.sqrt([3.4, 2.5, 3.6]) # set y errors
>>> for i in range(N):
>>> for j in range(N):
>>> r[i, j] *= errl[i] * errl[j] # element in covariance matrix
>>> c = cholesky(r, lower=True)
>>> y = {'a': np.dot(c, x1), 'b': np.dot(c, x2)} # generate y data with the covariance matrix defined
>>> # random data set has been generated, now the dictionaries and the inverse covariance matrix to be handed over are built
>>> x_dict = {}
>>> y_dict = {}
>>> chol_inv_dict = {}
>>> data = []
>>> for key in y.keys():
>>> x_dict[key] = x
>>> for i in range(N):
>>> data.append(pe.Obs([[i + 1 + o for o in y[key][i]]], ['ens'])) # generate y Obs from the y data
>>> [o.gamma_method() for o in data]
>>> corr = pe.covariance(data, correlation=True)
>>> inverrdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
>>> chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag) # gives form of the inverse covariance matrix needed for the combined correlated fit below
>>> y_dict = {'a': data[:3], 'b': data[3:]}
>>> # common fit parameter p[0] in combined fit
>>> def fit1(p, x):
>>> return p[0] + p[1] * x
>>> def fit2(p, x):
>>> return p[0] + p[2] * x
>>> fitf_dict = {'a': fit1, 'b':fit2}
>>> fitp_inv_cov_combined_fit = pe.least_squares(x_dict,y_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,['a','b']])
Fit with 3 parameters
Method: Levenberg-Marquardt
`ftol` termination condition is satisfied.
chisquare/d.o.f.: 0.5388013574561786 # random
fit parameters [1.11897846 0.96361162 0.92325319] # random
'''
output = Fit_result()
if (isinstance(x, dict) and isinstance(y, dict) and isinstance(func, dict)):
if (type(x) == dict and type(y) == dict and type(func) == dict):
xd = {key: anp.asarray(x[key]) for key in x}
yd = y
funcd = func
output.fit_function = func
elif (isinstance(x, dict) or isinstance(y, dict) or isinstance(func, dict)):
elif (type(x) == dict or type(y) == dict or type(func) == dict):
raise TypeError("All arguments have to be dictionaries in order to perform a combined fit.")
else:
x = np.asarray(x)
@ -256,7 +197,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
if sorted(list(funcd.keys())) != key_ls:
raise ValueError('x and func dictionaries do not contain the same keys.')
x_all = np.concatenate([np.array(xd[key]).transpose() for key in key_ls]).transpose()
x_all = np.concatenate([np.array(xd[key]) for key in key_ls])
y_all = np.concatenate([np.array(yd[key]) for key in key_ls])
y_f = [o.value for o in y_all]
@ -293,7 +234,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
if len(key_ls) > 1:
for key in key_ls:
if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape:
raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {np.asarray(yd[key]).shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
if not silent:
print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
@ -356,21 +297,15 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)
if kwargs.get('correlated_fit') is True:
if 'inv_chol_cov_matrix' in kwargs:
chol_inv = kwargs.get('inv_chol_cov_matrix')
if (chol_inv[0].shape[0] != len(dy_f)):
raise TypeError('The number of columns of the inverse covariance matrix handed over needs to be equal to the number of y errors.')
if (chol_inv[0].shape[0] != chol_inv[0].shape[1]):
raise TypeError('The inverse covariance matrix handed over needs to have the same number of rows as columns.')
if (chol_inv[1] != key_ls):
raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.')
chol_inv = chol_inv[0]
if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))):
raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.')
else:
corr = covariance(y_all, correlation=True, **kwargs)
inverrdiag = np.diag(1 / np.asarray(dy_f))
chol_inv = invert_corr_cov_cholesky(corr, inverrdiag)
corr = covariance(y_all, correlation=True, **kwargs)
covdiag = np.diag(1 / np.asarray(dy_f))
condn = np.linalg.cond(corr)
if condn > 0.1 / np.finfo(float).eps:
raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
if condn > 1e13:
warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
chol = np.linalg.cholesky(corr)
chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
def general_chisqfunc(p, ivars, pr):
model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
@ -415,6 +350,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
if kwargs.get('correlated_fit') is True:
def chisqfunc_residuals(p):
return general_chisqfunc(p, y_f, p_f)
@ -429,7 +365,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
raise Exception('The minimization procedure did not converge.')
output.chisquare = chisquare
output.dof = y_all.shape[-1] - n_parms + len(loc_priors)
output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
if output.dof > 0:
output.chisquare_by_dof = output.chisquare / output.dof
@ -457,7 +393,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
hat_vector = prepare_hat_matrix()
A = W @ hat_vector
P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W)
expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
if not silent:
print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)


@ -5,11 +5,11 @@ r'''
For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
'''
from . import bdio as bdio
from . import dobs as dobs
from . import hadrons as hadrons
from . import json as json
from . import misc as misc
from . import openQCD as openQCD
from . import pandas as pandas
from . import sfcf as sfcf
from . import bdio
from . import dobs
from . import hadrons
from . import json
from . import misc
from . import openQCD
from . import pandas
from . import sfcf
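The module docstring above points to the jackknife interface. Below is a sketch of the round trip it refers to, using the documented `export_jackknife`/`import_jackknife` pair; the ensemble name and data are synthetic, and the exact `import_jackknife` signature should be checked against the `pyerrors.obs` documentation:

```python
import numpy as np
import pyerrors as pe

rng = np.random.default_rng(7)
obs = pe.Obs([rng.normal(0.5, 0.05, 500)], ['ensemble_A'])
obs.gamma_method()

jacks = obs.export_jackknife()                             # first entry holds the mean value
roundtrip = pe.obs.import_jackknife(jacks, 'ensemble_A')   # back to an Obs object
roundtrip.gamma_method()
print(obs, roundtrip)
```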


@ -79,7 +79,7 @@ def _dict_to_xmlstring_spaces(d, space=' '):
o += space
o += li + '\n'
if li.startswith('<') and not cm:
if '<%s' % ('/') not in li:
if not '<%s' % ('/') in li:
c += 1
cm = False
return o
@ -529,8 +529,7 @@ def import_dobs_string(content, full_output=False, separator_insertion=True):
deltas.append(repdeltas)
idl.append(repidl)
obsmeans = [np.average(deltas[j]) for j in range(len(deltas))]
res.append(Obs([np.array(deltas[j]) - obsmeans[j] for j in range(len(obsmeans))], obs_names, idl=idl, means=obsmeans))
res.append(Obs(deltas, obs_names, idl=idl))
res[-1]._value = mean[i]
_check(len(e_names) == ne)
@ -672,7 +671,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
o += space
o += li + '\n'
if li.startswith('<') and not cm:
if '<%s' % ('/') not in li:
if not '<%s' % ('/') in li:
c += 1
cm = False
return o


@ -88,7 +88,7 @@ def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
path_obj = Path(filestem)
path = path_obj.parent.as_posix()
filestem = path_obj.name
filestem = path_obj.stem
files, idx = _get_files(path, filestem, idl)
@ -113,7 +113,7 @@ def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
infos = []
for hd5_file in files:
h5file = h5py.File(path + '/' + hd5_file, "r")
if group + '/' + entry not in h5file:
if not group + '/' + entry in h5file:
raise Exception("Entry '" + entry + "' not contained in the files.")
raw_data = h5file[group + '/' + entry + '/corr']
real_data = raw_data[:].view("complex")
@ -186,7 +186,7 @@ def _extract_real_arrays(path, files, tree, keys):
for hd5_file in files:
h5file = h5py.File(path + '/' + hd5_file, "r")
for key in keys:
if tree + '/' + key not in h5file:
if not tree + '/' + key in h5file:
raise Exception("Entry '" + key + "' not contained in the files.")
raw_data = h5file[tree + '/' + key + '/data']
real_data = raw_data[:].astype(np.double)


@ -133,11 +133,10 @@ def create_json_string(ol, description='', indent=1):
names = []
idl = []
for key, value in obs.idl.items():
samples.append(np.array([np.nan] * len(value)))
samples.append([np.nan] * len(value))
names.append(key)
idl.append(value)
my_obs = Obs(samples, names, idl, means=[np.nan for n in names])
my_obs._value = np.nan
my_obs = Obs(samples, names, idl)
my_obs._covobs = obs._covobs
for name in obs._covobs:
my_obs.names.append(name)
@ -332,8 +331,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
if od:
r_offsets = [np.average([ddi[0] for ddi in di]) for di in od['deltas']]
ret = Obs([np.array([ddi[0] for ddi in od['deltas'][i]]) - r_offsets[i] for i in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[0] for ro in r_offsets])
ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
ret._value = values[0]
else:
ret = Obs([], [], means=[])
@ -358,8 +356,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
taglist = o.get('tag', layout * [None])
for i in range(layout):
if od:
r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
ret[-1]._value = values[i]
else:
ret.append(Obs([], [], means=[]))
@ -386,8 +383,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
taglist = o.get('tag', N * [None])
for i in range(N):
if od:
r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
ret[-1]._value = values[i]
else:
ret.append(Obs([], [], means=[]))


@ -47,7 +47,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
Reweighting factors read
"""
known_oqcd_versions = ['1.4', '1.6', '2.0']
if version not in known_oqcd_versions:
if not (version in known_oqcd_versions):
raise Exception('Unknown openQCD version defined!')
print("Working with openQCD version " + version)
if 'postfix' in kwargs:
@ -1286,9 +1286,7 @@ def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
imagsamples[repnum][t].append(corrres[1][t])
if 'idl' in kwargs:
left_idl = list(left_idl)
if expected_idl[repnum] == left_idl:
raise ValueError("None of the idls searched for were found in replikum of file " + file)
elif len(left_idl) > 0:
if len(left_idl) > 0:
warnings.warn('Could not find idls ' + str(left_idl) + ' in replikum of file ' + file, UserWarning)
repnum += 1
s = "Read correlator " + corr + " from " + str(repnum) + " replika with idls" + str(realsamples[0][t])


@ -4,13 +4,9 @@ import re
import numpy as np # Thinly-wrapped numpy
from ..obs import Obs
from .utils import sort_names, check_idl
import itertools
sep = "/"
def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
"""Read sfcf files from given folder structure.
Parameters
@ -69,77 +65,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0,
list of Observables with length T, observable per timeslice.
bb-type correlators have length 1.
"""
ret = read_sfcf_multi(path, prefix, [name], quarks_list=[quarks], corr_type_list=[corr_type],
noffset_list=[noffset], wf_list=[wf], wf2_list=[wf2], version=version,
cfg_separator=cfg_separator, silent=silent, **kwargs)
return ret[name][quarks][str(noffset)][str(wf)][str(wf2)]
def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=['bi'], noffset_list=[0], wf_list=[0], wf2_list=[0], version="1.0c", cfg_separator="n", silent=False, keyed_out=False, **kwargs):
"""Read sfcf files from given folder structure.
Parameters
----------
path : str
Path to the sfcf files.
prefix : str
Prefix of the sfcf files.
name : str
Name of the correlation function to read.
quarks_list : list[str]
Label of the quarks used in the sfcf input file. e.g. "quark quark"
for version 0.0 this does NOT need to be given with the typical " - "
that is present in the output file,
this is done automatically for this version
corr_type_list : list[str]
Type of correlation function to read. Can be
- 'bi' for boundary-inner
- 'bb' for boundary-boundary
- 'bib' for boundary-inner-boundary
noffset_list : list[int]
Offset of the source (only relevant when wavefunctions are used)
wf_list : int
ID of wave function
wf2_list : list[int]
ID of the second wavefunction
(only relevant for boundary-to-boundary correlation functions)
im : bool
if True, read imaginary instead of real part
of the correlation function.
names : list
Alternative labeling for replicas/ensembles.
Has to have the appropriate length
ens_name : str
replaces the name of the ensemble
version: str
version of SFCF, with which the measurement was done.
if the compact output option (-c) was specified,
append a "c" to the version (e.g. "1.0c")
if the append output option (-a) was specified,
append an "a" to the version
cfg_separator : str
String that separates the ensemble identifier from the configuration number (default 'n').
replica: list
list of replica to be read, default is all
files: list[list[int]]
list of files to be read per replica, default is all.
for non-compact output format, hand the folders to be read here.
check_configs: list[list[int]]
list of list of supposed configs, eg. [range(1,1000)]
for one replicum with 1000 configs
rep_string: str
Separator of ensemble name and replicum. Example: In "ensAr0", "r" would be the separator string.
Returns
-------
result: dict[list[Obs]]
dict with one of the following properties:
if keyed_out:
dict[key] = list[Obs]
where key has the form name/quarks/offset/wf/wf2
if not keyed_out:
dict[name][quarks][offset][wf][wf2] = list[Obs]
"""
if kwargs.get('im'):
im = 1
part = 'imaginary'
@ -147,6 +72,16 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
im = 0
part = 'real'
if corr_type == 'bb':
b2b = True
single = True
elif corr_type == 'bib':
b2b = True
single = False
else:
b2b = False
single = False
known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]
if version not in known_versions:
@ -185,10 +120,8 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
else:
replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
if replica == 0:
raise Exception('No replica found in directory')
if not silent:
print('Read', part, 'part of', name_list, 'from', prefix[:-1], ',', replica, 'replica')
print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')
if 'names' in kwargs:
new_names = kwargs.get('names')
@ -200,66 +133,17 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
else:
ens_name = kwargs.get("ens_name")
if not appended:
new_names = _get_rep_names(ls, ens_name, rep_sep=(kwargs.get('rep_string', 'r')))
new_names = _get_rep_names(ls, ens_name)
else:
new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name, rep_sep=(kwargs.get('rep_string', 'r')))
new_names = _get_appended_rep_names(ls, prefix, name, ens_name)
new_names = sort_names(new_names)
idl = []
noffset_list = [str(x) for x in noffset_list]
wf_list = [str(x) for x in wf_list]
wf2_list = [str(x) for x in wf2_list]
# setup dict structures
intern = {}
for name, corr_type in zip(name_list, corr_type_list):
intern[name] = {}
b2b, single = _extract_corr_type(corr_type)
intern[name]["b2b"] = b2b
intern[name]["single"] = single
intern[name]["spec"] = {}
for quarks in quarks_list:
intern[name]["spec"][quarks] = {}
for off in noffset_list:
intern[name]["spec"][quarks][off] = {}
for w in wf_list:
intern[name]["spec"][quarks][off][w] = {}
if b2b:
for w2 in wf2_list:
intern[name]["spec"][quarks][off][w][w2] = {}
intern[name]["spec"][quarks][off][w][w2]["pattern"] = _make_pattern(version, name, off, w, w2, intern[name]['b2b'], quarks)
else:
intern[name]["spec"][quarks][off][w]["0"] = {}
intern[name]["spec"][quarks][off][w]["0"]["pattern"] = _make_pattern(version, name, off, w, 0, intern[name]['b2b'], quarks)
internal_ret_dict = {}
needed_keys = []
for name, corr_type in zip(name_list, corr_type_list):
b2b, single = _extract_corr_type(corr_type)
if b2b:
needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, wf2_list))
else:
needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, ["0"]))
for key in needed_keys:
internal_ret_dict[key] = []
if not appended:
for i, item in enumerate(ls):
rep_path = path + '/' + item
if "files" in kwargs:
files = kwargs.get("files")
if isinstance(files, list):
if all(isinstance(f, list) for f in files):
files = files[i]
elif all(isinstance(f, str) for f in files):
files = files
else:
raise TypeError("files has to be of type list[list[str]] or list[str]!")
else:
raise TypeError("files has to be of type list[list[str]] or list[str]!")
else:
files = []
sub_ls = _find_files(rep_path, prefix, compact, files)
@ -272,7 +156,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
else:
rep_idl.append(int(cfg[3:]))
except Exception:
raise Exception("Couldn't parse idl from directory, problem with file " + cfg)
raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
rep_idl.sort()
# maybe there is a better way to print the idls
if not silent:
@ -280,89 +164,67 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
idl.append(rep_idl)
# here we have found all the files we need to look into.
if i == 0:
if version != "0.0" and compact:
file = path + '/' + item + '/' + sub_ls[0]
for name_index, name in enumerate(name_list):
if version == "0.0" or not compact:
file = path + '/' + item + '/' + sub_ls[0] + '/' + name
if corr_type_list[name_index] == 'bi':
name_keys = _lists2key(quarks_list, noffset_list, wf_list, ["0"])
# here, we want to find the place within the file,
# where the correlator we need is stored.
# to do so, the pattern needed is put together
# from the input values
if version == "0.0":
file = path + '/' + item + '/' + sub_ls[0] + '/' + name
else:
if compact:
file = path + '/' + item + '/' + sub_ls[0]
else:
name_keys = _lists2key(quarks_list, noffset_list, wf_list, wf2_list)
for key in name_keys:
specs = _key2specs(key)
quarks = specs[0]
off = specs[1]
w = specs[2]
w2 = specs[3]
# here, we want to find the place within the file,
# where the correlator we need is stored.
# to do so, the pattern needed is put together
# from the input values
start_read, T = _find_correlator(file, version, intern[name]["spec"][quarks][str(off)][str(w)][str(w2)]["pattern"], intern[name]['b2b'], silent=silent)
intern[name]["spec"][quarks][str(off)][str(w)][str(w2)]["start"] = start_read
intern[name]["T"] = T
# preparing the datastructure
# the correlators get parsed into...
deltas = []
for j in range(intern[name]["T"]):
deltas.append([])
internal_ret_dict[sep.join([name, key])] = deltas
file = path + '/' + item + '/' + sub_ls[0] + '/' + name
pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
start_read, T = _find_correlator(file, version, pattern, b2b, silent=silent)
# preparing the datastructure
# the correlators get parsed into...
deltas = []
for j in range(T):
deltas.append([])
if compact:
rep_deltas = _read_compact_rep(path, item, sub_ls, intern, needed_keys, im)
for key in needed_keys:
name = _key2specs(key)[0]
for t in range(intern[name]["T"]):
internal_ret_dict[key][t].append(rep_deltas[key][t])
else:
for key in needed_keys:
rep_data = []
name = _key2specs(key)[0]
for subitem in sub_ls:
cfg_path = path + '/' + item + '/' + subitem
file_data = _read_o_file(cfg_path, name, needed_keys, intern, version, im)
rep_data.append(file_data)
for t in range(intern[name]["T"]):
internal_ret_dict[key][t].append([])
for cfg in range(no_cfg):
internal_ret_dict[key][t][i].append(rep_data[cfg][key][t])
else:
for key in needed_keys:
specs = _key2specs(key)
name = specs[0]
quarks = specs[1]
off = specs[2]
w = specs[3]
w2 = specs[4]
if "files" in kwargs:
if isinstance(kwargs.get("files"), list) and all(isinstance(f, str) for f in kwargs.get("files")):
name_ls = kwargs.get("files")
else:
raise TypeError("In append mode, files has to be of type list[str]!")
else:
name_ls = ls
for exc in name_ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name):
name_ls = list(set(name_ls) - set([exc]))
name_ls = sort_names(name_ls)
pattern = intern[name]['spec'][quarks][off][w][w2]['pattern']
deltas = []
for rep, file in enumerate(name_ls):
rep_idl = []
filename = path + '/' + file
T, rep_idl, rep_data = _read_append_rep(filename, pattern, intern[name]['b2b'], cfg_separator, im, intern[name]['single'])
if rep == 0:
intern[name]['T'] = T
for t in range(intern[name]['T']):
deltas.append([])
for t in range(intern[name]['T']):
deltas[t].append(rep_data[t])
internal_ret_dict[key] = deltas
if name == name_list[0]:
idl.append(rep_idl)
rep_deltas = _read_compact_rep(path, item, sub_ls, start_read, T, b2b, name, im)
if kwargs.get("check_configs") is True:
for t in range(T):
deltas[t].append(rep_deltas[t])
else:
for t in range(T):
deltas[t].append(np.zeros(no_cfg))
for cnfg, subitem in enumerate(sub_ls):
with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
for k, line in enumerate(fp):
if (k >= start_read and k < start_read + T):
floats = list(map(float, line.split()))
if version == "0.0":
deltas[k - start_read][i][cnfg] = floats[im - single]
else:
deltas[k - start_read][i][cnfg] = floats[1 + im - single]
else:
if "files" in kwargs:
ls = kwargs.get("files")
else:
for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name):
ls = list(set(ls) - set([exc]))
ls = sort_names(ls)
pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
deltas = []
for rep, file in enumerate(ls):
rep_idl = []
filename = path + '/' + file
T, rep_idl, rep_data = _read_append_rep(filename, pattern, b2b, cfg_separator, im, single)
if rep == 0:
for t in range(T):
deltas.append([])
for t in range(T):
deltas[t].append(rep_data[t])
idl.append(rep_idl)
if "check_configs" in kwargs:
if not silent:
print("Checking for missing configs...")
che = kwargs.get("check_configs")
@ -374,91 +236,10 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
check_idl(idl[r], che[r])
if not silent:
print("Done")
result_dict = {}
if keyed_out:
for key in needed_keys:
name = _key2specs(key)[0]
result = []
for t in range(intern[name]["T"]):
result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
result_dict[key] = result
else:
for name, corr_type in zip(name_list, corr_type_list):
result_dict[name] = {}
for quarks in quarks_list:
result_dict[name][quarks] = {}
for off in noffset_list:
result_dict[name][quarks][off] = {}
for w in wf_list:
result_dict[name][quarks][off][w] = {}
if corr_type != 'bi':
for w2 in wf2_list:
key = _specs2key(name, quarks, off, w, w2)
result = []
for t in range(intern[name]["T"]):
result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
result_dict[name][quarks][str(off)][str(w)][str(w2)] = result
else:
key = _specs2key(name, quarks, off, w, "0")
result = []
for t in range(intern[name]["T"]):
result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
result_dict[name][quarks][str(off)][str(w)][str(0)] = result
return result_dict
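For orientation, a minimal usage sketch of the nested return structure documented above. The path, prefix, correlator name and quark label are hypothetical placeholders and not part of the source:

>>> import pyerrors as pe
>>> res = pe.input.sfcf.read_sfcf_multi("./sfcf_data", "run_", ["f_A"], quarks_list=["lquark lquark"])
>>> corr_fA = res["f_A"]["lquark lquark"]["0"]["0"]["0"]  # list of Obs, one entry per timeslice
>>> keyed = pe.input.sfcf.read_sfcf_multi("./sfcf_data", "run_", ["f_A"], quarks_list=["lquark lquark"], keyed_out=True)
>>> corr_fA_keyed = keyed["f_A/lquark lquark/0/0/0"]  # key of the form name/quarks/offset/wf/wf2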
def _lists2key(*lists):
keys = []
for tup in itertools.product(*lists):
keys.append(sep.join(tup))
return keys
def _key2specs(key):
return key.split(sep)
def _specs2key(*specs):
return sep.join(specs)
def _read_o_file(cfg_path, name, needed_keys, intern, version, im):
return_vals = {}
for key in needed_keys:
file = cfg_path + '/' + name
specs = _key2specs(key)
if specs[0] == name:
with open(file) as fp:
lines = fp.readlines()
quarks = specs[1]
off = specs[2]
w = specs[3]
w2 = specs[4]
T = intern[name]["T"]
start_read = intern[name]["spec"][quarks][off][w][w2]["start"]
deltas = []
for line in lines[start_read:start_read + T]:
floats = list(map(float, line.split()))
if version == "0.0":
deltas.append(floats[im - intern[name]["single"]])
else:
deltas.append(floats[1 + im - intern[name]["single"]])
return_vals[key] = deltas
return return_vals
def _extract_corr_type(corr_type):
if corr_type == 'bb':
b2b = True
single = True
elif corr_type == 'bib':
b2b = True
single = False
else:
b2b = False
single = False
return b2b, single
result = []
for t in range(T):
result.append(Obs(deltas[t], new_names, idl=idl))
return result
def _find_files(rep_path, prefix, compact, files=[]):
@ -528,57 +309,38 @@ def _find_correlator(file_name, version, pattern, b2b, silent=False):
return start_read, T
def _read_compact_file(rep_path, cfg_file, intern, needed_keys, im):
return_vals = {}
with open(rep_path + cfg_file) as fp:
def _read_compact_file(rep_path, config_file, start_read, T, b2b, name, im):
with open(rep_path + config_file) as fp:
lines = fp.readlines()
for key in needed_keys:
keys = _key2specs(key)
name = keys[0]
quarks = keys[1]
off = keys[2]
w = keys[3]
w2 = keys[4]
# check, if the correlator is in fact
# printed completely
if (start_read + T + 1 > len(lines)):
raise Exception("EOF before end of correlator data! Maybe " + rep_path + config_file + " is corrupted?")
corr_lines = lines[start_read - 6: start_read + T]
del lines
t_vals = []
T = intern[name]["T"]
start_read = intern[name]["spec"][quarks][off][w][w2]["start"]
# check, if the correlator is in fact
# printed completely
if (start_read + T + 1 > len(lines)):
raise Exception("EOF before end of correlator data! Maybe " + rep_path + cfg_file + " is corrupted?")
corr_lines = lines[start_read - 6: start_read + T]
t_vals = []
if corr_lines[1 - b2b].strip() != 'name ' + name:
raise Exception('Wrong format in file', config_file)
if corr_lines[1 - intern[name]["b2b"]].strip() != 'name ' + name:
raise Exception('Wrong format in file', cfg_file)
for k in range(6, T + 6):
floats = list(map(float, corr_lines[k].split()))
t_vals.append(floats[-2:][im])
return_vals[key] = t_vals
return return_vals
for k in range(6, T + 6):
floats = list(map(float, corr_lines[k].split()))
t_vals.append(floats[-2:][im])
return t_vals
def _read_compact_rep(path, rep, sub_ls, intern, needed_keys, im):
def _read_compact_rep(path, rep, sub_ls, start_read, T, b2b, name, im):
rep_path = path + '/' + rep + '/'
no_cfg = len(sub_ls)
return_vals = {}
for key in needed_keys:
name = _key2specs(key)[0]
deltas = []
for t in range(intern[name]["T"]):
deltas.append(np.zeros(no_cfg))
return_vals[key] = deltas
deltas = []
for t in range(T):
deltas.append(np.zeros(no_cfg))
for cfg in range(no_cfg):
cfg_file = sub_ls[cfg]
cfg_data = _read_compact_file(rep_path, cfg_file, intern, needed_keys, im)
for key in needed_keys:
name = _key2specs(key)[0]
for t in range(intern[name]["T"]):
return_vals[key][t][cfg] = cfg_data[key][t]
return return_vals
cfg_data = _read_compact_file(rep_path, cfg_file, start_read, T, b2b, name, im)
for t in range(T):
deltas[t][cfg] = cfg_data[t]
return deltas
def _read_chunk(chunk, gauge_line, cfg_sep, start_read, T, corr_line, b2b, pattern, im, single):
@ -647,22 +409,22 @@ def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single):
return T, rep_idl, data
def _get_rep_names(ls, ens_name=None, rep_sep='r'):
def _get_rep_names(ls, ens_name=None):
new_names = []
for entry in ls:
try:
idx = entry.index(rep_sep)
idx = entry.index('r')
except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if ens_name:
new_names.append(ens_name + '|' + entry[idx:])
new_names.append('ens_name' + '|' + entry[idx:])
else:
new_names.append(entry[:idx] + '|' + entry[idx:])
return new_names
def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
def _get_appended_rep_names(ls, prefix, name, ens_name=None):
new_names = []
for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*.' + name):
@ -671,12 +433,12 @@ def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
for entry in ls:
myentry = entry[:-len(name) - 1]
try:
idx = myentry.index(rep_sep)
idx = myentry.index('r')
except Exception:
raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")
if ens_name:
new_names.append(ens_name + '|' + entry[idx:])
new_names.append('ens_name' + '|' + entry[idx:])
else:
new_names.append(myentry[:idx] + '|' + myentry[idx:])
return new_names


@ -1,8 +1,5 @@
"""Utilities for the input"""
import re
import fnmatch
import os
"""Utilities for the input"""
def sort_names(ll):
@ -20,7 +17,6 @@ def sort_names(ll):
ll: list
sorted list
"""
if len(ll) > 1:
sorted = False
r_pattern = r'r(\d+)'
@ -67,7 +63,6 @@ def check_idl(idl, che):
miss_str : str
string with integers of which idls are missing
"""
missing = []
for c in che:
if c not in idl:
@ -80,65 +75,3 @@ def check_idl(idl, che):
miss_str += "," + str(i)
print(miss_str)
return miss_str
def check_params(path, param_hash, prefix, param_prefix="parameters_"):
"""
Check if, for sfcf, the parameter hashes at the end of the parameter files are in fact the expected one.
Parameters
----------
path: str
measurement path, same as for sfcf read method
param_hash: str
expected parameter hash
prefix: str
data prefix to find the appropriate replicum folders in path
param_prefix: str
prefix of the parameter file. Defaults to 'parameters_'
Returns
-------
nums: dict
dictionary of faulty parameter files sorted by the replica paths
"""
ls = []
for (dirpath, dirnames, filenames) in os.walk(path):
ls.extend(dirnames)
break
if not ls:
raise Exception('Error, directory not found')
# Exclude folders with different names
for exc in ls:
if not fnmatch.fnmatch(exc, prefix + '*'):
ls = list(set(ls) - set([exc]))
ls = sort_names(ls)
nums = {}
for rep in ls:
rep_path = path + '/' + rep
# files of replicum
sub_ls = []
for (dirpath, dirnames, filenames) in os.walk(rep_path):
sub_ls.extend(filenames)
# filter
param_files = []
for file in sub_ls:
if fnmatch.fnmatch(file, param_prefix + '*'):
param_files.append(file)
rep_nums = ''
for file in param_files:
with open(rep_path + '/' + file) as fp:
for line in fp:
pass
last_line = line
if last_line.split()[2] != param_hash:
rep_nums += file.split("_")[1] + ','
nums[rep_path] = rep_nums
if not len(rep_nums) == 0:
raise Warning("found differing parameter hash in the param files in " + rep_path)
return nums
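A minimal call sketch for this helper; the measurement path, expected hash and data prefix below are placeholders:

>>> from pyerrors.input.utils import check_params
>>> faulty = check_params("./sfcf_data", "expected_hash_value", "run_")
>>> print(faulty)  # dict mapping each replica path to the ids of parameter files with a differing hash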


@ -271,12 +271,6 @@ def eig(obs, **kwargs):
return w
def eigv(obs, **kwargs):
"""Computes the eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
return v
def pinv(obs, **kwargs):
"""Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)


@ -20,7 +20,7 @@ def print_config():
"pandas": pd.__version__}
for key, value in config.items():
print(f"{key: <10}\t {value}")
print(f"{key : <10}\t {value}")
def errorbar(x, y, axes=plt, **kwargs):


@ -82,8 +82,6 @@ class Obs:
raise ValueError('Names are not unique.')
if not all(isinstance(x, str) for x in names):
raise TypeError('All names have to be strings.')
if len(set([o.split('|')[0] for o in names])) > 1:
raise ValueError('Cannot initialize Obs based on multiple ensembles. Please average separate Obs from each ensemble.')
else:
if not isinstance(names[0], str):
raise TypeError('All names have to be strings.')
@ -106,9 +104,7 @@ class Obs:
elif isinstance(idx, (list, np.ndarray)):
dc = np.unique(np.diff(idx))
if np.any(dc < 0):
raise ValueError("Unsorted idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]])))
elif np.any(dc == 0):
raise ValueError("Duplicate entries in idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]])))
raise ValueError("Unsorted idx for idl[%s]" % (name))
if len(dc) == 1:
self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
else:
@ -224,7 +220,7 @@ class Obs:
tmp = kwargs.get(kwarg_name)
if isinstance(tmp, (int, float)):
if tmp < 0:
raise ValueError(kwarg_name + ' has to be larger or equal to 0.')
raise Exception(kwarg_name + ' has to be larger or equal to 0.')
for e, e_name in enumerate(self.e_names):
getattr(self, kwarg_name)[e_name] = tmp
else:
@ -293,7 +289,7 @@ class Obs:
texp = self.tau_exp[e_name]
# Critical slowing down analysis
if w_max // 2 <= 1:
raise ValueError("Need at least 8 samples for tau_exp error analysis")
raise Exception("Need at least 8 samples for tau_exp error analysis")
for n in range(1, w_max // 2):
_compute_drho(n + 1)
if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
@ -622,7 +618,7 @@ class Obs:
if not hasattr(self, 'e_dvalue'):
raise Exception('Run the gamma method first.')
if np.isclose(0.0, self._dvalue, atol=1e-15):
raise ValueError('Error is 0.0')
raise Exception('Error is 0.0')
labels = self.e_names
sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
fig1, ax1 = plt.subplots()
@ -661,7 +657,7 @@ class Obs:
with open(file_name + '.p', 'wb') as fb:
pickle.dump(self, fb)
else:
raise TypeError("Unknown datatype " + str(datatype))
raise Exception("Unknown datatype " + str(datatype))
def export_jackknife(self):
"""Export jackknife samples from the Obs
@ -678,7 +674,7 @@ class Obs:
"""
if len(self.names) != 1:
raise ValueError("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
name = self.names[0]
full_data = self.deltas[name] + self.r_values[name]
@ -713,7 +709,7 @@ class Obs:
should agree with samples from a full bootstrap analysis up to O(1/N).
"""
if len(self.names) != 1:
raise ValueError("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
name = self.names[0]
length = self.N
@ -788,8 +784,6 @@ class Obs:
else:
if isinstance(y, np.ndarray):
return np.array([self + o for o in y])
elif isinstance(y, complex):
return CObs(self, 0) + y
elif y.__class__.__name__ in ['Corr', 'CObs']:
return NotImplemented
else:
@ -858,12 +852,15 @@ class Obs:
def __pow__(self, y):
if isinstance(y, Obs):
return derived_observable(lambda x, **kwargs: x[0] ** x[1], [self, y], man_grad=[y.value * self.value ** (y.value - 1), self.value ** y.value * np.log(self.value)])
return derived_observable(lambda x: x[0] ** x[1], [self, y])
else:
return derived_observable(lambda x, **kwargs: x[0] ** y, [self], man_grad=[y * self.value ** (y - 1)])
return derived_observable(lambda x: x[0] ** y, [self])
def __rpow__(self, y):
return derived_observable(lambda x, **kwargs: y ** x[0], [self], man_grad=[y ** self.value * np.log(y)])
if isinstance(y, Obs):
return derived_observable(lambda x: x[0] ** x[1], [y, self])
else:
return derived_observable(lambda x: y ** x[0], [self])
def __abs__(self):
return derived_observable(lambda x: anp.abs(x[0]), [self])
@ -1137,7 +1134,7 @@ def _intersection_idx(idl):
return idinter
def _expand_deltas_for_merge(deltas, idx, shape, new_idx, scalefactor):
def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
"""Expand deltas defined on idx to the list of configs that is defined by new_idx.
New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
common divisor of the step sizes is used as new step size.
@ -1153,20 +1150,15 @@ def _expand_deltas_for_merge(deltas, idx, shape, new_idx, scalefactor):
Number of configs in idx.
new_idx : list
List of configs that defines the new range, has to be sorted in ascending order.
scalefactor : float
An additional scaling factor that can be applied to scale the fluctuations,
e.g., when Obs with differing numbers of replica are merged.
"""
if type(idx) is range and type(new_idx) is range:
if idx == new_idx:
if scalefactor == 1:
return deltas
else:
return deltas * scalefactor
return deltas
ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
for i in range(shape):
ret[idx[i] - new_idx[0]] = deltas[i]
return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) * scalefactor
return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx)
def derived_observable(func, data, array_mode=False, **kwargs):
@ -1247,29 +1239,10 @@ def derived_observable(func, data, array_mode=False, **kwargs):
new_r_values[name] = func(tmp_values, **kwargs)
new_idl_d[name] = _merge_idx(idl)
def _compute_scalefactor_missing_rep(obs):
"""
Computes the scale factor that is to be multiplied with the deltas
in the case where Obs with different subsets of replica are merged.
Returns a dictionary with the scale factor for each Monte Carlo name.
Parameters
----------
obs : Obs
The observable corresponding to the deltas that are to be scaled
"""
scalef_d = {}
for mc_name in obs.mc_names:
mc_idl_d = [name for name in obs.idl if name.startswith(mc_name + '|')]
new_mc_idl_d = [name for name in new_idl_d if name.startswith(mc_name + '|')]
if len(mc_idl_d) > 0 and len(mc_idl_d) < len(new_mc_idl_d):
scalef_d[mc_name] = sum([len(new_idl_d[name]) for name in new_mc_idl_d]) / sum([len(new_idl_d[name]) for name in mc_idl_d])
return scalef_d
if 'man_grad' in kwargs:
deriv = np.asarray(kwargs.get('man_grad'))
if new_values.shape + data.shape != deriv.shape:
raise ValueError('Manual derivative does not have correct shape.')
raise Exception('Manual derivative does not have correct shape.')
elif kwargs.get('num_grad') is True:
if multi > 0:
raise Exception('Multi mode currently not supported for numerical derivative')
@ -1303,7 +1276,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
d_extracted[name] = []
ens_length = len(new_idl_d[name])
for i_dat, dat in enumerate(data):
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name], _compute_scalefactor_missing_rep(o).get(name.split('|')[0], 1)) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
for name in new_cov_names:
g_extracted[name] = []
zero_grad = _Zero_grad(new_covobs_lengths[name])
@ -1325,17 +1298,16 @@ def derived_observable(func, data, array_mode=False, **kwargs):
new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
else:
for j_obs, obs in np.ndenumerate(data):
scalef_d = _compute_scalefactor_missing_rep(obs)
for name in obs.names:
if name in obs.cov_names:
new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
else:
new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name], scalef_d.get(name.split('|')[0], 1))
new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
raise ValueError('The same name has been used for deltas and covobs!')
raise Exception('The same name has been used for deltas and covobs!')
new_samples = []
new_means = []
new_idl = []
@ -1376,7 +1348,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
Has to be a subset of idx_old.
"""
if not len(deltas) == len(idx_old):
raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
if type(idx_old) is range and type(idx_new) is range:
if idx_old == idx_new:
return deltas
@ -1384,7 +1356,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
return deltas
indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
if len(indices) < len(idx_new):
raise ValueError('Error in _reduce_deltas: Config of idx_new not in idx_old')
raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old')
return np.array(deltas)[indices]
@ -1406,14 +1378,12 @@ def reweight(weight, obs, **kwargs):
result = []
for i in range(len(obs)):
if len(obs[i].cov_names):
raise ValueError('Error: Not possible to reweight an Obs that contains covobs!')
raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
if not set(obs[i].names).issubset(weight.names):
raise ValueError('Error: Ensembles do not fit')
if len(obs[i].mc_names) > 1 or len(weight.mc_names) > 1:
raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.')
raise Exception('Error: Ensembles do not fit')
for name in obs[i].names:
if not set(obs[i].idl[name]).issubset(weight.idl[name]):
raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
new_samples = []
w_deltas = {}
for name in sorted(obs[i].names):
@ -1446,21 +1416,18 @@ def correlate(obs_a, obs_b):
-----
Keep in mind to only correlate primary observables which have not been reweighted
yet. The reweighting has to be applied after correlating the observables.
Only works if a single ensemble is present in the Obs.
Currently only works if ensemble content is identical (this is not strictly necessary).
Currently only works if ensembles are identical (this is not strictly necessary).
"""
if len(obs_a.mc_names) > 1 or len(obs_b.mc_names) > 1:
raise ValueError('Error: Cannot correlate Obs that contain multiple ensembles.')
if sorted(obs_a.names) != sorted(obs_b.names):
raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
if len(obs_a.cov_names) or len(obs_b.cov_names):
raise ValueError('Error: Not possible to correlate Obs that contain covobs!')
raise Exception('Error: Not possible to correlate Obs that contain covobs!')
for name in obs_a.names:
if obs_a.shape[name] != obs_b.shape[name]:
raise ValueError('Shapes of ensemble', name, 'do not fit')
raise Exception('Shapes of ensemble', name, 'do not fit')
if obs_a.idl[name] != obs_b.idl[name]:
raise ValueError('idl of ensemble', name, 'do not fit')
raise Exception('idl of ensemble', name, 'do not fit')
if obs_a.reweighted is True:
warnings.warn("The first observable is already reweighted.", RuntimeWarning)
@ -1548,92 +1515,6 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
return cov
def invert_corr_cov_cholesky(corr, inverrdiag):
"""Constructs a lower triangular matrix `chol` via the Cholesky decomposition of the correlation matrix `corr`
and then returns the inverse covariance matrix `chol_inv` as a lower triangular matrix by solving `chol * x = inverrdiag`.
Parameters
----------
corr : np.ndarray
correlation matrix
inverrdiag : np.ndarray
diagonal matrix, the entries are the inverse errors of the data points considered
"""
condn = np.linalg.cond(corr)
if condn > 0.1 / np.finfo(float).eps:
raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
if condn > 1e13:
warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
chol = np.linalg.cholesky(corr)
chol_inv = scipy.linalg.solve_triangular(chol, inverrdiag, lower=True)
return chol_inv
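As a rough usage sketch, assuming the helper is exposed as pyerrors.obs.invert_corr_cov_cholesky and that y is a list of Obs on which the gamma method has been run (mirroring the test code further below), the inverse Cholesky factor can be computed once and handed to a correlated fit:

>>> import numpy as np
>>> import pyerrors as pe
>>> corr = pe.covariance(y, correlation=True)                    # correlation matrix of the data
>>> inverrdiag = np.diag(1 / np.asarray([o.dvalue for o in y]))  # diagonal matrix of inverse errors
>>> chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag)
>>> fit_res = pe.least_squares(x, y, func, correlated_fit=True, inv_chol_cov_matrix=[chol_inv, [""]])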
def sort_corr(corr, kl, yd):
""" Reorders a correlation matrix to match the alphabetical order of its underlying y data.
The ordering of the input correlation matrix `corr` is given by the list of keys `kl`.
The input dictionary `yd` (with the same keys `kl`) must contain the corresponding y data
that the correlation matrix is based on.
This function sorts the list of keys `kl` alphabetically and sorts the matrix `corr`
according to this alphabetical order such that the sorted matrix `corr_sorted` corresponds
to the y data `yd` when arranged in an alphabetical order by its keys.
Parameters
----------
corr : np.ndarray
A square correlation matrix constructed using the order of the y data specified by `kl`.
The dimensions of `corr` should match the total number of y data points in `yd` combined.
kl : list of str
A list of keys that denotes the order in which the y data from `yd` was used to build the
input correlation matrix `corr`.
yd : dict of list
A dictionary where each key corresponds to a unique identifier, and its value is a list of
y data points. The total number of y data points across all keys must match the dimensions
of `corr`. The lists in the dictionary can be lists of Obs.
Returns
-------
np.ndarray
A new, sorted correlation matrix that corresponds to the y data from `yd` when arranged alphabetically by its keys.
Example
-------
>>> import numpy as np
>>> import pyerrors as pe
>>> corr = np.array([[1, 0.2, 0.3], [0.2, 1, 0.4], [0.3, 0.4, 1]])
>>> kl = ['b', 'a']
>>> yd = {'a': [1, 2], 'b': [3]}
>>> sorted_corr = pe.obs.sort_corr(corr, kl, yd)
>>> print(sorted_corr)
array([[1. , 0.3, 0.4],
[0.3, 1. , 0.2],
[0.4, 0.2, 1. ]])
"""
kl_sorted = sorted(kl)
posd = {}
ofs = 0
for ki, k in enumerate(kl):
posd[k] = [i + ofs for i in range(len(yd[k]))]
ofs += len(posd[k])
mapping = []
for k in kl_sorted:
for i in range(len(yd[k])):
mapping.append(posd[k][i])
corr_sorted = np.zeros_like(corr)
for i in range(corr.shape[0]):
for j in range(corr.shape[0]):
corr_sorted[i][j] = corr[mapping[i]][mapping[j]]
return corr_sorted
def _smooth_eigenvalues(corr, E):
"""Eigenvalue smoothing as described in hep-lat/9412087
@ -1643,7 +1524,7 @@ def _smooth_eigenvalues(corr, E):
Number of eigenvalues to be left substantially unchanged
"""
if not (2 < E < corr.shape[0] - 1):
raise ValueError(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
vals, vec = np.linalg.eigh(corr)
lambda_min = np.mean(vals[:-E])
vals[vals < lambda_min] = lambda_min
@ -1762,11 +1643,7 @@ def import_bootstrap(boots, name, random_numbers):
def merge_obs(list_of_obs):
"""Combine all observables in list_of_obs into one new observable.
This allows to merge Obs that have been computed on multiple replica
of the same ensemble.
If you like to merge Obs that are based on several ensembles, please
average them yourself.
"""Combine all observables in list_of_obs into one new observable
Parameters
----------
@ -1779,9 +1656,9 @@ def merge_obs(list_of_obs):
"""
replist = [item for obs in list_of_obs for item in obs.names]
if (len(replist) == len(set(replist))) is False:
raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist)))
raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
if any([len(o.cov_names) for o in list_of_obs]):
raise ValueError('Not possible to merge data that contains covobs!')
raise Exception('Not possible to merge data that contains covobs!')
new_dict = {}
idl_dict = {}
for o in list_of_obs:
@ -1832,7 +1709,7 @@ def cov_Obs(means, cov, name, grad=None):
for i in range(len(means)):
ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
if ol[0].covobs[name].N != len(means):
raise ValueError('You have to provide %d mean values!' % (ol[0].N))
raise Exception('You have to provide %d mean values!' % (ol[0].N))
if len(ol) == 1:
return ol[0]
return ol
@ -1848,7 +1725,7 @@ def _determine_gap(o, e_content, e_name):
gap = min(gaps)
if not np.all([gi % gap == 0 for gi in gaps]):
raise ValueError(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
return gap


@ -1,23 +0,0 @@
import scipy
import numpy as np
from autograd.extend import primitive, defvjp
from autograd.scipy.special import j0, y0, j1, y1, jn, yn, i0, i1, iv, ive, beta, betainc, betaln
from autograd.scipy.special import polygamma, psi, digamma, gamma, gammaln, gammainc, gammaincc, gammasgn, rgamma, multigammaln
from autograd.scipy.special import erf, erfc, erfinv, erfcinv, logit, expit, logsumexp
__all__ = ["beta", "betainc", "betaln",
"polygamma", "psi", "digamma", "gamma", "gammaln", "gammainc", "gammaincc", "gammasgn", "rgamma", "multigammaln",
"kn", "j0", "y0", "j1", "y1", "jn", "yn", "i0", "i1", "iv", "ive",
"erf", "erfc", "erfinv", "erfcinv", "logit", "expit", "logsumexp"]
@primitive
def kn(n, x):
"""Modified Bessel function of the second kind of integer order n"""
if int(n) != n:
raise TypeError("The order 'n' needs to be an integer.")
return scipy.special.kn(n, x)
defvjp(kn, None, lambda ans, n, x: lambda g: - g * 0.5 * (kn(np.abs(n - 1), x) + kn(n + 1, x)))

View file

@ -1 +1 @@
__version__ = "2.15.0-dev"
__version__ = "2.9.0"


@ -1,6 +1,3 @@
[build-system]
requires = ["setuptools >= 63.0.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.ruff.lint]
ignore = ["F403"]


@ -24,19 +24,18 @@ setup(name='pyerrors',
author_email='fabian.joswig@ed.ac.uk',
license="MIT",
packages=find_packages(),
python_requires='>=3.9.0',
install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2'],
python_requires='>=3.8.0',
install_requires=['numpy>=1.24', 'autograd>=1.6.2', 'numdifftools>=0.9.41', 'matplotlib>=3.7', 'scipy>=1.10', 'iminuit>=2.21', 'h5py>=3.8', 'lxml>=4.9', 'python-rapidjson>=1.10', 'pandas>=2.0'],
extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: 3.13',
'Topic :: Scientific/Engineering :: Physics'
],
)


@ -129,7 +129,7 @@ def test_m_eff():
with pytest.warns(RuntimeWarning):
my_corr.m_eff('sinh')
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_corr.m_eff('unkown_variant')
@ -140,7 +140,7 @@ def test_m_eff_negative_values():
assert m_eff_log[padding + 1] is None
m_eff_cosh = my_corr.m_eff('cosh')
assert m_eff_cosh[padding + 1] is None
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_corr.m_eff('logsym')
@ -155,7 +155,7 @@ def test_correlate():
my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
corr1 = my_corr.correlate(my_corr)
corr2 = my_corr.correlate(my_corr[0])
with pytest.raises(TypeError):
with pytest.raises(Exception):
corr3 = my_corr.correlate(7.3)
@ -176,9 +176,9 @@ def test_fit_correlator():
assert fit_res[0] == my_corr[0]
assert fit_res[1] == my_corr[1] - my_corr[0]
with pytest.raises(TypeError):
with pytest.raises(Exception):
my_corr.fit(f, "from 0 to 3")
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_corr.fit(f, [0, 2, 3])
@ -200,17 +200,17 @@ def test_padded_correlator():
def test_corr_exceptions():
obs_a = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'])
obs_b = pe.Obs([np.random.normal(0.1, 0.1, 99)], ['test'])
obs_b= pe.Obs([np.random.normal(0.1, 0.1, 99)], ['test'])
with pytest.raises(Exception):
pe.Corr([obs_a, obs_b])
obs_a = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'])
obs_b = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'], idl=[range(1, 200, 2)])
obs_b= pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'], idl=[range(1, 200, 2)])
with pytest.raises(Exception):
pe.Corr([obs_a, obs_b])
obs_a = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'])
obs_b = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test2'])
obs_b= pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test2'])
with pytest.raises(Exception):
pe.Corr([obs_a, obs_b])
@ -256,11 +256,11 @@ def test_prange():
corr = pe.correlators.Corr(corr_content)
corr.set_prange([2, 4])
with pytest.raises(ValueError):
with pytest.raises(Exception):
corr.set_prange([2])
with pytest.raises(TypeError):
with pytest.raises(Exception):
corr.set_prange([2, 2.3])
with pytest.raises(ValueError):
with pytest.raises(Exception):
corr.set_prange([4, 1])
@ -436,7 +436,6 @@ def test_GEVP_solver():
sp_vecs = [v / np.sqrt((v.T @ mat2 @ v)) for v in sp_vecs]
assert np.allclose(sp_vecs, pe.correlators._GEVP_solver(mat1, mat2), atol=1e-14)
assert np.allclose(np.abs(sp_vecs), np.abs(pe.correlators._GEVP_solver(mat1, mat2, method='cholesky')))
def test_GEVP_none_entries():
@ -553,7 +552,7 @@ def test_corr_no_filtering():
li = [-pe.pseudo_Obs(.2, .1, 'a', samples=10) for i in range(96)]
for i in range(len(li)):
li[i].idl['a'] = range(1, 21, 2)
c = pe.Corr(li)
c= pe.Corr(li)
b = pe.pseudo_Obs(1, 1e-11, 'a', samples=30)
c *= b
assert np.all([c[0].idl == o.idl for o in c])
@ -573,28 +572,6 @@ def test_corr_symmetric():
assert scorr[0] == corr[0]
def test_error_GEVP():
corr = pe.input.json.load_json("tests/data/test_matrix_corr.json.gz")
t0, ts, state = 3, 6, 0
vec_regular = corr.GEVP(t0=t0)[state][ts]
vec_errors = corr.GEVP(t0=t0, vector_obs=True, auto_gamma=True)[state][ts]
vec_regular_chol = corr.GEVP(t0=t0, method='cholesky')[state][ts]
print(vec_errors)
print(type(vec_errors[0]))
assert(np.isclose(np.asarray([e.value for e in vec_errors]), vec_regular).all())
assert(all([e.dvalue > 0. for e in vec_errors]))
projected_regular = corr.projected(vec_regular).content[ts + 1][0]
projected_errors = corr.projected(vec_errors).content[ts + 1][0]
projected_regular.gamma_method()
projected_errors.gamma_method()
assert(projected_errors.dvalue > projected_regular.dvalue)
assert(corr.GEVP(t0, vector_obs=True)[state][42] is None)
assert(np.isclose(vec_regular_chol, vec_regular).all())
assert(np.isclose(corr.GEVP(t0=t0, state=state)[ts], vec_regular).all())
def test_corr_array_ndim1_init():
y = [pe.pseudo_Obs(2 + np.random.normal(0.0, 0.1), .1, 't') for i in np.arange(5)]
cc1 = pe.Corr(y)
@ -711,6 +688,7 @@ def test_matrix_trace():
for el in corr.trace():
assert el == 0
with pytest.raises(ValueError):
corr.item(0, 0).trace()
@ -771,13 +749,3 @@ def test_corr_item():
corr_mat = pe.Corr(np.array([[corr_aa, corr_ab], [corr_ab, corr_aa]]))
corr_mat.item(0, 0)
assert corr_mat[0].item(0, 1) == corr_mat.item(0, 1)[0]
def test_complex_add_and_mul():
o = pe.pseudo_Obs(1.0, 0.3, "my_r345sfg16£$%&$%^%$^$", samples=47)
co = pe.CObs(o, 0.341 * o)
for obs in [o, co]:
cc = pe.Corr([obs for _ in range(4)])
cc += 2j
cc = cc * 4j
cc.real + cc.imag

Binary file not shown.


@ -30,7 +30,7 @@ def test_grid_dirac():
'SigmaYZ',
'SigmaZT']:
pe.dirac.Grid_gamma(gamma)
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.dirac.Grid_gamma('Not a gamma matrix')
@ -44,7 +44,7 @@ def test_epsilon_tensor():
(1, 1, 3) : 0.0}
for key, value in check.items():
assert pe.dirac.epsilon_tensor(*key) == value
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.dirac.epsilon_tensor(0, 1, 3)
@ -59,5 +59,5 @@ def test_epsilon_tensor_rank4():
(1, 2, 3, 1) : 0.0}
for key, value in check.items():
assert pe.dirac.epsilon_tensor_rank4(*key) == value
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)


@ -152,127 +152,6 @@ def test_alternative_solvers():
chisquare_values = np.array(chisquare_values)
assert np.all(np.isclose(chisquare_values, chisquare_values[0]))
def test_inv_cov_matrix_input_least_squares():
num_samples = 400
N = 10
x = norm.rvs(size=(N, num_samples)) # generate random numbers
r = np.zeros((N, N))
for i in range(N):
for j in range(N):
r[i, j] = np.exp(-0.8 * np.fabs(i - j)) # element in correlation matrix
errl = np.sqrt([3.4, 2.5, 3.6, 2.8, 4.2, 4.7, 4.9, 5.1, 3.2, 4.2]) # set y errors
for i in range(N):
for j in range(N):
r[i, j] *= errl[i] * errl[j] # element in covariance matrix
c = cholesky(r, lower=True)
y = np.dot(c, x)
x = np.arange(N)
x_dict = {}
y_dict = {}
for i,item in enumerate(x):
x_dict[str(item)] = [x[i]]
for linear in [True, False]:
data = []
for i in range(N):
if linear:
data.append(pe.Obs([[i + 1 + o for o in y[i]]], ['ens']))
else:
data.append(pe.Obs([[np.exp(-(i + 1)) + np.exp(-(i + 1)) * o for o in y[i]]], ['ens']))
[o.gamma_method() for o in data]
data_dict = {}
for i,item in enumerate(x):
data_dict[str(item)] = [data[i]]
corr = pe.covariance(data, correlation=True)
chol = np.linalg.cholesky(corr)
covdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
chol_inv_keys = [""]
chol_inv_keys_combined_fit = [str(item) for i,item in enumerate(x)]
if linear:
def fitf(p, x):
return p[1] + p[0] * x
fitf_dict = {}
for i,item in enumerate(x):
fitf_dict[str(item)] = fitf
else:
def fitf(p, x):
return p[1] * anp.exp(-p[0] * x)
fitf_dict = {}
for i,item in enumerate(x):
fitf_dict[str(item)] = fitf
fitpc = pe.least_squares(x, data, fitf, correlated_fit=True)
fitp_inv_cov = pe.least_squares(x, data, fitf, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,chol_inv_keys])
fitp_inv_cov_combined_fit = pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,chol_inv_keys_combined_fit])
for i in range(2):
diff_inv_cov = fitp_inv_cov[i] - fitpc[i]
diff_inv_cov.gamma_method()
assert(diff_inv_cov.is_zero(atol=0.0))
diff_inv_cov_combined_fit = fitp_inv_cov_combined_fit[i] - fitpc[i]
diff_inv_cov_combined_fit.gamma_method()
assert(diff_inv_cov_combined_fit.is_zero(atol=1e-12))
with pytest.raises(ValueError):
pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [corr,chol_inv_keys_combined_fit])
def test_least_squares_invalid_inv_cov_matrix_input():
xvals = []
yvals = []
err = 0.1
def func_valid(a,x):
return a[0] + a[1] * x
for x in range(1, 8, 2):
xvals.append(x)
yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
[o.gamma_method() for o in yvals]
#dictionaries for a combined fit
xvals_dict = { }
yvals_dict = { }
for i,item in enumerate(np.arange(1, 8, 2)):
xvals_dict[str(item)] = [xvals[i]]
yvals_dict[str(item)] = [yvals[i]]
chol_inv_keys_combined_fit = ['1', '3', '5', '7']
chol_inv_keys_combined_fit_invalid = ['2', '7', '100', '8']
func_dict_valid = {"1": func_valid,"3": func_valid,"5": func_valid,"7": func_valid}
corr_valid = pe.covariance(yvals, correlation = True)
chol = np.linalg.cholesky(corr_valid)
covdiag = np.diag(1 / np.asarray([o.dvalue for o in yvals]))
chol_inv_valid = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
chol_inv_keys = [""]
pe.least_squares(xvals, yvals,func_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys])
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit])
chol_inv_invalid_shape1 = np.zeros((len(yvals),len(yvals)-1))
chol_inv_invalid_shape2 = np.zeros((len(yvals)+2,len(yvals)))
# for an uncombined fit
with pytest.raises(TypeError):
pe.least_squares(xvals, yvals, func_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape1,chol_inv_keys])
with pytest.raises(TypeError):
pe.least_squares(xvals, yvals, func_valid,correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape2,chol_inv_keys])
with pytest.raises(ValueError):
pe.least_squares(xvals, yvals, func_valid,correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit_invalid])
#repeat for a combined fit
with pytest.raises(TypeError):
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape1,chol_inv_keys_combined_fit])
with pytest.raises(TypeError):
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape2,chol_inv_keys_combined_fit])
with pytest.raises(ValueError):
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit_invalid])
def test_correlated_fit():
num_samples = 400
@ -1085,20 +964,6 @@ def test_combined_resplot_qqplot():
fr = pe.least_squares(xd, yd, fd, resplot=True, qqplot=True)
plt.close('all')
def test_combined_fit_xerr():
fitd = {
'a' : lambda p, x: p[0] * x[0] + p[1] * x[1],
'b' : lambda p, x: p[0] * x[0] + p[2] * x[1],
'c' : lambda p, x: p[0] * x[0] + p[3] * x[1],
}
yd = {
'a': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'a' + str(i)) for i in range(5)],
'b': [pe.cov_Obs(1 + .1 * np.random.uniform(), .1**2, 'b' + str(i)) for i in range(6)],
'c': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'c' + str(i)) for i in range(3)],
}
xd = {k: np.transpose([[1 + .01 * np.random.uniform(), 2] for i in range(len(yd[k]))]) for k in fitd}
pe.fits.least_squares(xd, yd, fitd)
def test_x_multidim_fit():
x1 = np.arange(1, 10)
@ -1277,53 +1142,6 @@ def test_fit_dof():
assert cd[0] != cd[0] # Check for nan
assert np.all(np.array(cd[1:]) > 0)
N = 5
def fitf(a, x):
return a[0] + 0 * x
def fitf_multi(a, x):
return a[0] + 0 * x[0] + 0*x[1]
for priors in [None, [pe.cov_Obs(3, 1, 'p')]]:
if priors is None:
lp = 0
else:
lp = len(priors)
x = [1. for i in range(N)]
y = [pe.cov_Obs(i, .1, '%d' % (i)) for i in range(N)]
[o.gm() for o in y]
res = pe.fits.least_squares(x, y, fitf, expected_chisquare=True, priors=priors)
assert(res.dof == N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))
kl = ['a', 'b']
x = {k: [1. for i in range(N)] for k in kl}
y = {k: [pe.cov_Obs(i, .1, '%d%s' % (i, k)) for i in range(N)] for k in kl}
[[o.gm() for o in y[k]] for k in y]
res = pe.fits.least_squares(x, y, {k: fitf for k in kl}, expected_chisquare=True, priors=priors)
assert(res.dof == 2 * N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))
x = np.array([[1., 2.] for i in range(N)]).T
y = [pe.cov_Obs(i, .1, '%d' % (i)) for i in range(N)]
[o.gm() for o in y]
res = pe.fits.least_squares(x, y, fitf_multi, expected_chisquare=True, priors=priors)
assert(res.dof == N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))
x = {k: np.array([[1., 2.] for i in range(N)]).T for k in kl}
y = {k: [pe.cov_Obs(i, .1, '%d%s' % (i, k)) for i in range(N)] for k in kl}
[[o.gm() for o in y[k]] for k in y]
res = pe.fits.least_squares(x, y, {k: fitf_multi for k in kl}, expected_chisquare=True, priors=priors)
assert(res.dof == 2 * N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))
def test_combined_fit_constant_shape():
N1 = 16


@ -12,7 +12,7 @@ def test_jsonio():
o = pe.pseudo_Obs(1.0, .2, 'one')
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
o4 = pe.merge_obs([o2, o3])
otag = 'This has been merged!'
o4.tag = otag
do = o - .2 * o4
@ -101,8 +101,8 @@ def test_json_string_reconstruction():
def test_json_corr_io():
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]), my_list)
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']), my_list)
for obs_list in [my_list, rw_list]:
for tag in [None, "test"]:
@ -111,51 +111,40 @@ def test_json_corr_io():
for corr_tag in [None, 'my_Corr_tag']:
for prange in [None, [3, 6]]:
for gap in [False, True]:
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
my_corr = mult * pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr.tag = corr_tag
if gap:
my_corr.content[4] = None
pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr')
os.remove('corr.json.gz')
assert np.all([o.is_zero() for o in [x for x in (my_corr - recover) if x is not None]])
for index, entry in enumerate(my_corr):
if entry is None:
assert recover[index] is None
assert my_corr.tag == recover.tag
assert my_corr.prange == recover.prange
assert my_corr.reweighted == recover.reweighted
my_corr = pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr.tag = corr_tag
if gap:
my_corr.content[4] = None
pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr')
os.remove('corr.json.gz')
assert np.all([o.is_zero() for o in [x for x in (my_corr - recover) if x is not None]])
for index, entry in enumerate(my_corr):
if entry is None:
assert recover[index] is None
assert my_corr.tag == recover.tag
assert my_corr.prange == recover.prange
assert my_corr.reweighted == recover.reweighted
def test_json_corr_2d_io():
obs_list = [np.array([
[
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321)]),
],
[
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321),]),
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
],
]) for i in range(4)]
obs_list = [np.array([[pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'), pe.pseudo_Obs(0.0, 0.1 * i, 'test')], [pe.pseudo_Obs(0.0, 0.1 * i, 'test'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')]]) for i in range(4)]
for tag in [None, "test"]:
obs_list[3][0, 1].tag = tag
for padding in [0, 1]:
for prange in [None, [3, 6]]:
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
my_corr = mult * pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr.tag = tag
pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr')
os.remove('corr.json.gz')
assert np.all([np.all([o.is_zero() for o in q]) for q in [x.ravel() for x in (my_corr - recover) if x is not None]])
for index, entry in enumerate(my_corr):
if entry is None:
assert recover[index] is None
assert my_corr.tag == recover.tag
assert my_corr.prange == recover.prange
my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr.tag = tag
pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr')
os.remove('corr.json.gz')
assert np.all([np.all([o.is_zero() for o in q]) for q in [x.ravel() for x in (my_corr - recover) if x is not None]])
for index, entry in enumerate(my_corr):
if entry is None:
assert recover[index] is None
assert my_corr.tag == recover.tag
assert my_corr.prange == recover.prange
def test_json_dict_io():
@ -222,7 +211,6 @@ def test_json_dict_io():
'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'),
'se': None,
'sf': 1.2,
'k': pe.cov_Obs(.1, .001**2, 'cov') * pe.merge_obs([pe.pseudo_Obs(1.0, 0.1, 'test|r2'), pe.pseudo_Obs(1.0, 0.1, 'test|r1', samples=321)]),
}
}
@ -326,7 +314,7 @@ def test_dobsio():
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
o4 = pe.merge_obs([o2, o3])
otag = 'This has been merged!'
o4.tag = otag
do = o - .2 * o4
@ -340,7 +328,7 @@ def test_dobsio():
o5 /= co2[0]
o5.tag = 2 * otag
tt1 = pe.Obs([np.random.rand(100), np.random.rand(102)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 226, 2)])
tt1 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 222, 2)])
tt3 = pe.Obs([np.random.rand(102)], ['qe|r1'])
tt = tt1 + tt3
@ -349,7 +337,7 @@ def test_dobsio():
tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)])
ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt), o4.reweight(o4)]
ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt)]
print(ol)
fname = 'test_rw'
@ -374,12 +362,9 @@ def test_dobsio():
def test_reconstruct_non_linear_r_obs(tmp_path):
to = (
pe.Obs([np.random.rand(500), np.random.rand(1200)],
["e|r1", "e|r2", ],
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
to = np.log(to ** 2) / to
to.dump((tmp_path / "test_equality").as_posix())
ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())
@ -387,12 +372,9 @@ def test_reconstruct_non_linear_r_obs(tmp_path):
def test_reconstruct_non_linear_r_obs_list(tmp_path):
to = (
pe.Obs([np.random.rand(500), np.random.rand(1200)],
["e|r1", "e|r2", ],
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
to = np.log(to ** 2) / to
for to_list in [[to, to, to], np.array([to, to, to])]:
pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())


@ -34,7 +34,7 @@ def test_matmul():
my_list = []
length = 100 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_array = const * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt):
@ -43,8 +43,8 @@ def test_matmul():
my_list = []
length = 100 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_array = np.array(my_list).reshape((dim, dim)) * const
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt):
@ -151,7 +151,7 @@ def test_multi_dot():
my_list = []
length = 1000 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt):
@ -160,8 +160,8 @@ def test_multi_dot():
my_list = []
length = 1000 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt):
@ -209,7 +209,7 @@ def test_irregular_matrix_inverse():
for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]:
irregular_array = []
for i in range(dim ** 2):
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]) + pe.Obs([np.random.normal(0.25, 0.1, 10)], ['ens2'], idl=[range(1, 11)]))
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')
invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T
@ -276,10 +276,10 @@ def test_matrix_functions():
for (i, j), entry in np.ndenumerate(check_inv):
entry.gamma_method()
if(i == j):
assert math.isclose(entry.value, 1.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
else:
assert math.isclose(entry.value, 0.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.dvalue, 0.0, abs_tol=2e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
# Check Cholesky decomposition
sym = np.dot(matrix, matrix.T)
@ -297,10 +297,6 @@ def test_matrix_functions():
for j in range(dim):
assert tmp[j].is_zero()
# Check eigv
v2 = pe.linalg.eigv(sym)
assert np.sum(v - v2).is_zero()
# Check eig function
e2 = pe.linalg.eig(sym)
assert np.all(np.sort(e) == np.sort(e2))


@ -5,7 +5,6 @@ import copy
import matplotlib.pyplot as plt
import pyerrors as pe
import pytest
import pyerrors.linalg
from hypothesis import given, strategies as st
np.random.seed(0)
@ -41,10 +40,6 @@ def test_Obs_exceptions():
pe.Obs([np.random.rand(4)], ['name'])
with pytest.raises(ValueError):
pe.Obs([np.random.rand(5)], ['1'], idl=[[5, 3, 2 ,4 ,1]])
with pytest.raises(ValueError):
pe.Obs([np.random.rand(5)], ['1'], idl=[[1, 2, 3, 3, 5]])
with pytest.raises(ValueError):
pe.Obs([np.random.rand(5)], ['1'], idl=[[1, 1, 3, 1, 5]])
with pytest.raises(TypeError):
pe.Obs([np.random.rand(5)], ['1'], idl=['t'])
with pytest.raises(ValueError):
@ -61,9 +56,9 @@ def test_Obs_exceptions():
my_obs.plot_rep_dist()
with pytest.raises(Exception):
my_obs.plot_piechart()
with pytest.raises(TypeError):
with pytest.raises(Exception):
my_obs.gamma_method(S='2.3')
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_obs.gamma_method(tau_exp=2.3)
my_obs.gamma_method()
my_obs.details()
@ -199,7 +194,7 @@ def test_gamma_method_no_windowing():
assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
obs.gamma_method(S=1.1)
assert obs.e_tauint['ens'] > 0.5
with pytest.raises(ValueError):
with pytest.raises(Exception):
obs.gamma_method(S=-0.2)
@ -333,7 +328,7 @@ def test_derived_observables():
def test_multi_ens():
names = ['A0', 'A1|r001', 'A1|r002']
test_obs = pe.Obs([np.random.rand(50)], names[:1]) + pe.Obs([np.random.rand(50), np.random.rand(50)], names[1:])
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
assert test_obs.e_names == ['A0', 'A1']
assert test_obs.e_content['A0'] == ['A0']
assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002']
@ -345,9 +340,6 @@ def test_multi_ens():
ensembles.append(str(i))
assert my_sum.e_names == sorted(ensembles)
with pytest.raises(ValueError):
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
def test_multi_ens2():
names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh']
@ -464,18 +456,6 @@ def test_cobs_overloading():
obs / cobs
def test_pow():
data = [1, 2.341, pe.pseudo_Obs(4.8, 0.48, "test_obs"), pe.cov_Obs(1.1, 0.3 ** 2, "test_cov_obs")]
for d in data:
assert d * d == d ** 2
assert d * d * d == d ** 3
for d2 in data:
assert np.log(d ** d2) == d2 * np.log(d)
assert (d ** d2) ** (1 / d2) == d
def test_reweighting():
my_obs = pe.Obs([np.random.rand(1000)], ['t'])
assert not my_obs.reweighted
@ -493,33 +473,26 @@ def test_reweighting():
r_obs2 = r_obs[0] * my_obs
assert r_obs2.reweighted
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.reweight(my_obs, [my_covobs])
my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.reweight(my_obs, [my_obs + my_obs2])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.reweight(my_irregular_obs, [my_obs])
my_merged_obs = my_obs + pe.Obs([np.random.rand(1000)], ['q'])
with pytest.raises(ValueError):
pe.reweight(my_merged_obs, [my_merged_obs])
def test_merge_obs():
my_obs1 = pe.Obs([np.random.normal(1, .1, 100)], ['t|1'])
my_obs2 = pe.Obs([np.random.normal(1, .1, 100)], ['t|2'], idl=[range(1, 200, 2)])
my_obs1 = pe.Obs([np.random.rand(100)], ['t'])
my_obs2 = pe.Obs([np.random.rand(100)], ['q'], idl=[range(1, 200, 2)])
merged = pe.merge_obs([my_obs1, my_obs2])
diff = merged - (my_obs2 + my_obs1) / 2
assert np.isclose(0, diff.value, atol=1e-16)
with pytest.raises(ValueError):
diff = merged - my_obs2 - my_obs1
assert diff == -(my_obs1.value + my_obs2.value) / 2
with pytest.raises(Exception):
pe.merge_obs([my_obs1, my_obs1])
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.merge_obs([my_obs1, my_covobs])
my_obs3 = pe.Obs([np.random.rand(100)], ['q|2'], idl=[range(1, 200, 2)])
with pytest.raises(ValueError):
pe.merge_obs([my_obs1, my_obs3])
@ -541,26 +514,23 @@ def test_correlate():
assert corr1 == corr2
my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_obs1, my_obs3)
my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_obs1, my_obs4)
my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
corr3 = pe.correlate(my_obs5, my_obs6)
assert my_obs5.idl == corr3.idl
my_obs7 = pe.Obs([np.random.rand(99)], ['q'])
with pytest.raises(ValueError):
pe.correlate(my_obs1, my_obs7)
my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_obs1, my_new_obs)
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_covobs, my_covobs)
r_obs = pe.reweight(my_obs1, [my_obs1])[0]
with pytest.warns(RuntimeWarning):
@ -579,11 +549,11 @@ def test_merge_idx():
for j in range(5):
idll = [range(1, int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))
assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))
for j in range(5):
idll = [range(int(round(np.random.uniform(1, 28))), int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))
assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))
idl = [list(np.arange(1, 14)) + list(range(16, 100, 4)), range(4, 604, 4), [2, 4, 5, 6, 8, 9, 12, 24], range(1, 20, 1), range(50, 789, 7)]
new_idx = pe.obs._merge_idx(idl)
@ -694,14 +664,14 @@ def test_gamma_method_irregular():
assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue)
arr2 = np.random.normal(1, .2, size=N)
afull = pe.Obs([arr], ['a1']) + pe.Obs([arr2], ['a2'])
afull = pe.Obs([arr, arr2], ['a1', 'a2'])
configs = np.ones_like(arr2)
for i in np.random.uniform(0, len(arr2), size=int(.8*N)):
configs[int(i)] = 0
zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0]
idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1]
a = pe.Obs([zero_arr], ['a1'], idl=[idx]) + pe.Obs([zero_arr2], ['a2'], idl=[idx2])
a = pe.Obs([zero_arr, zero_arr2], ['a1', 'a2'], idl=[idx, idx2])
afull.gamma_method()
a.gamma_method()
@ -787,7 +757,7 @@ def test_gamma_method_irregular():
my_obs.gm()
idl += [range(1, 400, 4)]
my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_obs.gm()
# check cases where tau is large compared to the chain length
@ -803,11 +773,11 @@ def test_gamma_method_irregular():
carr = gen_autocorrelated_array(arr, .8)
o = pe.Obs([carr], ['test'])
o.gamma_method()
no = np.nan * o
no = np.NaN * o
no.gamma_method()
o.idl['test'] = range(1, 1998, 2)
o.gamma_method()
no = np.nan * o
no = np.NaN * o
no.gamma_method()
@ -865,7 +835,7 @@ def test_covariance_vs_numpy():
data1 = np.random.normal(2.5, 0.2, N)
data2 = np.random.normal(0.5, 0.08, N)
data3 = np.random.normal(-178, 5, N)
uncorr = np.vstack([data1, data2, data3])
uncorr = np.row_stack([data1, data2, data3])
corr = np.random.multivariate_normal([0.0, 17, -0.0487], [[1.0, 0.6, -0.22], [0.6, 0.8, 0.01], [-0.22, 0.01, 1.9]], N).T
for X in [uncorr, corr]:
@ -1035,7 +1005,7 @@ def test_correlation_intersection_of_idls():
def test_covariance_non_identical_objects():
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000)], ["ens|r1", "ens|r2"]) + pe.Obs([np.random.normal(1.0, 0.1, 732)], ['ens2'])
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
obs1.gamma_method()
obs2 = obs1 + 1e-18
obs2.gamma_method()
@ -1088,27 +1058,6 @@ def test_covariance_reorder_non_overlapping_data():
assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)
def test_sort_corr():
xd = {
'b': [1, 2, 3],
'a': [2.2, 4.4],
'c': [3.7, 5.1]
}
yd = {k : pe.cov_Obs(xd[k], [.2 * o for o in xd[k]], k) for k in xd}
key_orig = list(yd.keys())
y_all = np.concatenate([np.array(yd[key]) for key in key_orig])
[o.gm() for o in y_all]
cov = pe.covariance(y_all)
key_ls = key_sorted = sorted(key_orig)
y_sorted = np.concatenate([np.array(yd[key]) for key in key_sorted])
[o.gm() for o in y_sorted]
cov_sorted = pe.covariance(y_sorted)
retcov = pe.obs.sort_corr(cov, key_orig, yd)
assert np.sum(retcov - cov_sorted) == 0
def test_empty_obs():
o = pe.Obs([np.random.rand(100)], ['test'])
q = o + pe.Obs([], [], means=[])
@ -1119,9 +1068,6 @@ def test_reweight_method():
obs1 = pe.pseudo_Obs(0.2, 0.01, 'test')
rw = pe.pseudo_Obs(0.999, 0.001, 'test')
assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0]
rw2 = pe.pseudo_Obs(0.999, 0.001, 'test2')
with pytest.raises(ValueError):
obs1.reweight(rw2)
def test_jackknife():
@ -1138,7 +1084,7 @@ def test_jackknife():
assert np.allclose(tmp_jacks, my_obs.export_jackknife())
my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_new_obs.export_jackknife()
@ -1326,7 +1272,7 @@ def test_nan_obs():
no.gamma_method()
o.idl['test'] = [1, 5] + list(range(7, 2002, 2))
no = np.nan * o
no = np.NaN * o
no.gamma_method()
@ -1335,9 +1281,9 @@ def test_format_uncertainty():
assert pe.obs._format_uncertainty(0.548, 2.48497, 2) == '0.5(2.5)'
assert pe.obs._format_uncertainty(0.548, 2.48497, 4) == '0.548(2.485)'
assert pe.obs._format_uncertainty(0.548, 20078.3, 9) == '0.5480(20078.3000)'
pe.obs._format_uncertainty(np.nan, 1)
pe.obs._format_uncertainty(1, np.nan)
pe.obs._format_uncertainty(np.nan, np.inf)
pe.obs._format_uncertainty(np.NaN, 1)
pe.obs._format_uncertainty(1, np.NaN)
pe.obs._format_uncertainty(np.NaN, np.inf)
def test_format():
@ -1387,102 +1333,3 @@ def test_vec_gm():
cc = pe.Corr(obs)
pe.gm(cc, S=4.12)
assert np.all(np.vectorize(lambda x: x.S["qq"])(cc.content) == 4.12)
def test_complex_addition():
o = pe.pseudo_Obs(34.12, 1e-4, "testens")
r = o + 2j
assert r.real == o
r = r * 1j
assert r.imag == o
def test_missing_replica():
N1 = 3000
N2 = 2000
O1 = np.random.normal(1.0, .1, N1 + N2)
O2 = .5 * O1[:N1]
w1 = N1 / (N1 + N2)
w2 = N2 / (N1 + N2)
m12 = np.mean(O1[N1:])
m2 = np.mean(O2)
d12 = np.std(O1[N1:]) / np.sqrt(N2) # error of <O1> from second rep
d2 = np.std(O2) / np.sqrt(N1) # error of <O2> from first rep
dval = np.sqrt((w2 * d12 / m2)**2 + (w2 * m12 * d2 / m2**2)**2) # complete error of <O1>/<O2>
# pyerrors version that should give the same result
O1dobs = pe.Obs([O1[:N1], O1[N1:]], names=['E|1', 'E|2'])
O2dobs = pe.Obs([O2], names=['E|1'])
O1O2 = O1dobs / O2dobs
O1O2.gm(S=0)
# explicit construction with different ensembles
O1a = pe.Obs([O1[:N1]], names=['E|1'])
O1b = pe.Obs([O1[N1:]], names=['F|2'])
O1O2b = (w1 * O1a + w2 * O1b) / O2dobs
O1O2b.gm(S=0)
# pyerrors version without replica (missing configs)
O1c = pe.Obs([O1], names=['E|1'])
O1O2c = O1c / O2dobs
O1O2c.gm(S=0)
for o in [O1O2, O1O2b, O1O2c]:
assert(np.isclose(dval, o.dvalue, atol=0, rtol=5e-2))
o = O1O2 * O2dobs - O1dobs
o.gm()
assert(o.is_zero())
o = O1dobs / O1O2 - O2dobs
o.gm()
assert(o.is_zero())
# bring more randomness and complexity into the game
Nl = [int(np.random.uniform(low=500, high=5000)) for i in range(4)]
wl = np.array(Nl) / sum(Nl)
O1 = np.random.normal(1.0, .1, sum(Nl))
# pyerrors replica version
datl = [O1[:Nl[0]], O1[Nl[0]:sum(Nl[:2])], O1[sum(Nl[:2]):sum(Nl[:3])], O1[sum(Nl[:3]):sum(Nl[:4])]]
O1dobs = pe.Obs(datl, names=['E|%d' % (d) for d in range(len(Nl))])
O2dobs = .5 * pe.Obs([datl[0]], names=['E|0'])
O3dobs = 2. / pe.Obs([datl[1]], names=['E|1'])
O1O2 = O1dobs / O2dobs
O1O2.gm(S=0)
O1O2O3 = O1O2 * np.sinh(O3dobs)
O1O2O3.gm(S=0)
# explicit construction with different ensembles
charl = ['E', 'F', 'G', 'H']
Ol = [pe.Obs([datl[i]], names=['%s|%d' % (charl[i], i)]) for i in range(len(Nl))]
O1O2b = sum(np.array(Ol) * wl) / O2dobs
O1O2b.gm(S=0)
i = 1
O3dobsb = 2. / pe.Obs([datl[i]], names=['%s|%d' % (charl[i], i)])
O1O2O3b = O1O2b * np.sinh(O3dobsb)
O1O2O3b.gm(S=0)
for op in [[O1O2, O1O2b], [O1O2O3, O1O2O3b]]:
assert np.isclose(op[0].value, op[1].value)
assert np.isclose(op[0].dvalue, op[1].dvalue, atol=0, rtol=5e-2)
# perform the same test using the array_mode of derived_observable
O1O2 = pyerrors.linalg.matmul(np.diag(np.diag(np.reshape(4 * [O1dobs], (2, 2)))), np.diag(np.diag(np.reshape(4 * [1. / O2dobs], (2, 2)))))
O1O2O3 = pyerrors.linalg.matmul(O1O2, np.diag(np.diag(np.sinh(np.reshape(4 * [O3dobs], (2, 2))))))
O1O2 = O1O2[0][0]
O1O2.gm(S=0)
O1O2O3 = O1O2O3[0][0]
O1O2O3.gm(S=0)
O1O2b = pyerrors.linalg.matmul(np.diag(np.diag(np.reshape(4 * [sum(np.array(Ol) * wl)], (2, 2)))), np.diag(np.diag(np.reshape(4 * [1. / O2dobs], (2, 2)))))
O1O2O3b = pyerrors.linalg.matmul(O1O2b, np.diag(np.diag(np.sinh(np.reshape(4 * [O3dobsb], (2, 2))))))
O1O2b = O1O2b[0][0]
O1O2b.gm(S=0)
O1O2O3b = O1O2O3b[0][0]
O1O2O3b.gm(S=0)
for op in [[O1O2, O1O2b], [O1O2O3, O1O2O3b]]:
assert np.isclose(op[1].value, op[0].value)
assert np.isclose(op[1].dvalue, op[0].dvalue, atol=0, rtol=5e-2)
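For orientation, the dval expression in test_missing_replica above follows the standard Gaussian error propagation for a ratio of means. A minimal, self-contained sketch with made-up numbers (an editorial illustration, not part of the pyerrors test suite):

import numpy as np

# Sketch: for uncorrelated samples A and B, the error of <A>/<B> propagates as
# sigma(<A>/<B>)**2 = (sigma_A / <B>)**2 + (<A> * sigma_B / <B>**2)**2,
# the same structure as the dval expression above.
rng = np.random.default_rng(1)
A = rng.normal(1.0, 0.10, 4000)
B = rng.normal(0.5, 0.05, 4000)
mA, mB = A.mean(), B.mean()
dA = A.std(ddof=1) / np.sqrt(A.size)
dB = B.std(ddof=1) / np.sqrt(B.size)
prop = np.sqrt((dA / mB) ** 2 + (mA * dB / mB ** 2) ** 2)
print(mA / mB, prop)  # ratio of means and its propagated 1-sigma error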


@ -1,6 +1,7 @@
import os
import sys
import inspect
import pyerrors as pe
import pyerrors.input.sfcf as sfin
import shutil
import pytest
@ -13,348 +14,107 @@ sys.path.insert(0, parent_dir)
def build_test_environment(path, env_type, cfgs, reps):
shutil.copytree("tests/data/sfcf_test/data_"+env_type, (path + "/data_" + env_type))
if env_type == "o":
for i in range(2, cfgs+1):
for i in range(2,cfgs+1):
shutil.copytree(path + "/data_o/test_r0/cfg1", path + "/data_o/test_r0/cfg"+str(i))
for i in range(1, reps):
for i in range(1,reps):
shutil.copytree(path + "/data_o/test_r0", path + "/data_o/test_r"+str(i))
elif env_type == "c":
for i in range(2, cfgs+1):
for i in range(2,cfgs+1):
shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1", path + "/data_c/data_c_r0/data_c_r0_n"+str(i))
for i in range(1, reps):
for i in range(1,reps):
os.mkdir(path + "/data_c/data_c_r"+str(i))
for j in range(1, cfgs+1):
shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1", path + "/data_c/data_c_r"+str(i)+"/data_c_r"+str(i)+"_n"+str(j))
for j in range(1,cfgs+1):
shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1",path + "/data_c/data_c_r"+str(i)+"/data_c_r"+str(i)+"_n"+str(j))
elif env_type == "a":
for i in range(1, reps):
for i in range(1,reps):
for corr in ["f_1", "f_A", "F_V0"]:
shutil.copy(path + "/data_a/data_a_r0." + corr, path + "/data_a/data_a_r" + str(i) + "." + corr)
def test_o_bb(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0", corr_type="bb")
build_test_environment(str(tmp_path), "o",5,3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_1",quarks="lquark lquark", wf = 0, wf2=0, version = "2.0", corr_type="bb")
print(f_1)
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert list(f_1[0].shape.keys()) == ["test_|r0","test_|r1","test_|r2"]
assert f_1[0].value == 351.1941525454502
def test_o_bi(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_A", quarks="lquark lquark", wf=0, version="2.0")
build_test_environment(str(tmp_path), "o",5,3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_A",quarks="lquark lquark", wf = 0, version = "2.0")
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert list(f_A[0].shape.keys()) == ["test_|r0","test_|r1","test_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
def test_o_bi_files(tmp_path):
build_test_environment(str(tmp_path), "o", 10, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_A", quarks="lquark lquark", wf=0, version="2.0",
files=[["cfg" + str(i) for i in range(1, 11, 2)], ["cfg" + str(i) for i in range(2, 11, 2)], ["cfg" + str(i) for i in range(1, 11, 2)]])
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
def test_o_bib(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0", corr_type="bib")
build_test_environment(str(tmp_path), "o",5,3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "F_V0",quarks="lquark lquark", wf = 0, wf2 = 0, version = "2.0", corr_type="bib")
print(f_V0)
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert list(f_V0[0].shape.keys()) == ["test_|r0","test_|r1","test_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_simple_multi_o(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_o", "test", ["F_V0"], quarks_list=["lquark lquark"], wf1_list=[0], wf2_list=[0], version="2.0", corr_type_list=["bib"])
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_dict_multi_o(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_o", "test",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0], wf2_list=[0], version="2.0",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
print(corrs)
f_1 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_1[0].value == 351.1941525454502
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_c_bb(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0c", corr_type="bb")
build_test_environment(str(tmp_path), "c",5,3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_1", quarks="lquark lquark", wf = 0, wf2=0, version = "2.0c", corr_type="bb")
print(f_1)
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert list(f_1[0].shape.keys()) == ["data_c_|r0","data_c_|r1","data_c_|r2"]
assert f_1[0].value == 351.1941525454502
def test_c_bi(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf=0, version="2.0c")
build_test_environment(str(tmp_path), "c",5,3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf = 0, version = "2.0c")
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert list(f_A[0].shape.keys()) == ["data_c_|r0","data_c_|r1","data_c_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
def test_c_bi_files(tmp_path):
build_test_environment(str(tmp_path), "c", 10, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf=0, version="2.0c",
files=[["data_c_r0_n" + str(i) for i in range(1, 11, 2)], ["data_c_r1_n" + str(i) for i in range(2, 11, 2)], ["data_c_r2_n" + str(i) for i in range(1, 11, 2)]])
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
def test_c_bi_files_int_fail(tmp_path):
build_test_environment(str(tmp_path), "c", 10, 3)
with pytest.raises(TypeError):
sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf=0, version="2.0c",
files=[[range(1, 11, 2)], [range(2, 11, 2)], [range(1, 11, 2)]])
def test_c_bib(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0c", corr_type="bib")
build_test_environment(str(tmp_path), "c",5,3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "F_V0",quarks="lquark lquark", wf = 0, wf2 = 0, version = "2.0c", corr_type="bib")
print(f_V0)
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert list(f_V0[0].shape.keys()) == ["data_c_|r0","data_c_|r1","data_c_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_simple_multi_c(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_c", "data_c", ["F_V0"], quarks_list=["lquark lquark"], wf1_list=[0], wf2_list=[0], version="2.0c", corr_type_list=["bib"])
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_dict_multi_c(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_c", "data_c",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0], wf2_list=[0], version="2.0c",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
print(corrs)
f_1 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_1[0].value == 351.1941525454502
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_dict_multi_wf_c(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_c", "data_c",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0, 1], wf2_list=[0, 1], version="2.0c",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
rep_names = ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
f_1_00 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_1_01 = corrs["f_1"]['lquark lquark']['0']['0']['1']
f_1_10 = corrs["f_1"]['lquark lquark']['0']['1']['0']
f_1_11 = corrs["f_1"]['lquark lquark']['0']['1']['1']
assert len(f_1_00) == 1
assert list(f_1_00[0].shape.keys()) == rep_names
assert f_1_00[0].value == 351.1941525454502
assert len(f_1_01) == 1
assert list(f_1_01[0].shape.keys()) == rep_names
assert f_1_01[0].value == 351.20703575855345
assert len(f_1_10) == 1
assert list(f_1_10[0].shape.keys()) == rep_names
assert f_1_10[0].value == 351.20703575855515
assert len(f_1_11) == 1
assert list(f_1_11[0].shape.keys()) == rep_names
assert f_1_11[0].value == 351.22001235609065
f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == rep_names
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
f_V0_00 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
f_V0_01 = corrs["F_V0"]['lquark lquark']['0']['0']['1']
f_V0_10 = corrs["F_V0"]['lquark lquark']['0']['1']['0']
f_V0_11 = corrs["F_V0"]['lquark lquark']['0']['1']['1']
assert len(f_V0_00) == 3
assert list(f_V0_00[0].shape.keys()) == rep_names
assert f_V0_00[0].value == 683.6776090085115
assert f_V0_00[1].value == 661.3188585582334
assert f_V0_00[2].value == 683.6776090081005
assert len(f_V0_01) == 3
assert list(f_V0_01[0].shape.keys()) == rep_names
assert f_V0_01[0].value == 683.7028316879306
assert f_V0_01[1].value == 661.3432563640756
assert f_V0_01[2].value == 683.7028316875197
assert len(f_V0_10) == 3
assert list(f_V0_10[0].shape.keys()) == rep_names
assert f_V0_10[0].value == 683.7028316879289
assert f_V0_10[1].value == 661.343256364074
assert f_V0_10[2].value == 683.702831687518
assert len(f_V0_11) == 3
assert list(f_V0_11[0].shape.keys()) == rep_names
assert f_V0_11[0].value == 683.7280552978792
assert f_V0_11[1].value == 661.3676550700158
assert f_V0_11[2].value == 683.7280552974681
def test_a_bb(tmp_path):
build_test_environment(str(tmp_path), "a", 5, 3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bb")
build_test_environment(str(tmp_path), "a",5,3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_1", quarks="lquark lquark", wf = 0, wf2=0, version = "2.0a", corr_type="bb")
print(f_1)
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert list(f_1[0].shape.keys()) == ["data_a_|r0","data_a_|r1","data_a_|r2"]
assert f_1[0].value == 351.1941525454502
def test_a_bi(tmp_path):
build_test_environment(str(tmp_path), "a", 5, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a")
build_test_environment(str(tmp_path), "a",5,3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf = 0, version = "2.0a")
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert list(f_A[0].shape.keys()) == ["data_a_|r0","data_a_|r1","data_a_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
def test_a_bi_files(tmp_path):
build_test_environment(str(tmp_path), "a", 5, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a", files=["data_a_r0.f_A", "data_a_r1.f_A", "data_a_r2.f_A"])
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
def test_a_bi_files_int_fail(tmp_path):
build_test_environment(str(tmp_path), "a", 10, 3)
with pytest.raises(TypeError):
sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a",
files=[[range(1, 11, 2)], [range(2, 11, 2)], [range(1, 11, 2)]])
def test_a_bib(tmp_path):
build_test_environment(str(tmp_path), "a", 5, 3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bib")
build_test_environment(str(tmp_path), "a",5,3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "F_V0",quarks="lquark lquark", wf = 0, wf2 = 0, version = "2.0a", corr_type="bib")
print(f_V0)
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert list(f_V0[0].shape.keys()) == ["data_a_|r0","data_a_|r1","data_a_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_simple_multi_a(tmp_path):
build_test_environment(str(tmp_path), "a", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_a", "data_a", ["F_V0"], quarks_list=["lquark lquark"], wf1_list=[0], wf2_list=[0], version="2.0a", corr_type_list=["bib"])
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_dict_multi_a(tmp_path):
build_test_environment(str(tmp_path), "a", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_a", "data_a",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0], wf2_list=[0], version="2.0a",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
print(corrs)
f_1 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert f_1[0].value == 351.1941525454502
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005
def test_find_corr():
pattern = 'name ' + "f_A" + '\nquarks ' + "lquark lquark" + '\noffset ' + str(0) + '\nwf ' + str(0)
start_read, T = sfin._find_correlator("tests/data/sfcf_test/data_c/data_c_r0/data_c_r0_n1", "2.0c", pattern, False)
@ -369,8 +129,7 @@ def test_find_corr():
with pytest.raises(ValueError):
sfin._find_correlator("tests/data/sfcf_test/broken_data_c/data_c_r0/data_c_r0_n1", "2.0c", pattern, False)
def test_read_compact_file():
def test_read_compact_file(tmp_path):
rep_path = "tests/data/sfcf_test/broken_data_c/data_c_r0/"
config_file = "data_c_r0_n1"
start_read = 469
@ -380,40 +139,3 @@ def test_read_compact_file():
im = False
with pytest.raises(Exception):
sfin._read_compact_file(rep_path, config_file, start_read, T, b2b, name, im)
def test_find_correlator():
file = "tests/data/sfcf_test/data_c/data_c_r0/data_c_r0_n1"
found_start, found_T = sfin._find_correlator(file, "2.0", "name f_A\nquarks lquark lquark\noffset 0\nwf 0", False, False)
assert found_start == 21
assert found_T == 3
def test_get_rep_name():
names = ['data_r0', 'data_r1', 'data_r2']
new_names = sfin._get_rep_names(names)
assert len(new_names) == 3
assert new_names[0] == 'data_|r0'
assert new_names[1] == 'data_|r1'
assert new_names[2] == 'data_|r2'
names = ['data_q0', 'data_q1', 'data_q2']
new_names = sfin._get_rep_names(names, rep_sep='q')
assert len(new_names) == 3
assert new_names[0] == 'data_|q0'
assert new_names[1] == 'data_|q1'
assert new_names[2] == 'data_|q2'
def test_get_appended_rep_name():
names = ['data_r0.f_1', 'data_r1.f_1', 'data_r2.f_1']
new_names = sfin._get_appended_rep_names(names, 'data', 'f_1')
assert len(new_names) == 3
assert new_names[0] == 'data_|r0'
assert new_names[1] == 'data_|r1'
assert new_names[2] == 'data_|r2'
names = ['data_q0.f_1', 'data_q1.f_1', 'data_q2.f_1']
new_names = sfin._get_appended_rep_names(names, 'data', 'f_1', rep_sep='q')
assert len(new_names) == 3
assert new_names[0] == 'data_|q0'
assert new_names[1] == 'data_|q1'
assert new_names[2] == 'data_|q2'


@ -1,12 +0,0 @@
import numpy as np
import scipy
import pyerrors as pe
import pytest
from autograd import jacobian
from numdifftools import Jacobian as num_jacobian
def test_kn():
for n in np.arange(0, 10):
for val in np.linspace(0.1, 7.3, 10):
assert np.isclose(num_jacobian(lambda x: scipy.special.kn(n, x))(val), jacobian(lambda x: pe.special.kn(n, x))(val), rtol=1e-10, atol=1e-10)
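For context on what test_kn verifies: both Jacobians in the assertion compute the derivative of the modified Bessel function of the second kind, which also has a closed form via the recurrence K_n'(x) = -(K_{n-1}(x) + K_{n+1}(x)) / 2. A minimal sketch checking this against scipy (an editorial illustration, assuming scipy.special.kv and kvp behave as documented):

import numpy as np
import scipy.special

# Closed-form derivative of K_n via the recurrence relation, compared with
# scipy's own derivative routine kvp(v, z, n=1).
n, x = 3, 1.7
analytic = -0.5 * (scipy.special.kv(n - 1, x) + scipy.special.kv(n + 1, x))
assert np.isclose(analytic, scipy.special.kvp(n, x, 1), rtol=1e-12, atol=0)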