Mirror of https://github.com/fjosw/pyerrors.git (synced 2025-11-04 17:36:04 +01:00)

Compare commits (40 commits)
| SHA1 |
|---|
| 1002dd0e51 |
| 4cdddf0a76 |
| da0a4cc40a |
| e0076ccea9 |
| 85ae9d7563 |
| a600a69bb9 |
| 4c4173c461 |
| 6c7daac44b |
| 1d031d0eab |
| e0bfcabc0c |
| 3e955d4976 |
| dbe5912ca3 |
| 5bcbe5c2ff |
| cf36d17a00 |
| 6521e16901 |
| 68e4633ae0 |
| d6e6a435a8 |
| 8183ee2ef4 |
| dcb95265ac |
| 934a61e124 |
| 3c36ab08c8 |
| b2847a1f80 |
| 17792418ed |
| dd4f8525f7 |
| 5f5438b563 |
| 6ed6ce6113 |
| 7eabd68c5f |
| 9ff34c27d7 |
| 997d360db3 |
| 3eac9214b4 |
| d908508120 |
| b1448a2703 |
| 30bfb55981 |
| 0ce765a99d |
| c057ecffda |
| 47fd72b814 |
| b43a2cbd34 |
| 4b1bb0872a |
| 1d6f7f65c0 |
| 3830e3f777 |
35 changed files with 3931 additions and 375 deletions
.github/workflows/docs.yml (vendored, 2 changes)

@@ -13,7 +13,7 @@ jobs:
       - name: Set up Python environment
         uses: actions/setup-python@v5
         with:
-          python-version: "3.10"
+          python-version: "3.12"
       - uses: actions/checkout@v4
       - name: Updated documentation
         run: |
.github/workflows/examples.yml (vendored, 12 changes)

@@ -27,17 +27,17 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+      - name: uv
+        uses: astral-sh/setup-uv@v5
       - name: Install
         run: |
           sudo apt-get update
           sudo apt-get install dvipng texlive-latex-extra texlive-fonts-recommended cm-super
-          python -m pip install --upgrade pip
-          pip install wheel
-          pip install .
-          pip install pytest
-          pip install nbmake
-          pip install -U matplotlib!=3.7.0 # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
+          uv pip install wheel --system
+          uv pip install . --system
+          uv pip install pytest nbmake --system
+          uv pip install -U matplotlib!=3.7.0 --system # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.

       - name: Run tests
         run: pytest -vv --nbmake examples/*.ipynb
.github/workflows/flake8.yml (vendored, 2 changes)

@@ -17,7 +17,7 @@ jobs:
       - name: Set up Python environment
         uses: actions/setup-python@v5
         with:
-          python-version: "3.10"
+          python-version: "3.12"
       - name: flake8 Lint
         uses: py-actions/flake8@v2
         with:
.github/workflows/pytest.yml (vendored, 29 changes)

@@ -17,10 +17,12 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest]
-        python-version: ["3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
         include:
           - os: macos-latest
-            python-version: "3.10"
+            python-version: "3.12"
+          - os: ubuntu-24.04-arm
+            python-version: "3.12"

     steps:
       - name: Checkout source
@@ -30,19 +32,20 @@ jobs:
         uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
+      - name: uv
+        uses: astral-sh/setup-uv@v5
       - name: Install
         run: |
-          python -m pip install --upgrade pip
-          pip install wheel
-          pip install .
-          pip install pytest
-          pip install pytest-cov
-          pip install pytest-benchmark
-          pip install hypothesis
-          pip install py
-          pip install pyarrow
-          pip freeze
+          uv pip install wheel --system
+          uv pip install . --system
+          uv pip install pytest pytest-cov pytest-benchmark hypothesis --system
+          uv pip freeze --system

-      - name: Run tests
+      - name: Run tests with -Werror
+        if: matrix.python-version != '3.14'
+        run: pytest --cov=pyerrors -vv -Werror
+
+      - name: Run tests without -Werror for python 3.14
+        if: matrix.python-version == '3.14'
         run: pytest --cov=pyerrors -vv
.github/workflows/release.yml (vendored, new file, 58 additions)

@@ -0,0 +1,58 @@
+name: Release
+
+on:
+  workflow_dispatch:
+  release:
+    types: [published]
+
+jobs:
+  build:
+    name: Build sdist and wheel
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        name: Checkout repository
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+
+      - name: Install pypa/build
+        run: >-
+          python3 -m
+          pip install
+          build
+          --user
+
+      - name: Build wheel and source tarball
+        run: python3 -m build
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: python-package-distributions
+          path: dist/
+          if-no-files-found: error
+
+  publish:
+    needs: [build]
+    name: Upload to PyPI
+    runs-on: ubuntu-latest
+    environment:
+      name: pypi
+      url: https://pypi.org/p/pyerrors
+    permissions:
+      id-token: write
+
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: python-package-distributions
+          path: dist/
+
+      - name: Sanity check
+        run: ls -la dist/
+
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
.github/workflows/ruff.yml (vendored, new file, 15 additions)

@@ -0,0 +1,15 @@
+name: ruff
+on:
+  push:
+    branches:
+      - master
+      - develop
+  pull_request:
+jobs:
+  ruff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: astral-sh/ruff-action@v2
+        with:
+          src: "./pyerrors"
CHANGELOG.md (44 changes)

@@ -2,6 +2,50 @@

 All notable changes to this project will be documented in this file.

+## [2.16.0] - 2025-10-30
+
+### Added
+- Support for custom configuration number extraction in the sfcf input module.
+
+### Fixed
+- Calculation of expected chisquare in connection with priors.
+
+### Changed
+- Support for python<3.10 was dropped.
+
+## [2.15.1] - 2025-10-19
+
+### Fixed
+- Fixed handling of padding in Correlator prune method.
+
+## [2.15.0] - 2025-10-10
+
+### Added
+- Option to explicitly specify the number of fit parameters added.
+
+## [2.14.0] - 2025-03-09
+
+### Added
+- Explicit checks of the provided inverse matrix for correlated fits #259
+
+### Changed
+- Compute derivative for pow explicitly instead of relying on autograd. This results in a ~4x speedup for pow operations #246
+- More explicit exception types #248
+
+### Fixed
+- Removed the possibility to create an Obs from data on several replica #258
+- Fix range in `set_prange` #247
+- Fix ensemble name handling in sfcf input modules #253
+- Correct error message for fit shape mismatch #257
+
+## [2.13.0] - 2024-11-03
+
+### Added
+- Allow providing lower triangular matrix constructed from a Cholesky decomposition in least squares function for correlated fits.
+
+### Fixed
+- Corrected bug that prevented combined fits with multiple x-obs in some cases.
+
 ## [2.12.0] - 2024-08-22

 ### Changed
README.md

@@ -1,4 +1,4 @@
-[](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [](https://www.python.org/downloads/) [](https://opensource.org/licenses/MIT) [](https://arxiv.org/abs/2209.14371) [](https://doi.org/10.1016/j.cpc.2023.108750)
+[](https://www.python.org/downloads/) [](https://opensource.org/licenses/MIT) [](https://arxiv.org/abs/2209.14371) [](https://doi.org/10.1016/j.cpc.2023.108750)
 # pyerrors
 `pyerrors` is a python framework for error computation and propagation of Markov chain Monte Carlo data from lattice field theory and statistical mechanics simulations.

@@ -14,11 +14,6 @@ Install the most recent release using pip and [pypi](https://pypi.org/project/py
 python -m pip install pyerrors # Fresh install
 python -m pip install -U pyerrors # Update
 ```
-Install the most recent release using conda and [conda-forge](https://anaconda.org/conda-forge/pyerrors):
-```bash
-conda install -c conda-forge pyerrors # Fresh install
-conda update -c conda-forge pyerrors # Update
-```

 ## Contributing
 We appreciate all contributions to the code, the documentation and the examples. If you want to get involved please have a look at our [contribution guideline](https://github.com/fjosw/pyerrors/blob/develop/CONTRIBUTING.md).
File diff suppressed because one or more lines are too long

@@ -151,7 +151,7 @@
 "\n",
 "$$C_{\\textrm{projected}}(t)=v_1^T \\underline{C}(t) v_2$$\n",
 "\n",
-"If we choose the vectors to be $v_1=v_2=(0,1,0,0)$, we should get the same correlator as in the cell above. \n",
+"If we choose the vectors to be $v_1=v_2=(1,0,0,0)$, we should get the same correlator as in the cell above. \n",
 "\n",
 "Thinking about it this way is usefull in the Context of the generalized eigenvalue problem (GEVP), used to find the source-sink combination, which best describes a certain energy eigenstate.\n",
 "A good introduction is found in https://arxiv.org/abs/0902.1265."
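The corrected cell picks out the first source-sink combination. As a minimal sketch of the projection $C_{\textrm{projected}}(t)=v_1^T \underline{C}(t) v_2$ using `Corr.projected` (here `matrix_corr` is a hypothetical N=4 correlator matrix, not taken from the notebook):

```python
import numpy as np

# Sketch only: `matrix_corr` is assumed to be an N=4 pyerrors.Corr matrix.
v = np.asarray([1.0, 0.0, 0.0, 0.0])  # v_1 = v_2 = (1, 0, 0, 0)
projected = matrix_corr.projected(vector_l=v, vector_r=v)  # returns an N=1 Corr
projected.gamma_method()  # estimate errors on the projected correlator
```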
pyerrors/__init__.py

@@ -481,12 +481,12 @@ from .obs import *
 from .correlators import *
 from .fits import *
 from .misc import *
-from . import dirac
-from . import input
-from . import linalg
-from . import mpm
-from . import roots
-from . import integrate
-from . import special
+from . import dirac as dirac
+from . import input as input
+from . import linalg as linalg
+from . import mpm as mpm
+from . import roots as roots
+from . import integrate as integrate
+from . import special as special

-from .version import __version__
+from .version import __version__ as __version__
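The aliased form `from . import dirac as dirac` is the explicit re-export idiom: strict type checkers and linters treat a bare submodule import in an `__init__.py` as private, while `import X as X` marks `X` as part of the package's public interface. A small sketch of the distinction:

```python
# package/__init__.py
from . import helpers              # implicit import; strict checkers treat it as private
from . import helpers as helpers   # explicit re-export; `package.helpers` is public API
```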
pyerrors/correlators.py

@@ -101,7 +101,7 @@ class Corr:
             self.N = 1
         elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
             self.content = data_input
-            noNull = [a for a in self.content if not (a is None)]  # To check if the matrices are correct for all undefined elements
+            noNull = [a for a in self.content if a is not None]  # To check if the matrices are correct for all undefined elements
             self.N = noNull[0].shape[0]
             if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
                 raise ValueError("Smearing matrices are not NxN.")
@@ -141,7 +141,7 @@ class Corr:
     def gamma_method(self, **kwargs):
         """Apply the gamma method to the content of the Corr."""
         for item in self.content:
-            if not (item is None):
+            if item is not None:
                 if self.N == 1:
                     item[0].gamma_method(**kwargs)
                 else:
@@ -159,7 +159,7 @@ class Corr:
         By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
         """
         if self.N == 1:
-            raise Exception("Trying to project a Corr, that already has N=1.")
+            raise ValueError("Trying to project a Corr, that already has N=1.")

         if vector_l is None:
             vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
@@ -167,16 +167,16 @@ class Corr:
             vector_r = vector_l
         if isinstance(vector_l, list) and not isinstance(vector_r, list):
             if len(vector_l) != self.T:
-                raise Exception("Length of vector list must be equal to T")
+                raise ValueError("Length of vector list must be equal to T")
             vector_r = [vector_r] * self.T
         if isinstance(vector_r, list) and not isinstance(vector_l, list):
             if len(vector_r) != self.T:
-                raise Exception("Length of vector list must be equal to T")
+                raise ValueError("Length of vector list must be equal to T")
             vector_l = [vector_l] * self.T

         if not isinstance(vector_l, list):
             if not vector_l.shape == vector_r.shape == (self.N,):
-                raise Exception("Vectors are of wrong shape!")
+                raise ValueError("Vectors are of wrong shape!")
             if normalize:
                 vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
             newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
@@ -201,7 +201,7 @@ class Corr:
             Second index to be picked.
         """
         if self.N == 1:
-            raise Exception("Trying to pick item from projected Corr")
+            raise ValueError("Trying to pick item from projected Corr")
         newcontent = [None if (item is None) else item[i, j] for item in self.content]
         return Corr(newcontent)
@@ -212,8 +212,8 @@ class Corr:
         timeslice and the error on each timeslice.
         """
         if self.N != 1:
-            raise Exception("Can only make Corr[N=1] plottable")
-        x_list = [x for x in range(self.T) if not self.content[x] is None]
+            raise ValueError("Can only make Corr[N=1] plottable")
+        x_list = [x for x in range(self.T) if self.content[x] is not None]
         y_list = [y[0].value for y in self.content if y is not None]
         y_err_list = [y[0].dvalue for y in self.content if y is not None]
@@ -222,9 +222,9 @@ class Corr:
     def symmetric(self):
         """ Symmetrize the correlator around x0=0."""
         if self.N != 1:
-            raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.')
+            raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
-            raise Exception("Can not symmetrize odd T")
+            raise ValueError("Can not symmetrize odd T")

         if self.content[0] is not None:
             if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:
@@ -237,7 +237,7 @@ class Corr:
             else:
                 newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
         if (all([x is None for x in newcontent])):
-            raise Exception("Corr could not be symmetrized: No redundant values")
+            raise ValueError("Corr could not be symmetrized: No redundant values")
         return Corr(newcontent, prange=self.prange)

     def anti_symmetric(self):
@@ -245,7 +245,7 @@ class Corr:
         if self.N != 1:
             raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
-            raise Exception("Can not symmetrize odd T")
+            raise ValueError("Can not symmetrize odd T")

         test = 1 * self
         test.gamma_method()
@@ -259,7 +259,7 @@ class Corr:
             else:
                 newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
         if (all([x is None for x in newcontent])):
-            raise Exception("Corr could not be symmetrized: No redundant values")
+            raise ValueError("Corr could not be symmetrized: No redundant values")
         return Corr(newcontent, prange=self.prange)

     def is_matrix_symmetric(self):
@@ -292,7 +292,7 @@ class Corr:
     def matrix_symmetric(self):
         """Symmetrizes the correlator matrices on every timeslice."""
         if self.N == 1:
-            raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.")
+            raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
         if self.is_matrix_symmetric():
             return 1.0 * self
         else:
@@ -336,10 +336,10 @@ class Corr:
         '''

         if self.N == 1:
-            raise Exception("GEVP methods only works on correlator matrices and not single correlators.")
+            raise ValueError("GEVP methods only works on correlator matrices and not single correlators.")
         if ts is not None:
             if (ts <= t0):
-                raise Exception("ts has to be larger than t0.")
+                raise ValueError("ts has to be larger than t0.")

         if "sorted_list" in kwargs:
             warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)
@@ -371,9 +371,9 @@ class Corr:

         if sort is None:
             if (ts is None):
-                raise Exception("ts is required if sort=None.")
+                raise ValueError("ts is required if sort=None.")
             if (self.content[t0] is None) or (self.content[ts] is None):
-                raise Exception("Corr not defined at t0/ts.")
+                raise ValueError("Corr not defined at t0/ts.")
             Gt = _get_mat_at_t(ts)
             reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv)
             if kwargs.get('auto_gamma', False) and vector_obs:
@@ -391,14 +391,14 @@ class Corr:
                     all_vecs.append(None)
             if sort == "Eigenvector":
                 if ts is None:
-                    raise Exception("ts is required for the Eigenvector sorting method.")
+                    raise ValueError("ts is required for the Eigenvector sorting method.")
                 all_vecs = _sort_vectors(all_vecs, ts)

             reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)]
             if kwargs.get('auto_gamma', False) and vector_obs:
                 [[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs]
         else:
-            raise Exception("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")
+            raise ValueError("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")

         if "state" in kwargs:
             return reordered_vecs[kwargs.get("state")]
@@ -435,7 +435,7 @@ class Corr:
         """

         if self.N != 1:
-            raise Exception("Multi-operator Prony not implemented!")
+            raise NotImplementedError("Multi-operator Prony not implemented!")

         array = np.empty([N, N], dtype="object")
         new_content = []
@@ -502,7 +502,7 @@ class Corr:
             correlator or a Corr of same length.
         """
         if self.N != 1:
-            raise Exception("Only one-dimensional correlators can be safely correlated.")
+            raise ValueError("Only one-dimensional correlators can be safely correlated.")
         new_content = []
         for x0, t_slice in enumerate(self.content):
             if _check_for_none(self, t_slice):
@@ -516,7 +516,7 @@ class Corr:
             elif isinstance(partner, Obs):  # Should this include CObs?
                 new_content.append(np.array([correlate(o, partner) for o in t_slice]))
             else:
-                raise Exception("Can only correlate with an Obs or a Corr.")
+                raise TypeError("Can only correlate with an Obs or a Corr.")

         return Corr(new_content)
@@ -583,7 +583,7 @@ class Corr:
             Available choice: symmetric, forward, backward, improved, log, default: symmetric
         """
         if self.N != 1:
-            raise Exception("deriv only implemented for one-dimensional correlators.")
+            raise ValueError("deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
@@ -592,7 +592,7 @@ class Corr:
                 else:
                     newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
             if (all([x is None for x in newcontent])):
-                raise Exception('Derivative is undefined at all timeslices')
+                raise ValueError('Derivative is undefined at all timeslices')
             return Corr(newcontent, padding=[1, 1])
         elif variant == "forward":
             newcontent = []
@@ -602,7 +602,7 @@ class Corr:
                 else:
                     newcontent.append(self.content[t + 1] - self.content[t])
             if (all([x is None for x in newcontent])):
-                raise Exception("Derivative is undefined at all timeslices")
+                raise ValueError("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[0, 1])
         elif variant == "backward":
             newcontent = []
@@ -612,7 +612,7 @@ class Corr:
                 else:
                     newcontent.append(self.content[t] - self.content[t - 1])
             if (all([x is None for x in newcontent])):
-                raise Exception("Derivative is undefined at all timeslices")
+                raise ValueError("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 0])
         elif variant == "improved":
             newcontent = []
@@ -622,7 +622,7 @@ class Corr:
                 else:
                     newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
             if (all([x is None for x in newcontent])):
-                raise Exception('Derivative is undefined at all timeslices')
+                raise ValueError('Derivative is undefined at all timeslices')
             return Corr(newcontent, padding=[2, 2])
         elif variant == 'log':
             newcontent = []
@@ -632,11 +632,11 @@ class Corr:
                 else:
                     newcontent.append(np.log(self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise Exception("Log is undefined at all timeslices")
+                raise ValueError("Log is undefined at all timeslices")
             logcorr = Corr(newcontent)
             return self * logcorr.deriv('symmetric')
         else:
-            raise Exception("Unknown variant.")
+            raise ValueError("Unknown variant.")

     def second_deriv(self, variant="symmetric"):
         r"""Return the second derivative of the correlator with respect to x0.
@@ -656,7 +656,7 @@ class Corr:
         $$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$
         """
         if self.N != 1:
-            raise Exception("second_deriv only implemented for one-dimensional correlators.")
+            raise ValueError("second_deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
@@ -665,7 +665,7 @@ class Corr:
                 else:
                     newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
             if (all([x is None for x in newcontent])):
-                raise Exception("Derivative is undefined at all timeslices")
+                raise ValueError("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 1])
         elif variant == "big_symmetric":
             newcontent = []
@@ -675,7 +675,7 @@ class Corr:
                 else:
                     newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4)
             if (all([x is None for x in newcontent])):
-                raise Exception("Derivative is undefined at all timeslices")
+                raise ValueError("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[2, 2])
         elif variant == "improved":
             newcontent = []
@@ -685,7 +685,7 @@ class Corr:
                 else:
                     newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
             if (all([x is None for x in newcontent])):
-                raise Exception("Derivative is undefined at all timeslices")
+                raise ValueError("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[2, 2])
         elif variant == 'log':
             newcontent = []
@@ -695,11 +695,11 @@ class Corr:
                 else:
                     newcontent.append(np.log(self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise Exception("Log is undefined at all timeslices")
+                raise ValueError("Log is undefined at all timeslices")
             logcorr = Corr(newcontent)
             return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2)
         else:
-            raise Exception("Unknown variant.")
+            raise ValueError("Unknown variant.")

     def m_eff(self, variant='log', guess=1.0):
         """Returns the effective mass of the correlator as correlator object
@@ -728,7 +728,7 @@ class Corr:
                 else:
                     newcontent.append(self.content[t] / self.content[t + 1])
             if (all([x is None for x in newcontent])):
-                raise Exception('m_eff is undefined at all timeslices')
+                raise ValueError('m_eff is undefined at all timeslices')

             return np.log(Corr(newcontent, padding=[0, 1]))
@@ -742,7 +742,7 @@ class Corr:
                 else:
                     newcontent.append(self.content[t - 1] / self.content[t + 1])
             if (all([x is None for x in newcontent])):
-                raise Exception('m_eff is undefined at all timeslices')
+                raise ValueError('m_eff is undefined at all timeslices')

             return np.log(Corr(newcontent, padding=[1, 1])) / 2
@@ -767,7 +767,7 @@ class Corr:
                 else:
                     newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
             if (all([x is None for x in newcontent])):
-                raise Exception('m_eff is undefined at all timeslices')
+                raise ValueError('m_eff is undefined at all timeslices')

             return Corr(newcontent, padding=[0, 1])
@@ -779,11 +779,11 @@ class Corr:
                 else:
                     newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise Exception("m_eff is undefined at all timeslices")
+                raise ValueError("m_eff is undefined at all timeslices")
             return np.arccosh(Corr(newcontent, padding=[1, 1]))

         else:
-            raise Exception('Unknown variant.')
+            raise ValueError('Unknown variant.')

     def fit(self, function, fitrange=None, silent=False, **kwargs):
         r'''Fits function to the data
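The `m_eff` hunks above touch the log variant, which evaluates $\log\big(C(t)/C(t+1)\big)$, and the symmetric variant, which evaluates $\mathrm{arccosh}\big((C(t+1)+C(t-1))/(2C(t))\big)$. A minimal usage sketch, assuming `corr` is an already projected (N=1) `pyerrors.Corr` and that the variant names match the released API:

```python
# Sketch only: `corr` is a hypothetical projected correlator (N=1).
m_log = corr.m_eff(variant='log')      # m(t) = log(C(t) / C(t+1))
m_sym = corr.m_eff(variant='arccosh')  # m(t) = arccosh((C(t+1) + C(t-1)) / (2 C(t)))
m_log.gamma_method()                   # propagate errors before reading off a plateau
```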
@@ -801,7 +801,7 @@ class Corr:
             Decides whether output is printed to the standard output.
         '''
         if self.N != 1:
-            raise Exception("Correlator must be projected before fitting")
+            raise ValueError("Correlator must be projected before fitting")

         if fitrange is None:
             if self.prange:
@@ -810,12 +810,12 @@ class Corr:
                 fitrange = [0, self.T - 1]
         else:
             if not isinstance(fitrange, list):
-                raise Exception("fitrange has to be a list with two elements")
+                raise TypeError("fitrange has to be a list with two elements")
             if len(fitrange) != 2:
-                raise Exception("fitrange has to have exactly two elements [fit_start, fit_stop]")
+                raise ValueError("fitrange has to have exactly two elements [fit_start, fit_stop]")

-        xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
-        ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
+        xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
+        ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
         result = least_squares(xs, ys, function, silent=silent, **kwargs)
         return result
@@ -840,9 +840,9 @@ class Corr:
             else:
                 raise Exception("no plateau range provided")
         if self.N != 1:
-            raise Exception("Correlator must be projected before getting a plateau.")
+            raise ValueError("Correlator must be projected before getting a plateau.")
         if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
-            raise Exception("plateau is undefined at all timeslices in plateaurange.")
+            raise ValueError("plateau is undefined at all timeslices in plateaurange.")
         if auto_gamma:
             self.gamma_method()
         if method == "fit":
@@ -854,16 +854,16 @@ class Corr:
             return returnvalue

         else:
-            raise Exception("Unsupported plateau method: " + method)
+            raise ValueError("Unsupported plateau method: " + method)

     def set_prange(self, prange):
         """Sets the attribute prange of the Corr object."""
         if not len(prange) == 2:
-            raise Exception("prange must be a list or array with two values")
+            raise ValueError("prange must be a list or array with two values")
         if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
-            raise Exception("Start and end point must be integers")
-        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
-            raise Exception("Start and end point must define a range in the interval 0,T")
+            raise TypeError("Start and end point must be integers")
+        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]):
+            raise ValueError("Start and end point must define a range in the interval 0,T")

         self.prange = prange
         return
@@ -900,7 +900,7 @@ class Corr:
             Optional title of the figure.
         """
         if self.N != 1:
-            raise Exception("Correlator must be projected before plotting")
+            raise ValueError("Correlator must be projected before plotting")

         if auto_gamma:
             self.gamma_method()
@@ -941,7 +941,7 @@ class Corr:
                     hide_from = None
                 ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
             else:
-                raise Exception("'comp' must be a correlator or a list of correlators.")
+                raise TypeError("'comp' must be a correlator or a list of correlators.")

         if plateau:
             if isinstance(plateau, Obs):
@@ -950,14 +950,14 @@ class Corr:
                 ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
                 ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
             else:
-                raise Exception("'plateau' must be an Obs")
+                raise TypeError("'plateau' must be an Obs")

         if references:
             if isinstance(references, list):
                 for ref in references:
                     ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--')
             else:
-                raise Exception("'references' must be a list of floating pint values.")
+                raise TypeError("'references' must be a list of floating pint values.")

         if self.prange:
             ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0)
@@ -991,7 +991,7 @@ class Corr:
             if isinstance(save, str):
                 fig.savefig(save, bbox_inches='tight')
             else:
-                raise Exception("'save' has to be a string.")
+                raise TypeError("'save' has to be a string.")

     def spaghetti_plot(self, logscale=True):
         """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
@@ -1002,7 +1002,7 @@ class Corr:
             Determines whether the scale of the y-axis is logarithmic or standard.
         """
         if self.N != 1:
-            raise Exception("Correlator needs to be projected first.")
+            raise ValueError("Correlator needs to be projected first.")

         mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
         x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
@@ -1044,7 +1044,7 @@ class Corr:
         elif datatype == "pickle":
             dump_object(self, filename, **kwargs)
         else:
-            raise Exception("Unknown datatype " + str(datatype))
+            raise ValueError("Unknown datatype " + str(datatype))

     def print(self, print_range=None):
         print(self.__repr__(print_range))
@@ -1094,7 +1094,7 @@ class Corr:
     def __add__(self, y):
         if isinstance(y, Corr):
             if ((self.N != y.N) or (self.T != y.T)):
-                raise Exception("Addition of Corrs with different shape")
+                raise ValueError("Addition of Corrs with different shape")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@@ -1122,7 +1122,7 @@ class Corr:
     def __mul__(self, y):
         if isinstance(y, Corr):
             if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
-                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
+                raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@@ -1193,7 +1193,7 @@ class Corr:
     def __truediv__(self, y):
         if isinstance(y, Corr):
             if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
-                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
+                raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
@@ -1207,16 +1207,16 @@ class Corr:
                     newcontent[t] = None

             if all([item is None for item in newcontent]):
-                raise Exception("Division returns completely undefined correlator")
+                raise ValueError("Division returns completely undefined correlator")
             return Corr(newcontent)

         elif isinstance(y, (Obs, CObs)):
             if isinstance(y, Obs):
                 if y.value == 0:
-                    raise Exception('Division by zero will return undefined correlator')
+                    raise ValueError('Division by zero will return undefined correlator')
             if isinstance(y, CObs):
                 if y.is_zero():
-                    raise Exception('Division by zero will return undefined correlator')
+                    raise ValueError('Division by zero will return undefined correlator')

             newcontent = []
             for t in range(self.T):
@@ -1228,7 +1228,7 @@ class Corr:
         elif isinstance(y, (int, float)):
             if y == 0:
-                raise Exception('Division by zero will return undefined correlator')
+                raise ValueError('Division by zero will return undefined correlator')
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]):
@@ -1284,7 +1284,7 @@ class Corr:
                 if np.isnan(tmp_sum.value):
                     newcontent[t] = None
         if all([item is None for item in newcontent]):
-            raise Exception('Operation returns undefined correlator')
+            raise ValueError('Operation returns undefined correlator')
         return Corr(newcontent)

     def sin(self):
@@ -1392,26 +1392,28 @@ class Corr:
         '''

         if self.N == 1:
-            raise Exception('Method cannot be applied to one-dimensional correlators.')
+            raise ValueError('Method cannot be applied to one-dimensional correlators.')
         if basematrix is None:
             basematrix = self
         if Ntrunc >= basematrix.N:
-            raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
+            raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
         if basematrix.N != self.N:
-            raise Exception('basematrix and targetmatrix have to be of the same size.')
+            raise ValueError('basematrix and targetmatrix have to be of the same size.')

         evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]

         tmpmat = np.empty((Ntrunc, Ntrunc), dtype=object)
         rmat = []
         for t in range(basematrix.T):
-            for i in range(Ntrunc):
-                for j in range(Ntrunc):
-                    tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j]
-            rmat.append(np.copy(tmpmat))
+            if self.content[t] is None:
+                rmat.append(None)
+            else:
+                for i in range(Ntrunc):
+                    for j in range(Ntrunc):
+                        tmpmat[i][j] = evecs[i].T @ self[t] @ evecs[j]
+                rmat.append(np.copy(tmpmat))

-        newcontent = [None if (self.content[t] is None) else rmat[t] for t in range(self.T)]
-        return Corr(newcontent)
+        return Corr(rmat)


 def _sort_vectors(vec_set_in, ts):
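Since these hunks narrow the blanket `Exception` to `ValueError`, `TypeError` and `NotImplementedError`, downstream code that wrapped `Corr` methods in `except Exception` keeps working, but can now catch the specific type. A hedged sketch (with `corr` again a hypothetical projected correlator):

```python
# Sketch only: `corr` is a hypothetical projected correlator (N=1).
try:
    plat = corr.plateau([2, 8])   # default method="fit"
except ValueError as err:         # previously this surfaced as a bare Exception
    print(f"plateau could not be extracted: {err}")
```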
pyerrors/dirac.py

@@ -34,7 +34,7 @@ def epsilon_tensor(i, j, k):
     """
     test_set = set((i, j, k))
     if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
-        raise Exception("Unexpected input", i, j, k)
+        raise ValueError("Unexpected input", i, j, k)

     return (i - j) * (j - k) * (k - i) / 2
@@ -52,7 +52,7 @@ def epsilon_tensor_rank4(i, j, k, o):
     """
     test_set = set((i, j, k, o))
     if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
-        raise Exception("Unexpected input", i, j, k, o)
+        raise ValueError("Unexpected input", i, j, k, o)

     return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
@@ -92,5 +92,5 @@ def Grid_gamma(gamma_tag):
     elif gamma_tag == 'SigmaZT':
         g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
     else:
-        raise Exception('Unkown gamma structure', gamma_tag)
+        raise ValueError('Unkown gamma structure', gamma_tag)
     return g
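As a quick check of the closed form in the first hunk, $\epsilon_{ijk}=(i-j)(j-k)(k-i)/2$ reproduces the Levi-Civita values:

```python
from pyerrors.dirac import epsilon_tensor

print(epsilon_tensor(1, 2, 3))  # 1.0:  (1-2)(2-3)(3-1)/2 = (-1)(-1)(2)/2, even permutation
print(epsilon_tensor(2, 1, 3))  # -1.0: odd permutation
print(epsilon_tensor(1, 1, 3))  # 0.0:  repeated index
```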
pyerrors/fits.py (193 changes)

@@ -14,7 +14,7 @@ from autograd import hessian as auto_hessian
 from autograd import elementwise_grad as egrad
 from numdifftools import Jacobian as num_jacobian
 from numdifftools import Hessian as num_hessian
-from .obs import Obs, derived_observable, covariance, cov_Obs
+from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky


 class Fit_result(Sequence):
@@ -131,7 +131,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
         0.548(23), 500(40) or 0.5(0.4)
     silent : bool, optional
-        If true all output to the console is omitted (default False).
+        If True all output to the console is omitted (default False).
     initial_guess : list
         can provide an initial guess for the input parameters. Relevant for
         non-linear fits with many parameters. In case of correlated fits the guess is used to perform
@@ -139,10 +139,10 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
     method : str, optional
         can be used to choose an alternative method for the minimization of chisquare.
         The possible methods are the ones which can be used for scipy.optimize.minimize and
-        migrad of iminuit. If no method is specified, Levenberg-Marquard is used.
+        migrad of iminuit. If no method is specified, Levenberg–Marquardt is used.
         Reliable alternatives are migrad, Powell and Nelder-Mead.
     tol: float, optional
-        can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence
+        can be used (only for combined fits and methods other than Levenberg–Marquardt) to set the tolerance for convergence
         to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly
         invalid estimates for parameter uncertainties) or smaller values to get more accurate parameter values
         The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
@@ -151,6 +151,14 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
         In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
         This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
+    inv_chol_cov_matrix [array,list], optional
+        array: shape = (number of y values) X (number of y values)
+        list: for an uncombined fit: [""]
+              for a combined fit: list of keys belonging to the corr_matrix saved in the array, must be the same as the keys of the y dict in alphabetical order
+        If correlated_fit=True is set as well, can provide an inverse covariance matrix (y errors, dy_f included!) of your own choosing for a correlated fit.
+        The matrix must be a lower triangular matrix constructed from a Cholesky decomposition: The function invert_corr_cov_cholesky(corr, inverrdiag) can be
+        used to construct it from a correlation matrix (corr) and the errors dy_f of the data points (inverrdiag = np.diag(1 / np.asarray(dy_f))). For the correct
+        ordering the correlation matrix (corr) can be sorted via the function sort_corr(corr, kl, yd) where kl is the list of keys and yd the y dict.
     expected_chisquare : bool
         If True estimates the expected chisquare which is
         corrected by effects caused by correlated input data (default False).
@@ -160,11 +168,65 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         If True, a quantile-quantile plot of the fit result is generated (default False).
     num_grad : bool
         Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
+    n_parms : int, optional
+        Number of fit parameters. Overrides automatic detection of parameter count.
+        Useful when autodetection fails. Must match the length of initial_guess or priors (if provided).

     Returns
     -------
     output : Fit_result
         Parameters and information on the fitted result.
+
+    Examples
+    ------
+    >>> # Example of a correlated (correlated_fit = True, inv_chol_cov_matrix handed over) combined fit, based on a randomly generated data set
+    >>> import numpy as np
+    >>> from scipy.stats import norm
+    >>> from scipy.linalg import cholesky
+    >>> import pyerrors as pe
+    >>> # generating the random data set
+    >>> num_samples = 400
+    >>> N = 3
+    >>> x = np.arange(N)
+    >>> x1 = norm.rvs(size=(N, num_samples)) # generate random numbers
+    >>> x2 = norm.rvs(size=(N, num_samples)) # generate random numbers
|
>>> r = r1 = r2 = np.zeros((N, N))
|
||||||
|
>>> y = {}
|
||||||
|
>>> for i in range(N):
|
||||||
|
>>> for j in range(N):
|
||||||
|
>>> r[i, j] = np.exp(-0.8 * np.fabs(i - j)) # element in correlation matrix
|
||||||
|
>>> errl = np.sqrt([3.4, 2.5, 3.6]) # set y errors
|
||||||
|
>>> for i in range(N):
|
||||||
|
>>> for j in range(N):
|
||||||
|
>>> r[i, j] *= errl[i] * errl[j] # element in covariance matrix
|
||||||
|
>>> c = cholesky(r, lower=True)
|
||||||
|
>>> y = {'a': np.dot(c, x1), 'b': np.dot(c, x2)} # generate y data with the covariance matrix defined
|
||||||
|
>>> # random data set has been generated, now the dictionaries and the inverse covariance matrix to be handed over are built
|
||||||
|
>>> x_dict = {}
|
||||||
|
>>> y_dict = {}
|
||||||
|
>>> chol_inv_dict = {}
|
||||||
|
>>> data = []
|
||||||
|
>>> for key in y.keys():
|
||||||
|
>>> x_dict[key] = x
|
||||||
|
>>> for i in range(N):
|
||||||
|
>>> data.append(pe.Obs([[i + 1 + o for o in y[key][i]]], ['ens'])) # generate y Obs from the y data
|
||||||
|
>>> [o.gamma_method() for o in data]
|
||||||
|
>>> corr = pe.covariance(data, correlation=True)
|
||||||
|
>>> inverrdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
|
||||||
|
>>> chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag) # gives form of the inverse covariance matrix needed for the combined correlated fit below
|
||||||
|
>>> y_dict = {'a': data[:3], 'b': data[3:]}
|
||||||
|
>>> # common fit parameter p[0] in combined fit
|
||||||
|
>>> def fit1(p, x):
|
||||||
|
>>> return p[0] + p[1] * x
|
||||||
|
>>> def fit2(p, x):
|
||||||
|
>>> return p[0] + p[2] * x
|
||||||
|
>>> fitf_dict = {'a': fit1, 'b':fit2}
|
||||||
|
>>> fitp_inv_cov_combined_fit = pe.least_squares(x_dict,y_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,['a','b']])
|
||||||
|
Fit with 3 parameters
|
||||||
|
Method: Levenberg-Marquardt
|
||||||
|
`ftol` termination condition is satisfied.
|
||||||
|
chisquare/d.o.f.: 0.5388013574561786 # random
|
||||||
|
fit parameters [1.11897846 0.96361162 0.92325319] # random
|
||||||
|
|
||||||
'''
|
'''
|
||||||
output = Fit_result()
|
output = Fit_result()
|
||||||
|
|
||||||
|
|
@ -197,7 +259,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
|
||||||
if sorted(list(funcd.keys())) != key_ls:
|
if sorted(list(funcd.keys())) != key_ls:
|
||||||
raise ValueError('x and func dictionaries do not contain the same keys.')
|
raise ValueError('x and func dictionaries do not contain the same keys.')
|
||||||
|
|
||||||
x_all = np.concatenate([np.array(xd[key]) for key in key_ls])
|
x_all = np.concatenate([np.array(xd[key]).transpose() for key in key_ls]).transpose()
|
||||||
y_all = np.concatenate([np.array(yd[key]) for key in key_ls])
|
y_all = np.concatenate([np.array(yd[key]) for key in key_ls])
|
||||||
|
|
||||||
y_f = [o.value for o in y_all]
|
y_f = [o.value for o in y_all]
|
||||||
|
|
@ -210,31 +272,43 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
|
||||||
raise Exception("No y errors available, run the gamma method first.")
|
raise Exception("No y errors available, run the gamma method first.")
|
||||||
|
|
||||||
# number of fit parameters
|
# number of fit parameters
|
||||||
n_parms_ls = []
|
if 'n_parms' in kwargs:
|
||||||
for key in key_ls:
|
n_parms = kwargs.get('n_parms')
|
||||||
if not callable(funcd[key]):
|
if not isinstance(n_parms, int):
|
||||||
raise TypeError('func (key=' + key + ') is not a function.')
|
raise TypeError(
|
||||||
if np.asarray(xd[key]).shape[-1] != len(yd[key]):
|
f"'n_parms' must be an integer, got {n_parms!r} "
|
||||||
raise ValueError('x and y input (key=' + key + ') do not have the same length')
|
f"of type {type(n_parms).__name__}."
|
||||||
for n_loc in range(100):
|
)
|
||||||
try:
|
if n_parms <= 0:
|
||||||
funcd[key](np.arange(n_loc), x_all.T[0])
|
raise ValueError(
|
||||||
except TypeError:
|
f"'n_parms' must be a positive integer, got {n_parms}."
|
||||||
continue
|
)
|
||||||
except IndexError:
|
else:
|
||||||
continue
|
n_parms_ls = []
|
||||||
|
for key in key_ls:
|
||||||
|
if not callable(funcd[key]):
|
||||||
|
raise TypeError('func (key=' + key + ') is not a function.')
|
||||||
|
if np.asarray(xd[key]).shape[-1] != len(yd[key]):
|
||||||
|
raise ValueError('x and y input (key=' + key + ') do not have the same length')
|
||||||
|
for n_loc in range(100):
|
||||||
|
try:
|
||||||
|
funcd[key](np.arange(n_loc), x_all.T[0])
|
||||||
|
except TypeError:
|
||||||
|
continue
|
||||||
|
except IndexError:
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
break
|
||||||
else:
|
else:
|
||||||
break
|
raise RuntimeError("Fit function (key=" + key + ") is not valid.")
|
||||||
else:
|
n_parms_ls.append(n_loc)
|
||||||
raise RuntimeError("Fit function (key=" + key + ") is not valid.")
|
|
||||||
n_parms_ls.append(n_loc)
|
|
||||||
|
|
||||||
n_parms = max(n_parms_ls)
|
n_parms = max(n_parms_ls)
|
||||||
|
|
||||||
if len(key_ls) > 1:
|
if len(key_ls) > 1:
|
||||||
for key in key_ls:
|
for key in key_ls:
|
||||||
if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape:
|
if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape:
|
||||||
raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
|
raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {np.asarray(yd[key]).shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
|
||||||
|
|
||||||
if not silent:
|
if not silent:
|
||||||
print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
|
print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
|
||||||
|
|
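A minimal usage sketch for the new `n_parms` keyword documented above. The data, ensemble name and fit function are invented for illustration; only the keyword itself is taken from the change:

    import numpy as np
    import pyerrors as pe

    x = np.arange(5)
    y = [pe.pseudo_Obs(2 * xi + 1, 0.1, 'ens') for xi in x]
    [o.gamma_method() for o in y]

    def func(p, x):
        return p[0] + p[1] * x

    # skips the parameter-count probing loop entirely;
    # must match the actual number of parameters used by func
    fit = pe.least_squares(x, y, func, n_parms=2)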
@@ -297,15 +371,21 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
            return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)

    if kwargs.get('correlated_fit') is True:
-        corr = covariance(y_all, correlation=True, **kwargs)
-        covdiag = np.diag(1 / np.asarray(dy_f))
-        condn = np.linalg.cond(corr)
-        if condn > 0.1 / np.finfo(float).eps:
-            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
-        if condn > 1e13:
-            warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
-        chol = np.linalg.cholesky(corr)
-        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
+        if 'inv_chol_cov_matrix' in kwargs:
+            chol_inv = kwargs.get('inv_chol_cov_matrix')
+            if (chol_inv[0].shape[0] != len(dy_f)):
+                raise TypeError('The number of columns of the inverse covariance matrix handed over needs to be equal to the number of y errors.')
+            if (chol_inv[0].shape[0] != chol_inv[0].shape[1]):
+                raise TypeError('The inverse covariance matrix handed over needs to have the same number of rows as columns.')
+            if (chol_inv[1] != key_ls):
+                raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.')
+            chol_inv = chol_inv[0]
+            if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))):
+                raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.')
+        else:
+            corr = covariance(y_all, correlation=True, **kwargs)
+            inverrdiag = np.diag(1 / np.asarray(dy_f))
+            chol_inv = invert_corr_cov_cholesky(corr, inverrdiag)

        def general_chisqfunc(p, ivars, pr):
            model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
@@ -350,7 +430,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):

    fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-
    if kwargs.get('correlated_fit') is True:

        def chisqfunc_residuals(p):
            return general_chisqfunc(p, y_f, p_f)

@@ -393,7 +472,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
            hat_vector = prepare_hat_matrix()
            A = W @ hat_vector
            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-            expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W)
+            expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W) + len(loc_priors)
            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
            if not silent:
                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
@@ -471,17 +550,20 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
        will not work.
    silent : bool, optional
-        If true all output to the console is omitted (default False).
+        If True all output to the console is omitted (default False).
    initial_guess : list
        can provide an initial guess for the input parameters. Relevant for non-linear
        fits with many parameters.
    expected_chisquare : bool
-        If true prints the expected chisquare which is
+        If True prints the expected chisquare which is
        corrected by effects caused by correlated input data.
        This can take a while as the full correlation matrix
        has to be calculated (default False).
    num_grad : bool
-        Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
+        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
+    n_parms : int, optional
+        Number of fit parameters. Overrides automatic detection of parameter count.
+        Useful when autodetection fails. Must match the length of initial_guess (if provided).

    Notes
    -----
@@ -511,19 +593,32 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
    if not callable(func):
        raise TypeError('func has to be a function.')

-    for i in range(42):
-        try:
-            func(np.arange(i), x.T[0])
-        except TypeError:
-            continue
-        except IndexError:
-            continue
-        else:
-            break
-    else:
-        raise RuntimeError("Fit function is not valid.")
-
-    n_parms = i
+    if 'n_parms' in kwargs:
+        n_parms = kwargs.get('n_parms')
+        if not isinstance(n_parms, int):
+            raise TypeError(
+                f"'n_parms' must be an integer, got {n_parms!r} "
+                f"of type {type(n_parms).__name__}."
+            )
+        if n_parms <= 0:
+            raise ValueError(
+                f"'n_parms' must be a positive integer, got {n_parms}."
+            )
+    else:
+        for i in range(100):
+            try:
+                func(np.arange(i), x.T[0])
+            except TypeError:
+                continue
+            except IndexError:
+                continue
+            else:
+                break
+        else:
+            raise RuntimeError("Fit function is not valid.")
+
+        n_parms = i

    if not silent:
        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
@@ -5,11 +5,11 @@ r'''
 For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
 See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
 '''
-from . import bdio
-from . import dobs
-from . import hadrons
-from . import json
-from . import misc
-from . import openQCD
-from . import pandas
-from . import sfcf
+from . import bdio as bdio
+from . import dobs as dobs
+from . import hadrons as hadrons
+from . import json as json
+from . import misc as misc
+from . import openQCD as openQCD
+from . import pandas as pandas
+from . import sfcf as sfcf
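The `as`-aliased imports above follow the explicit re-export idiom: a seemingly redundant alias tells type checkers and linters that the submodule is intentionally part of the public API rather than an accidental import. A schematic contrast (tool behaviour stated as the common convention, not verified for any particular linter version):

    from . import dobs          # may be flagged as an unused import
    from . import dobs as dobs  # marked as an intentional re-export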
@@ -79,7 +79,7 @@ def _dict_to_xmlstring_spaces(d, space=' '):
                o += space
            o += li + '\n'
            if li.startswith('<') and not cm:
-                if not '<%s' % ('/') in li:
+                if '<%s' % ('/') not in li:
                    c += 1
            cm = False
    return o
@@ -529,7 +529,8 @@ def import_dobs_string(content, full_output=False, separator_insertion=True):
            deltas.append(repdeltas)
            idl.append(repidl)

-        res.append(Obs(deltas, obs_names, idl=idl))
+        obsmeans = [np.average(deltas[j]) for j in range(len(deltas))]
+        res.append(Obs([np.array(deltas[j]) - obsmeans[j] for j in range(len(obsmeans))], obs_names, idl=idl, means=obsmeans))
        res[-1]._value = mean[i]
    _check(len(e_names) == ne)

@@ -671,7 +672,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
                o += space
            o += li + '\n'
            if li.startswith('<') and not cm:
-                if not '<%s' % ('/') in li:
+                if '<%s' % ('/') not in li:
                    c += 1
            cm = False
    return o
@@ -113,7 +113,7 @@ def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
    infos = []
    for hd5_file in files:
        h5file = h5py.File(path + '/' + hd5_file, "r")
-        if not group + '/' + entry in h5file:
+        if group + '/' + entry not in h5file:
            raise Exception("Entry '" + entry + "' not contained in the files.")
        raw_data = h5file[group + '/' + entry + '/corr']
        real_data = raw_data[:].view("complex")
@@ -186,7 +186,7 @@ def _extract_real_arrays(path, files, tree, keys):
    for hd5_file in files:
        h5file = h5py.File(path + '/' + hd5_file, "r")
        for key in keys:
-            if not tree + '/' + key in h5file:
+            if tree + '/' + key not in h5file:
                raise Exception("Entry '" + key + "' not contained in the files.")
            raw_data = h5file[tree + '/' + key + '/data']
            real_data = raw_data[:].astype(np.double)
@@ -133,10 +133,11 @@ def create_json_string(ol, description='', indent=1):
            names = []
            idl = []
            for key, value in obs.idl.items():
-                samples.append([np.nan] * len(value))
+                samples.append(np.array([np.nan] * len(value)))
                names.append(key)
                idl.append(value)
-            my_obs = Obs(samples, names, idl)
+            my_obs = Obs(samples, names, idl, means=[np.nan for n in names])
+            my_obs._value = np.nan
            my_obs._covobs = obs._covobs
            for name in obs._covobs:
                my_obs.names.append(name)
@@ -331,7 +332,8 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
        cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))

        if od:
-            ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
+            r_offsets = [np.average([ddi[0] for ddi in di]) for di in od['deltas']]
+            ret = Obs([np.array([ddi[0] for ddi in od['deltas'][i]]) - r_offsets[i] for i in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[0] for ro in r_offsets])
            ret._value = values[0]
        else:
            ret = Obs([], [], means=[])
@@ -356,7 +358,8 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
        taglist = o.get('tag', layout * [None])
        for i in range(layout):
            if od:
-                ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
+                r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
+                ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
                ret[-1]._value = values[i]
            else:
                ret.append(Obs([], [], means=[]))
@@ -383,7 +386,8 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
        taglist = o.get('tag', N * [None])
        for i in range(N):
            if od:
-                ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
+                r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
+                ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
                ret[-1]._value = values[i]
            else:
                ret.append(Obs([], [], means=[]))
@@ -567,7 +571,6 @@ def _ol_from_dict(ind, reps='DICTOBS'):
    counter = 0

    def dict_replace_obs(d):
-        nonlocal ol
        nonlocal counter
        x = {}
        for k, v in d.items():
@@ -588,7 +591,6 @@ def _ol_from_dict(ind, reps='DICTOBS'):
        return x

    def list_replace_obs(li):
-        nonlocal ol
        nonlocal counter
        x = []
        for e in li:
@@ -609,7 +611,6 @@ def _ol_from_dict(ind, reps='DICTOBS'):
        return x

    def obslist_replace_obs(li):
-        nonlocal ol
        nonlocal counter
        il = []
        for e in li:
@@ -690,7 +691,6 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):

    def dict_replace_string(d):
        nonlocal counter
-        nonlocal ol
        x = {}
        for k, v in d.items():
            if isinstance(v, dict):
@@ -706,7 +706,6 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):

    def list_replace_string(li):
        nonlocal counter
-        nonlocal ol
        x = []
        for e in li:
            if isinstance(e, list):
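A small consistency sketch for the offset handling introduced above (numbers invented): an Obs built from mean-subtracted deltas plus an explicit `means` argument should reproduce the Obs built from the raw samples directly, which is the behaviour the deserialization code now relies on:

    import numpy as np
    import pyerrors as pe

    raw = np.array([1.1, 0.9, 1.0, 1.2])
    offset = np.average(raw)

    direct = pe.Obs([raw], ['ens'])
    shifted = pe.Obs([raw - offset], ['ens'], means=[offset])

    assert np.isclose(direct.value, shifted.value)
    assert np.allclose(direct.deltas['ens'], shifted.deltas['ens'])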
@@ -47,7 +47,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
        Reweighting factors read
    """
    known_oqcd_versions = ['1.4', '1.6', '2.0']
-    if not (version in known_oqcd_versions):
+    if version not in known_oqcd_versions:
        raise Exception('Unknown openQCD version defined!')
    print("Working with openQCD version " + version)
    if 'postfix' in kwargs:
@@ -1,6 +1,7 @@
 import warnings
 import gzip
 import sqlite3
+from contextlib import closing
 import pandas as pd
 from ..obs import Obs
 from ..correlators import Corr
@@ -29,9 +30,8 @@ def to_sql(df, table_name, db, if_exists='fail', gz=True, **kwargs):
    None
    """
    se_df = _serialize_df(df, gz=gz)
-    con = sqlite3.connect(db)
-    se_df.to_sql(table_name, con, if_exists=if_exists, index=False, **kwargs)
-    con.close()
+    with closing(sqlite3.connect(db)) as con:
+        se_df.to_sql(table_name, con=con, if_exists=if_exists, index=False, **kwargs)


def read_sql(sql, db, auto_gamma=False, **kwargs):
@@ -52,9 +52,8 @@ def read_sql(sql, db, auto_gamma=False, **kwargs):
    data : pandas.DataFrame
        Dataframe with the content of the sqlite database.
    """
-    con = sqlite3.connect(db)
-    extract_df = pd.read_sql(sql, con, **kwargs)
-    con.close()
+    with closing(sqlite3.connect(db)) as con:
+        extract_df = pd.read_sql(sql, con=con, **kwargs)

    return _deserialize_df(extract_df, auto_gamma=auto_gamma)
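For context on the change above: `contextlib.closing` turns any object with a `close()` method into a context manager, so the connection is released even if the enclosed call raises. A stand-alone sketch with an invented database file name:

    import sqlite3
    from contextlib import closing

    with closing(sqlite3.connect("example.db")) as con:
        con.execute("CREATE TABLE IF NOT EXISTS t (x INTEGER)")
        con.commit()
    # con.close() has run here, on success and on error alike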
@@ -10,7 +10,7 @@ import itertools
 sep = "/"


-def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
+def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", cfg_func=None, silent=False, **kwargs):
    """Read sfcf files from given folder structure.

    Parameters
@@ -71,11 +71,11 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0,
    """
    ret = read_sfcf_multi(path, prefix, [name], quarks_list=[quarks], corr_type_list=[corr_type],
                          noffset_list=[noffset], wf_list=[wf], wf2_list=[wf2], version=version,
-                          cfg_separator=cfg_separator, silent=silent, **kwargs)
+                          cfg_separator=cfg_separator, cfg_func=cfg_func, silent=silent, **kwargs)
    return ret[name][quarks][str(noffset)][str(wf)][str(wf2)]


-def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=['bi'], noffset_list=[0], wf_list=[0], wf2_list=[0], version="1.0c", cfg_separator="n", silent=False, keyed_out=False, **kwargs):
+def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=['bi'], noffset_list=[0], wf_list=[0], wf2_list=[0], version="1.0c", cfg_separator="n", cfg_func=None, silent=False, keyed_out=False, **kwargs):
    """Read sfcf files from given folder structure.

    Parameters
@@ -127,7 +127,8 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
    check_configs: list[list[int]]
        list of list of supposed configs, e.g. [range(1,1000)]
        for one replicum with 1000 configs
+    rep_string: str
+        Separator of ensemble name and replicum. Example: In "ensAr0", "r" would be the separator string.
    Returns
    -------
    result: dict[list[Obs]]
@@ -199,9 +200,9 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
        else:
            ens_name = kwargs.get("ens_name")
            if not appended:
-                new_names = _get_rep_names(ls, ens_name)
+                new_names = _get_rep_names(ls, ens_name, rep_sep=(kwargs.get('rep_string', 'r')))
            else:
-                new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name)
+                new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name, rep_sep=(kwargs.get('rep_string', 'r')))
        new_names = sort_names(new_names)

    idl = []
@@ -244,6 +245,16 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
    for key in needed_keys:
        internal_ret_dict[key] = []

+    def _default_idl_func(cfg_string, cfg_sep):
+        return int(cfg_string.split(cfg_sep)[-1])
+
+    if cfg_func is None:
+        print("Default idl function in use.")
+        cfg_func = _default_idl_func
+        cfg_func_args = [cfg_separator]
+    else:
+        cfg_func_args = kwargs.get("cfg_func_args", [])
+
    if not appended:
        for i, item in enumerate(ls):
            rep_path = path + '/' + item
@@ -267,7 +278,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
            for cfg in sub_ls:
                try:
                    if compact:
-                        rep_idl.append(int(cfg.split(cfg_separator)[-1]))
+                        rep_idl.append(cfg_func(cfg, *cfg_func_args))
                    else:
                        rep_idl.append(int(cfg[3:]))
                except Exception:
@@ -350,7 +361,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
        for rep, file in enumerate(name_ls):
            rep_idl = []
            filename = path + '/' + file
-            T, rep_idl, rep_data = _read_append_rep(filename, pattern, intern[name]['b2b'], cfg_separator, im, intern[name]['single'])
+            T, rep_idl, rep_data = _read_append_rep(filename, pattern, intern[name]['b2b'], im, intern[name]['single'], cfg_func, cfg_func_args)
            if rep == 0:
                intern[name]['T'] = T
            for t in range(intern[name]['T']):
@@ -580,12 +591,7 @@ def _read_compact_rep(path, rep, sub_ls, intern, needed_keys, im):
    return return_vals


-def _read_chunk(chunk, gauge_line, cfg_sep, start_read, T, corr_line, b2b, pattern, im, single):
-    try:
-        idl = int(chunk[gauge_line].split(cfg_sep)[-1])
-    except Exception:
-        raise Exception("Couldn't parse idl from directory, problem with chunk around line ", gauge_line)
-
+def _read_chunk_data(chunk, start_read, T, corr_line, b2b, pattern, im, single):
    found_pat = ""
    data = []
    for li in chunk[corr_line + 1:corr_line + 6 + b2b]:
@@ -594,10 +600,10 @@
    for t, line in enumerate(chunk[start_read:start_read + T]):
        floats = list(map(float, line.split()))
        data.append(floats[im + 1 - single])
-    return idl, data
+    return data


-def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single):
+def _read_append_rep(filename, pattern, b2b, im, single, idl_func, cfg_func_args):
    with open(filename, 'r') as fp:
        content = fp.readlines()
    data_starts = []
@@ -633,7 +639,11 @@
        start = data_starts[cnfg]
        stop = start + data_starts[1]
        chunk = content[start:stop]
-        idl, data = _read_chunk(chunk, gauge_line, cfg_separator, start_read, T, corr_line, b2b, pattern, im, single)
+        try:
+            idl = idl_func(chunk[gauge_line], *cfg_func_args)
+        except Exception:
+            raise Exception("Couldn't parse idl from file", filename, ", problem with chunk of lines", start + 1, "to", stop + 1)
+        data = _read_chunk_data(chunk, start_read, T, corr_line, b2b, pattern, im, single)
        rep_idl.append(idl)
        rep_data.append(data)

@@ -646,22 +656,22 @@
    return T, rep_idl, data


-def _get_rep_names(ls, ens_name=None):
+def _get_rep_names(ls, ens_name=None, rep_sep='r'):
    new_names = []
    for entry in ls:
        try:
-            idx = entry.index('r')
+            idx = entry.index(rep_sep)
        except Exception:
            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

        if ens_name:
-            new_names.append('ens_name' + '|' + entry[idx:])
+            new_names.append(ens_name + '|' + entry[idx:])
        else:
            new_names.append(entry[:idx] + '|' + entry[idx:])
    return new_names


-def _get_appended_rep_names(ls, prefix, name, ens_name=None):
+def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
    new_names = []
    for exc in ls:
        if not fnmatch.fnmatch(exc, prefix + '*.' + name):
@@ -670,12 +680,12 @@
    for entry in ls:
        myentry = entry[:-len(name) - 1]
        try:
-            idx = myentry.index('r')
+            idx = myentry.index(rep_sep)
        except Exception:
            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

        if ens_name:
-            new_names.append('ens_name' + '|' + entry[idx:])
+            new_names.append(ens_name + '|' + entry[idx:])
        else:
            new_names.append(myentry[:idx] + '|' + myentry[idx:])
    return new_names
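A sketch of a user-defined `cfg_func`, following the hook introduced above: the callable receives the configuration string (or gauge line) plus any entries of the `cfg_func_args` keyword and must return an integer configuration index. The directory naming scheme here is invented:

    import pyerrors as pe

    def my_cfg_func(cfg_string, marker):
        # e.g. directory names like 'cfg_0012' -> 12
        return int(cfg_string.split(marker)[-1])

    # data = pe.input.sfcf.read_sfcf(path, prefix, name,
    #                                cfg_func=my_cfg_func, cfg_func_args=['cfg_'])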
154
pyerrors/obs.py
@@ -82,6 +82,8 @@ class Obs:
                raise ValueError('Names are not unique.')
            if not all(isinstance(x, str) for x in names):
                raise TypeError('All names have to be strings.')
+            if len(set([o.split('|')[0] for o in names])) > 1:
+                raise ValueError('Cannot initialize Obs based on multiple ensembles. Please average separate Obs from each ensemble.')
        else:
            if not isinstance(names[0], str):
                raise TypeError('All names have to be strings.')
@@ -222,7 +224,7 @@
            tmp = kwargs.get(kwarg_name)
            if isinstance(tmp, (int, float)):
                if tmp < 0:
-                    raise Exception(kwarg_name + ' has to be larger or equal to 0.')
+                    raise ValueError(kwarg_name + ' has to be larger or equal to 0.')
                for e, e_name in enumerate(self.e_names):
                    getattr(self, kwarg_name)[e_name] = tmp
            else:
@@ -291,7 +293,7 @@
                texp = self.tau_exp[e_name]
                # Critical slowing down analysis
                if w_max // 2 <= 1:
-                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+                    raise ValueError("Need at least 8 samples for tau_exp error analysis")
                for n in range(1, w_max // 2):
                    _compute_drho(n + 1)
                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
@@ -620,7 +622,7 @@
        if not hasattr(self, 'e_dvalue'):
            raise Exception('Run the gamma method first.')
        if np.isclose(0.0, self._dvalue, atol=1e-15):
-            raise Exception('Error is 0.0')
+            raise ValueError('Error is 0.0')
        labels = self.e_names
        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
        fig1, ax1 = plt.subplots()
@@ -659,7 +661,7 @@
            with open(file_name + '.p', 'wb') as fb:
                pickle.dump(self, fb)
        else:
-            raise Exception("Unknown datatype " + str(datatype))
+            raise TypeError("Unknown datatype " + str(datatype))

    def export_jackknife(self):
        """Export jackknife samples from the Obs
@@ -676,7 +678,7 @@
        """

        if len(self.names) != 1:
-            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+            raise ValueError("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")

        name = self.names[0]
        full_data = self.deltas[name] + self.r_values[name]
@@ -711,7 +713,7 @@
        should agree with samples from a full bootstrap analysis up to O(1/N).
        """
        if len(self.names) != 1:
-            raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
+            raise ValueError("'export_bootstrap' is only implemented for Obs defined on one ensemble and replicum.")

        name = self.names[0]
        length = self.N
@@ -856,15 +858,12 @@

    def __pow__(self, y):
        if isinstance(y, Obs):
-            return derived_observable(lambda x: x[0] ** x[1], [self, y])
+            return derived_observable(lambda x, **kwargs: x[0] ** x[1], [self, y], man_grad=[y.value * self.value ** (y.value - 1), self.value ** y.value * np.log(self.value)])
        else:
-            return derived_observable(lambda x: x[0] ** y, [self])
+            return derived_observable(lambda x, **kwargs: x[0] ** y, [self], man_grad=[y * self.value ** (y - 1)])

    def __rpow__(self, y):
-        if isinstance(y, Obs):
-            return derived_observable(lambda x: x[0] ** x[1], [y, self])
-        else:
-            return derived_observable(lambda x: y ** x[0], [self])
+        return derived_observable(lambda x, **kwargs: y ** x[0], [self], man_grad=[y ** self.value * np.log(y)])

    def __abs__(self):
        return derived_observable(lambda x: anp.abs(x[0]), [self])
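The `man_grad` entries introduced above are the closed-form derivatives of the power function, which spare one autograd pass: for f(a, b) = a ** b they are df/da = b * a ** (b - 1) and df/db = a ** b * log(a). A quick finite-difference check of these identities (values invented):

    import numpy as np

    a, b, eps = 1.7, 2.3, 1e-6
    assert np.isclose((a + eps) ** b - a ** b, b * a ** (b - 1) * eps, rtol=1e-4)
    assert np.isclose(a ** (b + eps) - a ** b, a ** b * np.log(a) * eps, rtol=1e-4)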
@@ -1270,7 +1269,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
    if 'man_grad' in kwargs:
        deriv = np.asarray(kwargs.get('man_grad'))
        if new_values.shape + data.shape != deriv.shape:
-            raise Exception('Manual derivative does not have correct shape.')
+            raise ValueError('Manual derivative does not have correct shape.')
    elif kwargs.get('num_grad') is True:
        if multi > 0:
            raise Exception('Multi mode currently not supported for numerical derivative')
@@ -1336,7 +1335,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
    new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}

    if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-        raise Exception('The same name has been used for deltas and covobs!')
+        raise ValueError('The same name has been used for deltas and covobs!')
    new_samples = []
    new_means = []
    new_idl = []
@@ -1377,7 +1376,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
        Has to be a subset of idx_old.
    """
    if not len(deltas) == len(idx_old):
-        raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
+        raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
    if type(idx_old) is range and type(idx_new) is range:
        if idx_old == idx_new:
            return deltas
@@ -1385,7 +1384,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
        return deltas
    indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
    if len(indices) < len(idx_new):
-        raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old')
+        raise ValueError('Error in _reduce_deltas: Config of idx_new not in idx_old')
    return np.array(deltas)[indices]


@@ -1407,12 +1406,14 @@ def reweight(weight, obs, **kwargs):
    result = []
    for i in range(len(obs)):
        if len(obs[i].cov_names):
-            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+            raise ValueError('Error: Not possible to reweight an Obs that contains covobs!')
        if not set(obs[i].names).issubset(weight.names):
-            raise Exception('Error: Ensembles do not fit')
+            raise ValueError('Error: Ensembles do not fit')
+        if len(obs[i].mc_names) > 1 or len(weight.mc_names) > 1:
+            raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.')
        for name in obs[i].names:
            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+                raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
        new_samples = []
        w_deltas = {}
        for name in sorted(obs[i].names):
@@ -1445,18 +1446,21 @@ def correlate(obs_a, obs_b):
    -----
    Keep in mind to only correlate primary observables which have not been reweighted
    yet. The reweighting has to be applied after correlating the observables.
-    Currently only works if ensembles are identical (this is not strictly necessary).
+    Only works if a single ensemble is present in the Obs.
+    Currently only works if ensemble content is identical (this is not strictly necessary).
    """

+    if len(obs_a.mc_names) > 1 or len(obs_b.mc_names) > 1:
+        raise ValueError('Error: Cannot correlate Obs that contain multiple ensembles.')
    if sorted(obs_a.names) != sorted(obs_b.names):
-        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+        raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
    if len(obs_a.cov_names) or len(obs_b.cov_names):
-        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+        raise ValueError('Error: Not possible to correlate Obs that contain covobs!')
    for name in obs_a.names:
        if obs_a.shape[name] != obs_b.shape[name]:
-            raise Exception('Shapes of ensemble', name, 'do not fit')
+            raise ValueError('Shapes of ensemble', name, 'do not fit')
        if obs_a.idl[name] != obs_b.idl[name]:
-            raise Exception('idl of ensemble', name, 'do not fit')
+            raise ValueError('idl of ensemble', name, 'do not fit')

    if obs_a.reweighted is True:
        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
@@ -1544,6 +1548,92 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
    return cov


+def invert_corr_cov_cholesky(corr, inverrdiag):
+    """Constructs a lower triangular matrix `chol` via the Cholesky decomposition of the correlation matrix `corr`
+       and then returns the inverse covariance matrix `chol_inv` as a lower triangular matrix by solving `chol * x = inverrdiag`.
+
+    Parameters
+    ----------
+    corr : np.ndarray
+        correlation matrix
+    inverrdiag : np.ndarray
+        diagonal matrix, the entries are the inverse errors of the data points considered
+    """
+
+    condn = np.linalg.cond(corr)
+    if condn > 0.1 / np.finfo(float).eps:
+        raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
+    if condn > 1e13:
+        warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
+    chol = np.linalg.cholesky(corr)
+    chol_inv = scipy.linalg.solve_triangular(chol, inverrdiag, lower=True)
+
+    return chol_inv
+
+
+def sort_corr(corr, kl, yd):
+    """Reorders a correlation matrix to match the alphabetical order of its underlying y data.
+
+    The ordering of the input correlation matrix `corr` is given by the list of keys `kl`.
+    The input dictionary `yd` (with the same keys `kl`) must contain the corresponding y data
+    that the correlation matrix is based on.
+    This function sorts the list of keys `kl` alphabetically and sorts the matrix `corr`
+    according to this alphabetical order such that the sorted matrix `corr_sorted` corresponds
+    to the y data `yd` when arranged in an alphabetical order by its keys.
+
+    Parameters
+    ----------
+    corr : np.ndarray
+        A square correlation matrix constructed using the order of the y data specified by `kl`.
+        The dimensions of `corr` should match the total number of y data points in `yd` combined.
+    kl : list of str
+        A list of keys that denotes the order in which the y data from `yd` was used to build the
+        input correlation matrix `corr`.
+    yd : dict of list
+        A dictionary where each key corresponds to a unique identifier, and its value is a list of
+        y data points. The total number of y data points across all keys must match the dimensions
+        of `corr`. The lists in the dictionary can be lists of Obs.
+
+    Returns
+    -------
+    np.ndarray
+        A new, sorted correlation matrix that corresponds to the y data from `yd` when arranged alphabetically by its keys.
+
+    Example
+    -------
+    >>> import numpy as np
+    >>> import pyerrors as pe
+    >>> corr = np.array([[1, 0.2, 0.3], [0.2, 1, 0.4], [0.3, 0.4, 1]])
+    >>> kl = ['b', 'a']
+    >>> yd = {'a': [1, 2], 'b': [3]}
+    >>> sorted_corr = pe.obs.sort_corr(corr, kl, yd)
+    >>> print(sorted_corr)
+    array([[1. , 0.3, 0.4],
+           [0.3, 1. , 0.2],
+           [0.4, 0.2, 1. ]])
+
+    """
+    kl_sorted = sorted(kl)
+
+    posd = {}
+    ofs = 0
+    for ki, k in enumerate(kl):
+        posd[k] = [i + ofs for i in range(len(yd[k]))]
+        ofs += len(posd[k])
+
+    mapping = []
+    for k in kl_sorted:
+        for i in range(len(yd[k])):
+            mapping.append(posd[k][i])
+
+    corr_sorted = np.zeros_like(corr)
+    for i in range(corr.shape[0]):
+        for j in range(corr.shape[0]):
+            corr_sorted[i][j] = corr[mapping[i]][mapping[j]]
+
+    return corr_sorted
+
+
 def _smooth_eigenvalues(corr, E):
    """Eigenvalue smoothing as described in hep-lat/9412087

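A short self-check of the construction above (numbers invented): with corr = L @ L.T and inverrdiag = diag(1 / err), the returned chol_inv = inv(L) @ inverrdiag satisfies chol_inv.T @ chol_inv == inv(cov) for the covariance matrix cov = diag(err) @ corr @ diag(err), which is exactly the form the correlated fit consumes:

    import numpy as np
    from scipy.linalg import solve_triangular

    corr = np.array([[1.0, 0.3], [0.3, 1.0]])
    err = np.array([0.5, 2.0])

    chol = np.linalg.cholesky(corr)
    chol_inv = solve_triangular(chol, np.diag(1 / err), lower=True)

    cov = np.diag(err) @ corr @ np.diag(err)
    assert np.allclose(chol_inv.T @ chol_inv, np.linalg.inv(cov))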
@ -1553,7 +1643,7 @@ def _smooth_eigenvalues(corr, E):
|
||||||
Number of eigenvalues to be left substantially unchanged
|
Number of eigenvalues to be left substantially unchanged
|
||||||
"""
|
"""
|
||||||
if not (2 < E < corr.shape[0] - 1):
|
if not (2 < E < corr.shape[0] - 1):
|
||||||
raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
|
raise ValueError(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
|
||||||
vals, vec = np.linalg.eigh(corr)
|
vals, vec = np.linalg.eigh(corr)
|
||||||
lambda_min = np.mean(vals[:-E])
|
lambda_min = np.mean(vals[:-E])
|
||||||
vals[vals < lambda_min] = lambda_min
|
vals[vals < lambda_min] = lambda_min
|
||||||
|
|
@ -1672,7 +1762,11 @@ def import_bootstrap(boots, name, random_numbers):
|
||||||
|
|
||||||
|
|
||||||
def merge_obs(list_of_obs):
|
def merge_obs(list_of_obs):
|
||||||
"""Combine all observables in list_of_obs into one new observable
|
"""Combine all observables in list_of_obs into one new observable.
|
||||||
|
This allows to merge Obs that have been computed on multiple replica
|
||||||
|
of the same ensemble.
|
||||||
|
If you like to merge Obs that are based on several ensembles, please
|
||||||
|
average them yourself.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
|
|
@ -1685,9 +1779,9 @@ def merge_obs(list_of_obs):
|
||||||
"""
|
"""
|
||||||
replist = [item for obs in list_of_obs for item in obs.names]
|
replist = [item for obs in list_of_obs for item in obs.names]
|
||||||
if (len(replist) == len(set(replist))) is False:
|
if (len(replist) == len(set(replist))) is False:
|
||||||
raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
|
raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist)))
|
||||||
if any([len(o.cov_names) for o in list_of_obs]):
|
if any([len(o.cov_names) for o in list_of_obs]):
|
||||||
raise Exception('Not possible to merge data that contains covobs!')
|
raise ValueError('Not possible to merge data that contains covobs!')
|
||||||
new_dict = {}
|
new_dict = {}
|
||||||
idl_dict = {}
|
idl_dict = {}
|
||||||
for o in list_of_obs:
|
for o in list_of_obs:
|
||||||
|
|
@ -1738,7 +1832,7 @@ def cov_Obs(means, cov, name, grad=None):
|
||||||
for i in range(len(means)):
|
for i in range(len(means)):
|
||||||
ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
|
ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
|
||||||
if ol[0].covobs[name].N != len(means):
|
if ol[0].covobs[name].N != len(means):
|
||||||
raise Exception('You have to provide %d mean values!' % (ol[0].N))
|
raise ValueError('You have to provide %d mean values!' % (ol[0].N))
|
||||||
if len(ol) == 1:
|
if len(ol) == 1:
|
||||||
return ol[0]
|
return ol[0]
|
||||||
return ol
|
return ol
|
||||||
|
|
@@ -1754,7 +1848,7 @@ def _determine_gap(o, e_content, e_name):

     gap = min(gaps)
     if not np.all([gi % gap == 0 for gi in gaps]):
-        raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
+        raise ValueError(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)

     return gap
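The spacing check above requires all replica gaps to be integer multiples of the smallest one; a minimal illustration:

import numpy as np

gaps = [2, 4, 8]            # compatible: 4 and 8 are multiples of 2
gap = min(gaps)
assert np.all([gi % gap == 0 for gi in gaps])
# gaps = [2, 3] would fail this check and now raises a ValueError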
@@ -1 +1 @@
-__version__ = "2.12.0"
+__version__ = "2.17.0-dev"
@@ -1,3 +1,6 @@
 [build-system]
 requires = ["setuptools >= 63.0.0", "wheel"]
 build-backend = "setuptools.build_meta"
+
+[tool.ruff.lint]
+ignore = ["F403"]
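F403 is the ruff rule that flags star imports whose names cannot be resolved statically; a minimal reproduction of what the new ignore entry suppresses:

from numpy import *   # F403: names from the star import cannot be checked

print(pi)             # resolves via the star import (usage itself would be F405)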
6
setup.py
@@ -24,18 +24,18 @@ setup(name='pyerrors',
       author_email='fabian.joswig@ed.ac.uk',
       license="MIT",
       packages=find_packages(),
-      python_requires='>=3.9.0',
+      python_requires='>=3.10.0',
       install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2'],
       extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
       classifiers=[
           'Development Status :: 5 - Production/Stable',
           'Intended Audience :: Science/Research',
-          'License :: OSI Approved :: MIT License',
           'Programming Language :: Python :: 3',
-          'Programming Language :: Python :: 3.9',
           'Programming Language :: Python :: 3.10',
           'Programming Language :: Python :: 3.11',
           'Programming Language :: Python :: 3.12',
+          'Programming Language :: Python :: 3.13',
+          'Programming Language :: Python :: 3.14',
           'Topic :: Scientific/Engineering :: Physics'
       ],
 )
@@ -129,7 +129,7 @@ def test_m_eff():
     with pytest.warns(RuntimeWarning):
         my_corr.m_eff('sinh')

-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         my_corr.m_eff('unkown_variant')
@@ -140,7 +140,7 @@ def test_m_eff_negative_values():
     assert m_eff_log[padding + 1] is None
     m_eff_cosh = my_corr.m_eff('cosh')
     assert m_eff_cosh[padding + 1] is None
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         my_corr.m_eff('logsym')
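The pattern behind these test changes: pytest.raises(Exception) also passes for unrelated errors, while pinning the concrete class catches regressions in the error type. A minimal sketch (hypothetical test, not part of the suite):

import numpy as np
import pytest
import pyerrors as pe

def test_m_eff_unknown_variant_sketch():
    corr = pe.Corr([pe.pseudo_Obs(np.exp(-0.3 * t), 0.01, 'ens') for t in range(10)])
    with pytest.raises(ValueError):   # would also pass with Exception, but less strictly
        corr.m_eff('unknown_variant')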
@@ -155,7 +155,7 @@ def test_correlate():
     my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
     corr1 = my_corr.correlate(my_corr)
     corr2 = my_corr.correlate(my_corr[0])
-    with pytest.raises(Exception):
+    with pytest.raises(TypeError):
         corr3 = my_corr.correlate(7.3)
@@ -176,9 +176,9 @@ def test_fit_correlator():
     assert fit_res[0] == my_corr[0]
     assert fit_res[1] == my_corr[1] - my_corr[0]

-    with pytest.raises(Exception):
+    with pytest.raises(TypeError):
         my_corr.fit(f, "from 0 to 3")
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         my_corr.fit(f, [0, 2, 3])
@@ -256,11 +256,11 @@ def test_prange():
     corr = pe.correlators.Corr(corr_content)

     corr.set_prange([2, 4])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         corr.set_prange([2])
-    with pytest.raises(Exception):
+    with pytest.raises(TypeError):
         corr.set_prange([2, 2.3])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         corr.set_prange([4, 1])
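A usage sketch of the error classes the prange setter now distinguishes, as exercised above:

import pyerrors as pe

corr = pe.Corr([pe.pseudo_Obs(1.0, 0.1, 'ens') for _ in range(8)])
corr.set_prange([2, 4])        # valid: two ascending integers
# corr.set_prange([2])         # ValueError: needs exactly two entries
# corr.set_prange([2, 2.3])    # TypeError: entries must be integers
# corr.set_prange([4, 1])      # ValueError: bounds must be ascending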
@@ -781,3 +781,26 @@ def test_complex_add_and_mul():
     cc += 2j
     cc = cc * 4j
     cc.real + cc.imag
+
+
+def test_prune_with_Nones():
+    N = 3
+    T = 10
+
+    front_padding = 1
+    back_padding = T // 2
+
+    Ntrunc = N - 1
+    t0proj = 2
+    tproj = 3
+
+    corr_content = np.array([[[pe.pseudo_Obs((i+j+1)**(-t), .01, "None_prune_test") for i in range(N)] for j in range(N)] for t in range(T // 2 - front_padding)])
+    unpadded_corr = pe.Corr(corr_content)
+    padded_corr = pe.Corr(corr_content, padding=[front_padding, back_padding])
+
+    tmp_corr = unpadded_corr.prune(Ntrunc, t0proj=t0proj-front_padding, tproj=tproj-front_padding)
+    pruned_then_padded = pe.Corr(tmp_corr.content, padding=[front_padding, back_padding])
+    padded_then_pruned = padded_corr.prune(Ntrunc, t0proj=t0proj, tproj=tproj)
+
+    for t in range(T):
+        assert np.all(pruned_then_padded.content[t] == padded_then_pruned.content[t])
1150
tests/data/sfcf_test/data_apf/data_apf_r0.F_V0
Normal file
File diff suppressed because it is too large
970
tests/data/sfcf_test/data_apf/data_apf_r0.f_1
Normal file
File diff suppressed because it is too large
400
tests/data/sfcf_test/data_apf/data_apf_r0.f_A
Normal file
File diff suppressed because it is too large
@@ -30,7 +30,7 @@ def test_grid_dirac():
                   'SigmaYZ',
                   'SigmaZT']:
         pe.dirac.Grid_gamma(gamma)
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
        pe.dirac.Grid_gamma('Not a gamma matrix')
@@ -44,7 +44,7 @@ def test_epsilon_tensor():
                  (1, 1, 3) : 0.0}
     for key, value in check.items():
         assert pe.dirac.epsilon_tensor(*key) == value
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.dirac.epsilon_tensor(0, 1, 3)
@@ -59,5 +59,5 @@ def test_epsilon_tensor_rank4():
                  (1, 2, 3, 1) : 0.0}
     for key, value in check.items():
         assert pe.dirac.epsilon_tensor_rank4(*key) == value
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)
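For orientation, a hedged usage sketch of the tensors tested above; judging from the checked keys, valid indices appear to run from 1 to 3 (rank 3) and 1 to 4 (rank 4):

import pyerrors as pe

assert pe.dirac.epsilon_tensor(1, 2, 3) == 1.0   # even permutation
assert pe.dirac.epsilon_tensor(1, 1, 3) == 0.0   # repeated index
# pe.dirac.epsilon_tensor(0, 1, 3) now raises ValueError (index out of range)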
@@ -152,6 +152,127 @@ def test_alternative_solvers():
     chisquare_values = np.array(chisquare_values)
     assert np.all(np.isclose(chisquare_values, chisquare_values[0]))

+
+def test_inv_cov_matrix_input_least_squares():
+
+    num_samples = 400
+    N = 10
+
+    x = norm.rvs(size=(N, num_samples))  # generate random numbers
+
+    r = np.zeros((N, N))
+    for i in range(N):
+        for j in range(N):
+            r[i, j] = np.exp(-0.8 * np.fabs(i - j))  # element in correlation matrix
+
+    errl = np.sqrt([3.4, 2.5, 3.6, 2.8, 4.2, 4.7, 4.9, 5.1, 3.2, 4.2])  # set y errors
+    for i in range(N):
+        for j in range(N):
+            r[i, j] *= errl[i] * errl[j]  # element in covariance matrix
+
+    c = cholesky(r, lower=True)
+    y = np.dot(c, x)
+    x = np.arange(N)
+    x_dict = {}
+    y_dict = {}
+    for i, item in enumerate(x):
+        x_dict[str(item)] = [x[i]]
+
+    for linear in [True, False]:
+        data = []
+        for i in range(N):
+            if linear:
+                data.append(pe.Obs([[i + 1 + o for o in y[i]]], ['ens']))
+            else:
+                data.append(pe.Obs([[np.exp(-(i + 1)) + np.exp(-(i + 1)) * o for o in y[i]]], ['ens']))
+
+        [o.gamma_method() for o in data]
+
+        data_dict = {}
+        for i, item in enumerate(x):
+            data_dict[str(item)] = [data[i]]
+
+        corr = pe.covariance(data, correlation=True)
+        chol = np.linalg.cholesky(corr)
+        covdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
+        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
+        chol_inv_keys = [""]
+        chol_inv_keys_combined_fit = [str(item) for i, item in enumerate(x)]
+
+        if linear:
+            def fitf(p, x):
+                return p[1] + p[0] * x
+            fitf_dict = {}
+            for i, item in enumerate(x):
+                fitf_dict[str(item)] = fitf
+        else:
+            def fitf(p, x):
+                return p[1] * anp.exp(-p[0] * x)
+            fitf_dict = {}
+            for i, item in enumerate(x):
+                fitf_dict[str(item)] = fitf
+
+        fitpc = pe.least_squares(x, data, fitf, correlated_fit=True)
+        fitp_inv_cov = pe.least_squares(x, data, fitf, correlated_fit=True, inv_chol_cov_matrix=[chol_inv, chol_inv_keys])
+        fitp_inv_cov_combined_fit = pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit=True, inv_chol_cov_matrix=[chol_inv, chol_inv_keys_combined_fit])
+        for i in range(2):
+            diff_inv_cov = fitp_inv_cov[i] - fitpc[i]
+            diff_inv_cov.gamma_method()
+            assert diff_inv_cov.is_zero(atol=0.0)
+            diff_inv_cov_combined_fit = fitp_inv_cov_combined_fit[i] - fitpc[i]
+            diff_inv_cov_combined_fit.gamma_method()
+            assert diff_inv_cov_combined_fit.is_zero(atol=1e-12)
+
+        with pytest.raises(ValueError):
+            pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit=True, inv_chol_cov_matrix=[corr, chol_inv_keys_combined_fit])
+
+
+def test_least_squares_invalid_inv_cov_matrix_input():
+    xvals = []
+    yvals = []
+    err = 0.1
+
+    def func_valid(a, x):
+        return a[0] + a[1] * x
+    for x in range(1, 8, 2):
+        xvals.append(x)
+        yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
+
+    [o.gamma_method() for o in yvals]
+
+    # dictionaries for a combined fit
+    xvals_dict = {}
+    yvals_dict = {}
+    for i, item in enumerate(np.arange(1, 8, 2)):
+        xvals_dict[str(item)] = [xvals[i]]
+        yvals_dict[str(item)] = [yvals[i]]
+    chol_inv_keys_combined_fit = ['1', '3', '5', '7']
+    chol_inv_keys_combined_fit_invalid = ['2', '7', '100', '8']
+    func_dict_valid = {"1": func_valid, "3": func_valid, "5": func_valid, "7": func_valid}
+
+    corr_valid = pe.covariance(yvals, correlation=True)
+    chol = np.linalg.cholesky(corr_valid)
+    covdiag = np.diag(1 / np.asarray([o.dvalue for o in yvals]))
+    chol_inv_valid = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
+    chol_inv_keys = [""]
+    pe.least_squares(xvals, yvals, func_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_valid, chol_inv_keys])
+    pe.least_squares(xvals_dict, yvals_dict, func_dict_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_valid, chol_inv_keys_combined_fit])
+    chol_inv_invalid_shape1 = np.zeros((len(yvals), len(yvals) - 1))
+    chol_inv_invalid_shape2 = np.zeros((len(yvals) + 2, len(yvals)))
+
+    # for an uncombined fit
+    with pytest.raises(TypeError):
+        pe.least_squares(xvals, yvals, func_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_invalid_shape1, chol_inv_keys])
+    with pytest.raises(TypeError):
+        pe.least_squares(xvals, yvals, func_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_invalid_shape2, chol_inv_keys])
+    with pytest.raises(ValueError):
+        pe.least_squares(xvals, yvals, func_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_valid, chol_inv_keys_combined_fit_invalid])
+
+    # repeat for a combined fit
+    with pytest.raises(TypeError):
+        pe.least_squares(xvals_dict, yvals_dict, func_dict_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_invalid_shape1, chol_inv_keys_combined_fit])
+    with pytest.raises(TypeError):
+        pe.least_squares(xvals_dict, yvals_dict, func_dict_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_invalid_shape2, chol_inv_keys_combined_fit])
+    with pytest.raises(ValueError):
+        pe.least_squares(xvals_dict, yvals_dict, func_dict_valid, correlated_fit=True, inv_chol_cov_matrix=[chol_inv_valid, chol_inv_keys_combined_fit_invalid])
+
 def test_correlated_fit():
     num_samples = 400
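Condensed from the new test above, the workflow for reusing a precomputed inverse Cholesky factor across correlated fits; data and names are illustrative:

import autograd.numpy as anp
import numpy as np
import scipy.linalg
import pyerrors as pe

y = [pe.pseudo_Obs(np.exp(-0.3 * (i + 1)), 0.05, 'ens') for i in range(6)]
[o.gamma_method() for o in y]

corr = pe.covariance(y, correlation=True)
chol = np.linalg.cholesky(corr)
covdiag = np.diag(1 / np.asarray([o.dvalue for o in y]))
chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)

def fitf(p, x):
    return p[1] * anp.exp(-p[0] * x)

# compute chol_inv once, reuse it in many correlated fits
fit = pe.least_squares(np.arange(1, 7), y, fitf, correlated_fit=True,
                       inv_chol_cov_matrix=[chol_inv, [""]])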
@@ -964,6 +1085,21 @@ def test_combined_resplot_qqplot():
     fr = pe.least_squares(xd, yd, fd, resplot=True, qqplot=True)
     plt.close('all')

+
+def test_combined_fit_xerr():
+    fitd = {
+        'a': lambda p, x: p[0] * x[0] + p[1] * x[1],
+        'b': lambda p, x: p[0] * x[0] + p[2] * x[1],
+        'c': lambda p, x: p[0] * x[0] + p[3] * x[1],
+    }
+    yd = {
+        'a': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'a' + str(i)) for i in range(5)],
+        'b': [pe.cov_Obs(1 + .1 * np.random.uniform(), .1**2, 'b' + str(i)) for i in range(6)],
+        'c': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'c' + str(i)) for i in range(3)],
+    }
+    xd = {k: np.transpose([[1 + .01 * np.random.uniform(), 2] for i in range(len(yd[k]))]) for k in fitd}
+    pe.fits.least_squares(xd, yd, fitd)
+    pe.fits.least_squares(xd, yd, fitd, n_parms=4)
 def test_x_multidim_fit():
     x1 = np.arange(1, 10)
@@ -1205,6 +1341,54 @@ def test_combined_fit_constant_shape():
     funcs = {"a": lambda a, x: a[0] + a[1] * x,
              "": lambda a, x: a[1] + x * 0}
     pe.fits.least_squares(x, y, funcs, method='migrad')
+    pe.fits.least_squares(x, y, funcs, method='migrad', n_parms=2)
+
+
+def test_fit_n_parms():
+    # Function that fails if the number of parameters is not specified:
+    def fcn(p, x):
+        # Assumes the first half of the terms are A, the second half are E
+        NTerms = int(len(p) / 2)
+        A = anp.array(p[0:NTerms])[:, np.newaxis]    # shape (n, 1)
+        E_P = anp.array(p[NTerms:])[:, np.newaxis]   # shape (n, 1)
+        # This if statement handles the case where x is a single value rather than an array
+        if isinstance(x, anp.float64) or isinstance(x, anp.int64) or isinstance(x, float) or isinstance(x, int):
+            x = anp.array([x])[np.newaxis, :]  # shape (1, m)
+        else:
+            x = anp.array(x)[np.newaxis, :]    # shape (1, m)
+        exp_term = anp.exp(-E_P * x)
+        weighted_sum = A * exp_term            # shape (n, m)
+        return anp.mean(weighted_sum, axis=0)  # shape (m)
+
+    c = pe.Corr([pe.pseudo_Obs(2. * np.exp(-.2 * t) + .4 * np.exp(+.4 * t) + .4 * np.exp(-.6 * t), .1, 'corr') for t in range(12)])
+
+    c.fit(fcn, n_parms=2)
+    c.fit(fcn, n_parms=4)
+
+    xf = [pe.pseudo_Obs(t, .05, 'corr') for t in range(c.T)]
+    yf = [c[t] for t in range(c.T)]
+    pe.fits.total_least_squares(xf, yf, fcn, n_parms=2)
+    pe.fits.total_least_squares(xf, yf, fcn, n_parms=4)
+
+    # Is expected to fail, this is what is fixed with n_parms
+    with pytest.raises(RuntimeError):
+        c.fit(fcn)
+    with pytest.raises(RuntimeError):
+        pe.fits.total_least_squares(xf, yf, fcn)
+    # Test for positivity
+    with pytest.raises(ValueError):
+        c.fit(fcn, n_parms=-2)
+    with pytest.raises(ValueError):
+        pe.fits.total_least_squares(xf, yf, fcn, n_parms=-4)
+    # Have to pass an integer
+    with pytest.raises(TypeError):
+        c.fit(fcn, n_parms=2.)
+    with pytest.raises(TypeError):
+        pe.fits.total_least_squares(xf, yf, fcn, n_parms=1.2343)
+    # Improper number of parameters (function should fail)
+    with pytest.raises(ValueError):
+        c.fit(fcn, n_parms=7)
+    with pytest.raises(ValueError):
+        pe.fits.total_least_squares(xf, yf, fcn, n_parms=5)
 def fit_general(x, y, func, silent=False, **kwargs):
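The point of the new n_parms keyword, in brief: for variadic models the parameter count cannot be inferred from the function signature, so it is passed explicitly. A small sketch with a hypothetical two-state model:

import autograd.numpy as anp
import pyerrors as pe

def multi_exp(p, x):
    # first half of p are amplitudes, second half are energies
    n = len(p) // 2
    return sum(p[i] * anp.exp(-p[n + i] * x) for i in range(n))

c = pe.Corr([pe.pseudo_Obs(2. * anp.exp(-.3 * t) + .5 * anp.exp(-.6 * t), .05, 'ens')
             for t in range(12)])
c.fit(multi_exp, n_parms=4)   # two amplitudes and two energies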
@@ -1427,3 +1611,81 @@ def old_prior_fit(x, y, func, priors, silent=False, **kwargs):
         qqplot(x, y, func, result)

     return output


+def test_dof_prior_fit():
+    """Performs an uncorrelated fit with a prior to uncorrelated data;
+    the expected chisquare and the usual dof then need to agree."""
+    N = 5
+
+    def fitf(a, x):
+        return a[0] + 0 * x
+
+    x = [1. for i in range(N)]
+    y = [pe.cov_Obs(i, .1, '%d' % (i)) for i in range(N)]
+    [o.gm() for o in y]
+    res = pe.fits.least_squares(x, y, fitf, expected_chisquare=True, priors=[pe.cov_Obs(3, 1, 'p')])
+    assert res.chisquare_by_expected_chisquare == res.chisquare_by_dof
+
+    num_samples = 400
+    N = 10
+
+    x = norm.rvs(size=(N, num_samples))  # generate random numbers
+
+    r = np.zeros((N, N))
+    for i in range(N):
+        for j in range(N):
+            if i == j:
+                r[i, j] = 1.0  # element in correlation matrix
+
+    errl = np.sqrt([3.4, 2.5, 3.6, 2.8, 4.2, 4.7, 4.9, 5.1, 3.2, 4.2])  # set y errors
+    for i in range(N):
+        for j in range(N):
+            if i == j:
+                r[i, j] *= errl[i] * errl[j]  # element in covariance matrix
+
+    c = cholesky(r, lower=True)
+    y = np.dot(c, x)
+    x = np.arange(N)
+    x_dict = {}
+    y_dict = {}
+    for i, item in enumerate(x):
+        x_dict[str(item)] = [x[i]]
+
+    for linear in [True, False]:
+        data = []
+        for i in range(N):
+            if linear:
+                data.append(pe.Obs([[i + 1 + o for o in y[i]]], ['ens' + str(i)]))
+            else:
+                data.append(pe.Obs([[np.exp(-(i + 1)) + np.exp(-(i + 1)) * o for o in y[i]]], ['ens' + str(i)]))
+
+        [o.gamma_method() for o in data]
+
+        data_dict = {}
+        for i, item in enumerate(x):
+            data_dict[str(item)] = [data[i]]
+
+        corr = pe.covariance(data, correlation=True)
+        chol = np.linalg.cholesky(corr)
+        covdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
+        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
+        chol_inv_keys = [""]
+        chol_inv_keys_combined_fit = [str(item) for i, item in enumerate(x)]
+
+        if linear:
+            def fitf(p, x):
+                return p[1] + p[0] * x
+            fitf_dict = {}
+            for i, item in enumerate(x):
+                fitf_dict[str(item)] = fitf
+        else:
+            def fitf(p, x):
+                return p[1] * anp.exp(-p[0] * x)
+            fitf_dict = {}
+            for i, item in enumerate(x):
+                fitf_dict[str(item)] = fitf
+
+        fit_exp = pe.least_squares(x, data, fitf, expected_chisquare=True, priors={0: pe.cov_Obs(1.0, 1, 'p')})
+        fit_cov = pe.least_squares(x, data, fitf, correlated_fit=True, inv_chol_cov_matrix=[chol_inv, chol_inv_keys], priors={0: pe.cov_Obs(1.0, 1, 'p')})
+        assert np.isclose(fit_exp.chisquare_by_expected_chisquare, fit_exp.chisquare_by_dof, atol=1e-8)
+        assert np.isclose(fit_exp.chisquare_by_expected_chisquare, fit_cov.chisquare_by_dof, atol=1e-8)
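Alongside the new test, a minimal self-contained sketch of the correlated-fit interface it exercises (the data below are illustrative, not taken from the test suite): the inverse Cholesky factor of the correlation matrix is computed once and handed to least_squares together with its key list.

    import numpy as np
    import scipy.linalg
    import pyerrors as pe

    def fitf(p, x):
        return p[1] + p[0] * x

    x = np.arange(5)
    data = [pe.pseudo_Obs(1.0 + 0.5 * i, 0.1, 'ens' + str(i)) for i in range(5)]
    [o.gm() for o in data]

    corr = pe.covariance(data, correlation=True)   # correlation matrix of the y-data
    chol = np.linalg.cholesky(corr)                # lower-triangular Cholesky factor
    covdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
    chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)

    fit = pe.least_squares(x, data, fitf, correlated_fit=True,
                           inv_chol_cov_matrix=[chol_inv, [""]])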
@@ -12,7 +12,7 @@ def test_jsonio():
     o = pe.pseudo_Obs(1.0, .2, 'one')
     o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
     o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
-    o4 = pe.merge_obs([o2, o3])
+    o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
     otag = 'This has been merged!'
     o4.tag = otag
     do = o - .2 * o4
@@ -101,8 +101,8 @@ def test_json_string_reconstruction():


 def test_json_corr_io():
-    my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']) for o in range(8)]
-    rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']), my_list)
+    my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]) for o in range(8)]
+    rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]), my_list)

     for obs_list in [my_list, rw_list]:
         for tag in [None, "test"]:
@@ -111,40 +111,51 @@ def test_json_corr_io():
                 for corr_tag in [None, 'my_Corr_tag']:
                     for prange in [None, [3, 6]]:
                         for gap in [False, True]:
-                            my_corr = pe.Corr(obs_list, padding=[pad, pad], prange=prange)
-                            my_corr.tag = corr_tag
-                            if gap:
-                                my_corr.content[4] = None
-                            pe.input.json.dump_to_json(my_corr, 'corr')
-                            recover = pe.input.json.load_json('corr')
-                            os.remove('corr.json.gz')
-                            assert np.all([o.is_zero() for o in [x for x in (my_corr - recover) if x is not None]])
-                            for index, entry in enumerate(my_corr):
-                                if entry is None:
-                                    assert recover[index] is None
-                            assert my_corr.tag == recover.tag
-                            assert my_corr.prange == recover.prange
-                            assert my_corr.reweighted == recover.reweighted
+                            for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
+                                my_corr = mult * pe.Corr(obs_list, padding=[pad, pad], prange=prange)
+                                my_corr.tag = corr_tag
+                                if gap:
+                                    my_corr.content[4] = None
+                                pe.input.json.dump_to_json(my_corr, 'corr')
+                                recover = pe.input.json.load_json('corr')
+                                os.remove('corr.json.gz')
+                                assert np.all([o.is_zero() for o in [x for x in (my_corr - recover) if x is not None]])
+                                for index, entry in enumerate(my_corr):
+                                    if entry is None:
+                                        assert recover[index] is None
+                                assert my_corr.tag == recover.tag
+                                assert my_corr.prange == recover.prange
+                                assert my_corr.reweighted == recover.reweighted


 def test_json_corr_2d_io():
-    obs_list = [np.array([[pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'), pe.pseudo_Obs(0.0, 0.1 * i, 'test')], [pe.pseudo_Obs(0.0, 0.1 * i, 'test'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')]]) for i in range(4)]
+    obs_list = [np.array([
+        [
+            pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
+            pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321)]),
+        ],
+        [
+            pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321),]),
+            pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
+        ],
+    ]) for i in range(4)]

     for tag in [None, "test"]:
         obs_list[3][0, 1].tag = tag
         for padding in [0, 1]:
             for prange in [None, [3, 6]]:
-                my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
-                my_corr.tag = tag
-                pe.input.json.dump_to_json(my_corr, 'corr')
-                recover = pe.input.json.load_json('corr')
-                os.remove('corr.json.gz')
-                assert np.all([np.all([o.is_zero() for o in q]) for q in [x.ravel() for x in (my_corr - recover) if x is not None]])
-                for index, entry in enumerate(my_corr):
-                    if entry is None:
-                        assert recover[index] is None
-                assert my_corr.tag == recover.tag
-                assert my_corr.prange == recover.prange
+                for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
+                    my_corr = mult * pe.Corr(obs_list, padding=[padding, padding], prange=prange)
+                    my_corr.tag = tag
+                    pe.input.json.dump_to_json(my_corr, 'corr')
+                    recover = pe.input.json.load_json('corr')
+                    os.remove('corr.json.gz')
+                    assert np.all([np.all([o.is_zero() for o in q]) for q in [x.ravel() for x in (my_corr - recover) if x is not None]])
+                    for index, entry in enumerate(my_corr):
+                        if entry is None:
+                            assert recover[index] is None
+                    assert my_corr.tag == recover.tag
+                    assert my_corr.prange == recover.prange


 def test_json_dict_io():
@@ -211,6 +222,7 @@ def test_json_dict_io():
             'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'),
             'se': None,
             'sf': 1.2,
+            'k': pe.cov_Obs(.1, .001**2, 'cov') * pe.merge_obs([pe.pseudo_Obs(1.0, 0.1, 'test|r2'), pe.pseudo_Obs(1.0, 0.1, 'test|r1', samples=321)]),
         }
     }
@@ -314,7 +326,7 @@ def test_dobsio():

     o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
     o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
-    o4 = pe.merge_obs([o2, o3])
+    o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
     otag = 'This has been merged!'
     o4.tag = otag
     do = o - .2 * o4
@@ -328,7 +340,7 @@ def test_dobsio():
     o5 /= co2[0]
     o5.tag = 2 * otag

-    tt1 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 222, 2)])
+    tt1 = pe.Obs([np.random.rand(100), np.random.rand(102)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 226, 2)])
     tt3 = pe.Obs([np.random.rand(102)], ['qe|r1'])

     tt = tt1 + tt3
@@ -337,7 +349,7 @@ def test_dobsio():

     tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)])

-    ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt)]
+    ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt), o4.reweight(o4)]
     print(ol)
     fname = 'test_rw'
@@ -362,9 +374,12 @@ def test_dobsio():


 def test_reconstruct_non_linear_r_obs(tmp_path):
-    to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
-                ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
-                idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
+    to = (
+        pe.Obs([np.random.rand(500), np.random.rand(1200)],
+               ["e|r1", "e|r2", ],
+               idl=[range(1, 501), range(0, 1200)])
+        + pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
+    )
     to = np.log(to ** 2) / to
     to.dump((tmp_path / "test_equality").as_posix())
     ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())
@@ -372,9 +387,12 @@ def test_reconstruct_non_linear_r_obs(tmp_path):


 def test_reconstruct_non_linear_r_obs_list(tmp_path):
-    to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
-                ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
-                idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
+    to = (
+        pe.Obs([np.random.rand(500), np.random.rand(1200)],
+               ["e|r1", "e|r2", ],
+               idl=[range(1, 501), range(0, 1200)])
+        + pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
+    )
     to = np.log(to ** 2) / to
     for to_list in [[to, to, to], np.array([to, to, to])]:
         pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())
@@ -34,7 +34,7 @@ def test_matmul():
         my_list = []
         length = 100 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
+            my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
         my_array = const * np.array(my_list).reshape((dim, dim))
         tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -43,8 +43,8 @@ def test_matmul():
         my_list = []
         length = 100 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
-                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
+            my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
+                                   pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
         my_array = np.array(my_list).reshape((dim, dim)) * const
         tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -151,7 +151,7 @@ def test_multi_dot():
         my_list = []
         length = 1000 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
+            my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
         my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
         tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -160,8 +160,8 @@ def test_multi_dot():
         my_list = []
         length = 1000 + np.random.randint(200)
         for i in range(dim ** 2):
-            my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
-                                   pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
+            my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
+                                   pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
         my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
         tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
         for t, e in np.ndenumerate(tt):
@@ -209,7 +209,7 @@ def test_irregular_matrix_inverse():
     for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]:
         irregular_array = []
         for i in range(dim ** 2):
-            irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
+            irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]) + pe.Obs([np.random.normal(0.25, 0.1, 10)], ['ens2'], idl=[range(1, 11)]))
         irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')

         invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T
@@ -276,10 +276,10 @@ def test_matrix_functions():
     for (i, j), entry in np.ndenumerate(check_inv):
         entry.gamma_method()
         if(i == j):
-            assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
+            assert math.isclose(entry.value, 1.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
         else:
-            assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
-        assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
+            assert math.isclose(entry.value, 0.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
+        assert math.isclose(entry.dvalue, 0.0, abs_tol=2e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)

     # Check Cholesky decomposition
     sym = np.dot(matrix, matrix.T)
@@ -61,9 +61,9 @@ def test_Obs_exceptions():
         my_obs.plot_rep_dist()
     with pytest.raises(Exception):
         my_obs.plot_piechart()
-    with pytest.raises(Exception):
+    with pytest.raises(TypeError):
         my_obs.gamma_method(S='2.3')
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         my_obs.gamma_method(tau_exp=2.3)
     my_obs.gamma_method()
     my_obs.details()
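A pattern worth noting across these test hunks: generic pytest.raises(Exception) checks are narrowed to the concrete exception classes pyerrors raises, TypeError for arguments of the wrong type and ValueError for invalid values. A minimal sketch of the narrowed form, matching the asserts above and below:

    import numpy as np
    import pytest
    import pyerrors as pe

    my_obs = pe.Obs([np.random.rand(100)], ['ens'])
    with pytest.raises(TypeError):
        my_obs.gamma_method(S='2.3')   # S must be numeric
    with pytest.raises(ValueError):
        my_obs.gamma_method(S=-0.2)    # S must not be negative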
@@ -152,7 +152,7 @@ def test_function_overloading():
     np.arccos(1 / b)
     np.arctan(1 / b)
     np.arctanh(1 / b)
-    np.sinc(1 / b)
+    #np.sinc(1 / b) # Commented out for now

     b ** b
     0.5 ** b
@@ -199,7 +199,7 @@ def test_gamma_method_no_windowing():
         assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
     obs.gamma_method(S=1.1)
     assert obs.e_tauint['ens'] > 0.5
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         obs.gamma_method(S=-0.2)
@@ -333,7 +333,7 @@ def test_derived_observables():


 def test_multi_ens():
     names = ['A0', 'A1|r001', 'A1|r002']
-    test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
+    test_obs = pe.Obs([np.random.rand(50)], names[:1]) + pe.Obs([np.random.rand(50), np.random.rand(50)], names[1:])
     assert test_obs.e_names == ['A0', 'A1']
     assert test_obs.e_content['A0'] == ['A0']
     assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002']
@@ -345,6 +345,9 @@ def test_multi_ens():
         ensembles.append(str(i))
     assert my_sum.e_names == sorted(ensembles)

+    with pytest.raises(ValueError):
+        test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
+

 def test_multi_ens2():
     names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh']
@@ -461,6 +464,18 @@ def test_cobs_overloading():
     obs / cobs


+def test_pow():
+    data = [1, 2.341, pe.pseudo_Obs(4.8, 0.48, "test_obs"), pe.cov_Obs(1.1, 0.3 ** 2, "test_cov_obs")]
+
+    for d in data:
+        assert d * d == d ** 2
+        assert d * d * d == d ** 3
+
+        for d2 in data:
+            assert np.log(d ** d2) == d2 * np.log(d)
+            assert (d ** d2) ** (1 / d2) == d
+
+
 def test_reweighting():
     my_obs = pe.Obs([np.random.rand(1000)], ['t'])
     assert not my_obs.reweighted
@@ -478,26 +493,33 @@ def test_reweighting():
     r_obs2 = r_obs[0] * my_obs
     assert r_obs2.reweighted
     my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.reweight(my_obs, [my_covobs])
     my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.reweight(my_obs, [my_obs + my_obs2])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.reweight(my_irregular_obs, [my_obs])

+    my_merged_obs = my_obs + pe.Obs([np.random.rand(1000)], ['q'])
+    with pytest.raises(ValueError):
+        pe.reweight(my_merged_obs, [my_merged_obs])
+

 def test_merge_obs():
-    my_obs1 = pe.Obs([np.random.rand(100)], ['t'])
-    my_obs2 = pe.Obs([np.random.rand(100)], ['q'], idl=[range(1, 200, 2)])
+    my_obs1 = pe.Obs([np.random.normal(1, .1, 100)], ['t|1'])
+    my_obs2 = pe.Obs([np.random.normal(1, .1, 100)], ['t|2'], idl=[range(1, 200, 2)])
     merged = pe.merge_obs([my_obs1, my_obs2])
-    diff = merged - my_obs2 - my_obs1
-    assert diff == -(my_obs1.value + my_obs2.value) / 2
-    with pytest.raises(Exception):
+    diff = merged - (my_obs2 + my_obs1) / 2
+    assert np.isclose(0, diff.value, atol=1e-16)
+    with pytest.raises(ValueError):
         pe.merge_obs([my_obs1, my_obs1])
     my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.merge_obs([my_obs1, my_covobs])
+    my_obs3 = pe.Obs([np.random.rand(100)], ['q|2'], idl=[range(1, 200, 2)])
+    with pytest.raises(ValueError):
+        pe.merge_obs([my_obs1, my_obs3])
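The reworked test_merge_obs above suggests the intended use of merge_obs: the inputs are replica of one ensemble (names of the form 'ens|r...') and the merged value is their average. A minimal sketch with illustrative names:

    import numpy as np
    import pyerrors as pe

    r1 = pe.Obs([np.random.normal(1, .1, 100)], ['ens|r1'])
    r2 = pe.Obs([np.random.normal(1, .1, 100)], ['ens|r2'])
    merged = pe.merge_obs([r1, r2])  # a single Obs on ensemble 'ens' with two replica
    merged.gm()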
@@ -519,23 +541,26 @@ def test_correlate():
     assert corr1 == corr2

     my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.correlate(my_obs1, my_obs3)

     my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.correlate(my_obs1, my_obs4)

     my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
     my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
     corr3 = pe.correlate(my_obs5, my_obs6)
     assert my_obs5.idl == corr3.idl

+    my_obs7 = pe.Obs([np.random.rand(99)], ['q'])
+    with pytest.raises(ValueError):
+        pe.correlate(my_obs1, my_obs7)
+
     my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.correlate(my_obs1, my_new_obs)
     my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         pe.correlate(my_covobs, my_covobs)
     r_obs = pe.reweight(my_obs1, [my_obs1])[0]
     with pytest.warns(RuntimeWarning):
@@ -554,11 +579,11 @@ def test_merge_idx():

     for j in range(5):
         idll = [range(1, int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
-        assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))
+        assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))

     for j in range(5):
         idll = [range(int(round(np.random.uniform(1, 28))), int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
-        assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))
+        assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))

     idl = [list(np.arange(1, 14)) + list(range(16, 100, 4)), range(4, 604, 4), [2, 4, 5, 6, 8, 9, 12, 24], range(1, 20, 1), range(50, 789, 7)]
     new_idx = pe.obs._merge_idx(idl)
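The list(...) casts added above hint that _merge_idx may now return a range object rather than a list when the merged index set is evenly spaced; this is an inference from the changed asserts, not a documented guarantee. A quick check in that spirit:

    import pyerrors as pe

    idll = [range(1, 100, 2), range(1, 100, 2)]
    assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))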
@@ -669,14 +694,14 @@ def test_gamma_method_irregular():
     assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue)

     arr2 = np.random.normal(1, .2, size=N)
-    afull = pe.Obs([arr, arr2], ['a1', 'a2'])
+    afull = pe.Obs([arr], ['a1']) + pe.Obs([arr2], ['a2'])

     configs = np.ones_like(arr2)
     for i in np.random.uniform(0, len(arr2), size=int(.8*N)):
         configs[int(i)] = 0
     zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0]
     idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1]
-    a = pe.Obs([zero_arr, zero_arr2], ['a1', 'a2'], idl=[idx, idx2])
+    a = pe.Obs([zero_arr], ['a1'], idl=[idx]) + pe.Obs([zero_arr2], ['a2'], idl=[idx2])

     afull.gamma_method()
     a.gamma_method()
@@ -762,7 +787,7 @@ def test_gamma_method_irregular():
     my_obs.gm()
     idl += [range(1, 400, 4)]
     my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         my_obs.gm()

     # check cases where tau is large compared to the chain length
@@ -1010,7 +1035,7 @@ def test_correlation_intersection_of_idls():


 def test_covariance_non_identical_objects():
-    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
+    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000)], ["ens|r1", "ens|r2"]) + pe.Obs([np.random.normal(1.0, 0.1, 732)], ['ens2'])
     obs1.gamma_method()
     obs2 = obs1 + 1e-18
     obs2.gamma_method()
@@ -1063,6 +1088,27 @@ def test_covariance_reorder_non_overlapping_data():
     assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)


+def test_sort_corr():
+    xd = {
+        'b': [1, 2, 3],
+        'a': [2.2, 4.4],
+        'c': [3.7, 5.1]
+    }
+
+    yd = {k: pe.cov_Obs(xd[k], [.2 * o for o in xd[k]], k) for k in xd}
+    key_orig = list(yd.keys())
+    y_all = np.concatenate([np.array(yd[key]) for key in key_orig])
+    [o.gm() for o in y_all]
+    cov = pe.covariance(y_all)
+
+    key_ls = key_sorted = sorted(key_orig)
+    y_sorted = np.concatenate([np.array(yd[key]) for key in key_sorted])
+    [o.gm() for o in y_sorted]
+    cov_sorted = pe.covariance(y_sorted)
+    retcov = pe.obs.sort_corr(cov, key_orig, yd)
+    assert np.sum(retcov - cov_sorted) == 0
+
+
 def test_empty_obs():
     o = pe.Obs([np.random.rand(100)], ['test'])
     q = o + pe.Obs([], [], means=[])
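In brief, the new pe.obs.sort_corr helper reorders a covariance/correlation matrix built from a dict of observables from the original key order to the sorted one. A compact sketch along the lines of the test above (keys illustrative):

    import numpy as np
    import pyerrors as pe

    yd = {'b': pe.cov_Obs([1, 2], [.2, .4], 'b'), 'a': pe.cov_Obs([3.3], [.6], 'a')}
    y_all = np.concatenate([np.array(yd[k]) for k in yd])
    [o.gm() for o in y_all]
    cov = pe.covariance(y_all)
    cov_sorted = pe.obs.sort_corr(cov, list(yd.keys()), yd)  # rows/columns now follow sorted keys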
@@ -1073,6 +1119,9 @@ def test_reweight_method():
     obs1 = pe.pseudo_Obs(0.2, 0.01, 'test')
     rw = pe.pseudo_Obs(0.999, 0.001, 'test')
     assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0]
+    rw2 = pe.pseudo_Obs(0.999, 0.001, 'test2')
+    with pytest.raises(ValueError):
+        obs1.reweight(rw2)


 def test_jackknife():
@@ -1089,7 +1138,7 @@ def test_jackknife():

     assert np.allclose(tmp_jacks, my_obs.export_jackknife())
     my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
-    with pytest.raises(Exception):
+    with pytest.raises(ValueError):
         my_new_obs.export_jackknife()
@@ -24,10 +24,10 @@ def build_test_environment(path, env_type, cfgs, reps):
             os.mkdir(path + "/data_c/data_c_r"+str(i))
             for j in range(1, cfgs+1):
                 shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1", path + "/data_c/data_c_r"+str(i)+"/data_c_r"+str(i)+"_n"+str(j))
-    elif env_type == "a":
+    elif env_type in ["a", "apf"]:
         for i in range(1, reps):
             for corr in ["f_1", "f_A", "F_V0"]:
-                shutil.copy(path + "/data_a/data_a_r0." + corr, path + "/data_a/data_a_r" + str(i) + "." + corr)
+                shutil.copy(path + "/data_" + env_type + "/data_" + env_type + "_r0." + corr, path + "/data_" + env_type + "/data_" + env_type + "_r" + str(i) + "." + corr)


 def test_o_bb(tmp_path):
@@ -276,6 +276,28 @@ def test_a_bb(tmp_path):
     assert f_1[0].value == 351.1941525454502


+def test_a_bb_external_idl_func(tmp_path):
+    build_test_environment(str(tmp_path), "a", 5, 3)
+
+    def extract_idl(s: str) -> int:
+        return int(s.split("n")[-1])
+
+    f_1 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bb", cfg_func=extract_idl)
+    print(f_1)
+    assert len(f_1) == 1
+    assert list(f_1[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
+    assert f_1[0].value == 351.1941525454502
+
+
+def test_a_bb_external_idl_func_postfix(tmp_path):
+    build_test_environment(str(tmp_path), "apf", 5, 3)
+
+    def extract_idl(s: str) -> int:
+        return int(s.split("n")[-1][:-5])
+
+    f_1 = sfin.read_sfcf(str(tmp_path) + "/data_apf", "data_apf", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bb", cfg_func=extract_idl)
+    print(f_1)
+    assert len(f_1) == 1
+    assert list(f_1[0].shape.keys()) == ["data_apf_|r0", "data_apf_|r1", "data_apf_|r2"]
+    assert f_1[0].value == 351.1941525454502
+
+
 def test_a_bi(tmp_path):
     build_test_environment(str(tmp_path), "a", 5, 3)
     f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a")
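A minimal usage sketch of the new cfg_func keyword of read_sfcf exercised by these tests (paths illustrative): the caller supplies the function that maps a configuration file name to its configuration number, here for files named like data_a_r0_n12:

    import pyerrors.input.sfcf as sfin

    def extract_idl(filename: str) -> int:
        # the configuration number is everything after the last 'n'
        return int(filename.split("n")[-1])

    f_1 = sfin.read_sfcf("./data_a", "data_a", "f_1", quarks="lquark lquark",
                         wf=0, wf2=0, version="2.0a", corr_type="bb",
                         cfg_func=extract_idl)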
@@ -287,6 +309,32 @@ def test_a_bi(tmp_path):
     assert f_A[2].value == -41.025094911185185


+def test_a_bi_external_idl_func(tmp_path):
+    build_test_environment(str(tmp_path), "a", 5, 3)
+
+    def extract_idl(s: str) -> int:
+        return int(s.split("n")[-1])
+
+    f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a", cfg_func=extract_idl)
+    print(f_A)
+    assert len(f_A) == 3
+    assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
+    assert f_A[0].value == 65.4711887279723
+    assert f_A[1].value == 1.0447210336915187
+    assert f_A[2].value == -41.025094911185185
+
+
+def test_a_bi_external_idl_func_postfix(tmp_path):
+    build_test_environment(str(tmp_path), "apf", 5, 3)
+
+    def extract_idl(s: str) -> int:
+        return int(s.split("n")[-1][:-5])
+
+    f_A = sfin.read_sfcf(str(tmp_path) + "/data_apf", "data_apf", "f_A", quarks="lquark lquark", wf=0, version="2.0a", cfg_func=extract_idl)
+    print(f_A)
+    assert len(f_A) == 3
+    assert list(f_A[0].shape.keys()) == ["data_apf_|r0", "data_apf_|r1", "data_apf_|r2"]
+    assert f_A[0].value == 65.4711887279723
+    assert f_A[1].value == 1.0447210336915187
+    assert f_A[2].value == -41.025094911185185
+
+
 def test_a_bi_files(tmp_path):
     build_test_environment(str(tmp_path), "a", 5, 3)
     f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a", files=["data_a_r0.f_A", "data_a_r1.f_A", "data_a_r2.f_A"])
@@ -316,6 +364,31 @@ def test_a_bib(tmp_path):
     assert f_V0[2] == 683.6776090081005


+def test_a_bib_external_idl_func(tmp_path):
+    build_test_environment(str(tmp_path), "a", 5, 3)
+
+    def extract_idl(s: str) -> int:
+        return int(s.split("n")[-1])
+
+    f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bib", cfg_func=extract_idl)
+    print(f_V0)
+    assert len(f_V0) == 3
+    assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
+    assert f_V0[0] == 683.6776090085115
+    assert f_V0[1] == 661.3188585582334
+    assert f_V0[2] == 683.6776090081005
+
+
+def test_a_bib_external_idl_func_postfix(tmp_path):
+    build_test_environment(str(tmp_path), "apf", 5, 3)
+
+    def extract_idl(s: str) -> int:
+        return int(s.split("n")[-1][:-5])
+
+    f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_apf", "data_apf", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bib", cfg_func=extract_idl)
+    print(f_V0)
+    assert len(f_V0) == 3
+    assert list(f_V0[0].shape.keys()) == ["data_apf_|r0", "data_apf_|r1", "data_apf_|r2"]
+    assert f_V0[0] == 683.6776090085115
+    assert f_V0[1] == 661.3188585582334
+    assert f_V0[2] == 683.6776090081005
+
+
 def test_simple_multi_a(tmp_path):
     build_test_environment(str(tmp_path), "a", 5, 3)
     corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_a", "data_a", ["F_V0"], quarks_list=["lquark lquark"], wf1_list=[0], wf2_list=[0], version="2.0a", corr_type_list=["bib"])
@@ -387,3 +460,33 @@ def test_find_correlator():
     found_start, found_T = sfin._find_correlator(file, "2.0", "name f_A\nquarks lquark lquark\noffset 0\nwf 0", False, False)
     assert found_start == 21
     assert found_T == 3
+
+
+def test_get_rep_name():
+    names = ['data_r0', 'data_r1', 'data_r2']
+    new_names = sfin._get_rep_names(names)
+    assert len(new_names) == 3
+    assert new_names[0] == 'data_|r0'
+    assert new_names[1] == 'data_|r1'
+    assert new_names[2] == 'data_|r2'
+    names = ['data_q0', 'data_q1', 'data_q2']
+    new_names = sfin._get_rep_names(names, rep_sep='q')
+    assert len(new_names) == 3
+    assert new_names[0] == 'data_|q0'
+    assert new_names[1] == 'data_|q1'
+    assert new_names[2] == 'data_|q2'
+
+
+def test_get_appended_rep_name():
+    names = ['data_r0.f_1', 'data_r1.f_1', 'data_r2.f_1']
+    new_names = sfin._get_appended_rep_names(names, 'data', 'f_1')
+    assert len(new_names) == 3
+    assert new_names[0] == 'data_|r0'
+    assert new_names[1] == 'data_|r1'
+    assert new_names[2] == 'data_|r2'
+    names = ['data_q0.f_1', 'data_q1.f_1', 'data_q2.f_1']
+    new_names = sfin._get_appended_rep_names(names, 'data', 'f_1', rep_sep='q')
+    assert len(new_names) == 3
+    assert new_names[0] == 'data_|q0'
+    assert new_names[1] == 'data_|q1'
+    assert new_names[2] == 'data_|q2'
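Finally, a one-line illustration of the new rep_sep argument tested above (helper names are pyerrors-internal): the character separating a replicum's name from its number no longer has to be 'r':

    import pyerrors.input.sfcf as sfin

    assert sfin._get_rep_names(['data_q0', 'data_q1'], rep_sep='q') == ['data_|q0', 'data_|q1']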