diff --git a/tests/test_fits.py b/tests/test_fits.py
index 470c6a74..461136ce 100644
--- a/tests/test_fits.py
+++ b/tests/test_fits.py
@@ -1,16 +1,13 @@
 import autograd.numpy as np
-import os
-import random
-import string
-import copy
 import math
 import scipy.optimize
-from scipy.odr import ODR, Model, Data, RealData
+from scipy.odr import ODR, Model, RealData
 import pyerrors as pe
 import pytest
 
 np.random.seed(0)
 
+
 def test_standard_fit():
     dim = 10 + int(30 * np.random.rand())
     x = np.arange(dim)
@@ -69,7 +66,7 @@ def test_odr_fit():
     data = RealData([o.value for o in ox], [o.value for o in oy], sx=[o.dvalue for o in ox], sy=[o.dvalue for o in oy])
     model = Model(func)
 
-    odr = ODR(data, model, [0,0], partol=np.finfo(np.float64).eps)
+    odr = ODR(data, model, [0, 0], partol=np.finfo(np.float64).eps)
     odr.set_job(fit_type=0, deriv=1)
     output = odr.run()
 
@@ -79,8 +76,8 @@
     for i in range(2):
         beta[i].gamma_method(e_tag=5, S=1.0)
         assert math.isclose(beta[i].value, output.beta[i], rel_tol=1e-5)
-        assert math.isclose(output.cov_beta[i,i], beta[i].dvalue**2, rel_tol=2.5e-1), str(output.cov_beta[i,i]) + ' ' + str(beta[i].dvalue**2)
-        assert math.isclose(pe.covariance(beta[0], beta[1]), output.cov_beta[0,1], rel_tol=2.5e-1)
+        assert math.isclose(output.cov_beta[i, i], beta[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(beta[i].dvalue ** 2)
+        assert math.isclose(pe.covariance(beta[0], beta[1]), output.cov_beta[0, 1], rel_tol=2.5e-1)
     pe.Obs.e_tag_global = 0
 
 
@@ -94,7 +91,7 @@ def test_odr_derivatives():
         loc_xvalue = n + np.random.normal(0.0, x_err)
         x.append(pe.pseudo_Obs(loc_xvalue, x_err, str(n)))
         y.append(pe.pseudo_Obs((lambda x: x ** 2 - 1)(loc_xvalue) +
-                 np.random.normal(0.0, y_err), y_err, str(n)))
+                               np.random.normal(0.0, y_err), y_err, str(n)))
 
     def func(a, x):
         return a[0] + a[1] * x ** 2
@@ -103,4 +100,4 @@ def test_odr_derivatives():
     tfit = pe.fits.fit_general(x, y, func, base_step=0.1, step_ratio=1.1, num_steps=20)
 
     assert np.abs(np.max(np.array(list(fit1[1].deltas.values()))
-                  - np.array(list(tfit[1].deltas.values())))) < 10e-8
+                         - np.array(list(tfit[1].deltas.values())))) < 10e-8
diff --git a/tests/test_linalg.py b/tests/test_linalg.py
index e13c80d5..042c1d3a 100644
--- a/tests/test_linalg.py
+++ b/tests/test_linalg.py
@@ -1,18 +1,11 @@
-import sys
-sys.path.append('..')
 import autograd.numpy as np
-import os
-import random
 import math
-import string
-import copy
-import scipy.optimize
-from scipy.odr import ODR, Model, Data, RealData
 import pyerrors as pe
 import pytest
 
 np.random.seed(0)
 
+
 def test_matrix_functions():
     dim = 3 + int(4 * np.random.rand())
     print(dim)
@@ -55,4 +48,3 @@ def test_matrix_functions():
             tmp[j].gamma_method()
             assert math.isclose(tmp[j].value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j)
             assert math.isclose(tmp[j].dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j)
-
diff --git a/tests/test_pyerrors.py b/tests/test_pyerrors.py
index fcc00e6d..0835fb08 100644
--- a/tests/test_pyerrors.py
+++ b/tests/test_pyerrors.py
@@ -8,6 +8,7 @@ import pytest
 
 np.random.seed(0)
 
+
 def test_dump():
     value = np.random.normal(5, 10)
     dvalue = np.abs(np.random.normal(0, 1))
@@ -38,8 +39,8 @@ def test_function_overloading():
           lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]), lambda x: np.tanh(x[0])]
 
     for i, f in enumerate(fs):
-        t1 = f([a,b])
-        t2 = pe.derived_observable(f, [a,b])
+        t1 = f([a, b])
+        t2 = pe.derived_observable(f, [a, b])
         c = t2 - t1
         assert c.value == 0.0, str(i)
         assert np.all(np.abs(c.deltas['e1']) < 1e-14), str(i)
diff --git a/tests/test_roots.py b/tests/test_roots.py
index dce0eb45..c1bdc7ea 100644
--- a/tests/test_roots.py
+++ b/tests/test_roots.py
@@ -4,6 +4,7 @@ import pytest
 
 np.random.seed(0)
 
+
 def test_root_linear():
 
     def root_function(x, d):
@@ -16,4 +17,3 @@ def test_root_linear():
     assert np.isclose(my_root.value, value)
     difference = my_obs - my_root
     assert all(np.isclose(0.0, difference.deltas['t']))
-