Resolved merge conflict in tests

Simon Kuberski 2022-06-19 01:46:31 +02:00
commit c9789a34e6
27 changed files with 695 additions and 142 deletions

View file

@@ -1,6 +1,7 @@
import os
import numpy as np
import scipy
import matplotlib.pyplot as plt
import pyerrors as pe
import pytest
@@ -246,6 +247,21 @@ def test_matrix_corr():
    corr_mat.Eigenvalue(2, state=0)


def test_corr_none_entries():
    a = pe.pseudo_Obs(1.0, 0.1, 'a')
    l = np.asarray([[a, a], [a, a]])
    n = np.asarray([[None, None], [None, None]])
    x = [l, n]
    matr = pe.Corr(x)
    matr.projected(np.asarray([1.0, 0.0]))
    matr * 2 - 2 * matr
    matr * matr + matr ** 2 / matr
    for func in [np.sqrt, np.log, np.exp, np.sin, np.cos, np.tan, np.sinh, np.cosh, np.tanh]:
        func(matr)


def test_GEVP_warnings():
    corr_aa = _gen_corr(1)
    corr_ab = 0.5 * corr_aa
@@ -332,6 +348,15 @@ def test_matrix_symmetric():
    assert np.all([np.all(o == o.T) for o in sym_corr_mat])

    t_obs = pe.pseudo_Obs(1.0, 0.1, 'test')
    o_mat = np.array([[t_obs, t_obs], [t_obs, t_obs]])
    corr1 = pe.Corr([o_mat, None, o_mat])
    corr2 = pe.Corr([o_mat, np.array([[None, None], [None, None]]), o_mat])
    corr3 = pe.Corr([o_mat, np.array([[t_obs, None], [None, t_obs]], dtype=object), o_mat])
    corr1.matrix_symmetric()
    corr2.matrix_symmetric()
    corr3.matrix_symmetric()


def test_GEVP_solver():
@@ -347,6 +372,17 @@ def test_GEVP_solver():
    assert np.allclose(sp_vecs, pe.correlators._GEVP_solver(mat1, mat2), atol=1e-14)


def test_GEVP_none_entries():
    t_obs = pe.pseudo_Obs(1.0, 0.1, 'test')
    t_obs2 = pe.pseudo_Obs(0.1, 0.1, 'test')
    o_mat = np.array([[t_obs, t_obs2], [t_obs2, t_obs2]])
    n_arr = np.array([[None, None], [None, None]])
    corr = pe.Corr([o_mat, o_mat, o_mat, o_mat, o_mat, o_mat, None, o_mat, n_arr, None, o_mat])
    corr.GEVP(t0=2)
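
Both GEVP tests revolve around the generalized eigenvalue problem C(t) v = lambda C(t0) v; pe.Corr.GEVP solves it timeslice by timeslice, and the new test checks that timeslices holding None entries do not break it. A minimal sketch of the scipy reference solution that the solver test compares against, with small hypothetical matrices standing in for C(t) and C(t0):

import numpy as np
import scipy.linalg

mat1 = np.array([[1.0, 0.3], [0.3, 0.8]])  # hypothetical stand-in for C(t)
mat2 = np.array([[1.0, 0.2], [0.2, 1.0]])  # hypothetical stand-in for C(t0), positive definite

# scipy solves the generalized problem mat1 @ v = lam * mat2 @ v directly;
# eigenvalues come back in ascending order, eigenvectors as columns
sp_vals, sp_vecs = scipy.linalg.eigh(mat1, mat2)
for lam, v in zip(sp_vals, sp_vecs.T):
    assert np.allclose(mat1 @ v, lam * mat2 @ v)
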
def test_hankel():
    corr_content = []
    for t in range(8):
@@ -405,6 +441,7 @@ def test_spaghetti_plot():
    corr.spaghetti_plot(True)
    corr.spaghetti_plot(False)
    plt.close('all')


def _gen_corr(val, samples=2000):

View file

@@ -149,8 +149,10 @@ def test_correlated_fit():
        return p[1] * anp.exp(-p[0] * x)

    fitp = pe.least_squares(x, data, fitf, expected_chisquare=True)
    assert np.isclose(fitp.chisquare / fitp.dof, fitp.chisquare_by_dof, atol=1e-14)
    fitpc = pe.least_squares(x, data, fitf, correlated_fit=True)
    assert np.isclose(fitpc.chisquare / fitpc.dof, fitpc.chisquare_by_dof, atol=1e-14)
    for i in range(2):
        diff = fitp[i] - fitpc[i]
        diff.gamma_method()
@@ -171,12 +173,35 @@ def test_fit_corr_independent():
        y = a[0] * anp.exp(-a[1] * x)
        return y

-    out = pe.least_squares(x, oy, func)
-    out_corr = pe.least_squares(x, oy, func, correlated_fit=True)
+    for method in ["Levenberg-Marquardt", "migrad", "Nelder-Mead"]:
+        out = pe.least_squares(x, oy, func, method=method)
+        out_corr = pe.least_squares(x, oy, func, correlated_fit=True, method=method)
-    assert np.isclose(out.chisquare, out_corr.chisquare)
-    assert (out[0] - out_corr[0]).is_zero(atol=1e-5)
-    assert (out[1] - out_corr[1]).is_zero(atol=1e-5)
+        assert np.isclose(out.chisquare, out_corr.chisquare)
+        assert np.isclose(out.dof, out_corr.dof)
+        assert np.isclose(out.chisquare_by_dof, out_corr.chisquare_by_dof)
+        assert (out[0] - out_corr[0]).is_zero(atol=1e-5)
+        assert (out[1] - out_corr[1]).is_zero(atol=1e-5)
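
The assertions above encode the expectation that a correlated fit and an uncorrelated fit agree when the data carry no correlations: with a diagonal covariance matrix, the generalized chi^2 = r^T C^{-1} r collapses to the usual sum of squared, error-weighted residuals, independently of the minimizer. A minimal NumPy sketch of that identity (numbers hypothetical):

import numpy as np

err = np.array([0.1, 0.2, 0.3])      # hypothetical uncorrelated errors
res = np.array([0.05, -0.1, 0.2])    # hypothetical residuals data - model
cov = np.diag(err ** 2)              # diagonal covariance matrix

chisq_correlated = res @ np.linalg.inv(cov) @ res
chisq_uncorrelated = np.sum((res / err) ** 2)
assert np.isclose(chisq_correlated, chisq_uncorrelated)
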
def test_linear_fit_guesses():
    for err in [10, 0.1, 0.001]:
        xvals = []
        yvals = []
        for x in range(1, 8, 2):
            xvals.append(x)
            yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
        lin_func = lambda a, x: a[0] + a[1] * x
        with pytest.raises(Exception):
            pe.least_squares(xvals, yvals, lin_func)
        [o.gamma_method() for o in yvals];
        with pytest.raises(Exception):
            pe.least_squares(xvals, yvals, lin_func, initial_guess=[5])

        bad_guess = pe.least_squares(xvals, yvals, lin_func, initial_guess=[999, 999])
        good_guess = pe.least_squares(xvals, yvals, lin_func, initial_guess=[0, 1])
        assert np.isclose(bad_guess.chisquare, good_guess.chisquare, atol=1e-8)
        assert np.all([(go - ba).is_zero(atol=1e-6) for (go, ba) in zip(good_guess, bad_guess)])


def test_total_least_squares():
@@ -218,7 +243,7 @@ def test_total_least_squares():
        beta[i].gamma_method(S=1.0)
        assert math.isclose(beta[i].value, output.beta[i], rel_tol=1e-5)
        assert math.isclose(output.cov_beta[i, i], beta[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(beta[i].dvalue ** 2)
-    assert math.isclose(pe.covariance([beta[0], beta[1]])[0, 1], output.cov_beta[0, 1], rel_tol=2.5e-1)
+    assert math.isclose(pe.covariance([beta[0], beta[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1)

    out = pe.total_least_squares(ox, oy, func, const_par=[beta[1]])
@@ -241,7 +266,7 @@ def test_total_least_squares():
        betac[i].gamma_method(S=1.0)
        assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3)
        assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2)
-    assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=2.5e-1)
+    assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1)

    outc = pe.total_least_squares(oxc, oyc, func, const_par=[betac[1]])
@@ -256,7 +281,7 @@ def test_total_least_squares():
        betac[i].gamma_method(S=1.0)
        assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3)
        assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2)
-    assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=2.5e-1)
+    assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1)

    outc = pe.total_least_squares(oxc, oy, func, const_par=[betac[1]])
@@ -376,6 +401,80 @@ def test_error_band():
    pe.fits.error_band(x, f, fitp.fit_parameters)


def test_fit_vs_jackknife():
    od = 0.9999999999
    cov1 = np.array([[1, od, od], [od, 1.0, od], [od, od, 1.0]])
    cov1 *= 0.05
    nod = -0.4
    cov2 = np.array([[1, nod, nod], [nod, 1.0, nod], [nod, nod, 1.0]])
    cov2 *= 0.05
    cov3 = np.identity(3)
    cov3 *= 0.05
    samples = 500

    for i, cov in enumerate([cov1, cov2, cov3]):
        dat = pe.misc.gen_correlated_data(np.arange(1, 4), cov, 'test', 0.5, samples=samples)
        [o.gamma_method(S=0) for o in dat];
        func = lambda a, x: a[0] + a[1] * x
        fr = pe.least_squares(np.arange(1, 4), dat, func)
        fr.gamma_method(S=0)

        jd = np.array([o.export_jackknife() for o in dat]).T
        jfr = []
        for jacks in jd:

            def chisqfunc_residuals(p):
                model = func(p, np.arange(1, 4))
                chisq = ((jacks - model) / [o.dvalue for o in dat])
                return chisq

            tf = scipy.optimize.least_squares(chisqfunc_residuals, [0.0, 0.0], method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
            jfr.append(tf.x)
        ajfr = np.array(jfr).T
        err = np.array([np.sqrt(np.var(ajfr[j][1:], ddof=0) * (samples - 1)) for j in range(2)])
        assert np.allclose(err, [o.dvalue for o in fr], atol=1e-8)
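
The comparison above relies on export_jackknife together with the textbook jackknife variance sigma^2 = (N - 1) * Var(theta_i), where the theta_i are the N leave-one-out estimates (export_jackknife keeps the central value in its first entry, hence the [1:] slicing). A standalone NumPy sketch of the same estimate for a plain mean, where it reproduces the ordinary standard error (data hypothetical):

import numpy as np

data = np.random.normal(1.0, 0.5, 500)     # hypothetical uncorrelated samples
n = len(data)

# leave-one-out (jackknife) estimates of the mean
theta = np.array([np.mean(np.delete(data, i)) for i in range(n)])

err_jackknife = np.sqrt((n - 1) * np.var(theta, ddof=0))
err_standard = np.std(data, ddof=1) / np.sqrt(n)
assert np.isclose(err_jackknife, err_standard)
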
def test_correlated_fit_vs_jackknife():
    od = 0.999999
    cov1 = np.array([[1, od, od], [od, 1.0, od], [od, od, 1.0]])
    cov1 *= 0.1
    nod = -0.44
    cov2 = np.array([[1, nod, nod], [nod, 1.0, nod], [nod, nod, 1.0]])
    cov2 *= 0.1
    cov3 = np.identity(3)
    cov3 *= 0.01
    samples = 250
    x_val = np.arange(1, 6, 2)

    for i, cov in enumerate([cov1, cov2, cov3]):
        dat = pe.misc.gen_correlated_data(x_val + x_val ** 2 + np.random.normal(0.0, 0.1, 3), cov, 'test', 0.5, samples=samples)
        [o.gamma_method(S=0) for o in dat];
        dat
        func = lambda a, x: a[0] * x + a[1] * x ** 2
        fr = pe.least_squares(x_val, dat, func, correlated_fit=True, silent=True)
        [o.gamma_method(S=0) for o in fr]

        cov = pe.covariance(dat)
        chol = np.linalg.cholesky(cov)
        chol_inv = np.linalg.inv(chol)

        jd = np.array([o.export_jackknife() for o in dat]).T
        jfr = []
        for jacks in jd:

            def chisqfunc_residuals(p):
                model = func(p, x_val)
                chisq = np.dot(chol_inv, (jacks - model))
                return chisq

            tf = scipy.optimize.least_squares(chisqfunc_residuals, [0.0, 0.0], method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
            jfr.append(tf.x)
        ajfr = np.array(jfr).T
        err = np.array([np.sqrt(np.var(ajfr[j][1:], ddof=0) * (samples - 1)) for j in range(2)])
        assert np.allclose(err, [o.dvalue for o in fr], atol=1e-7)
        assert np.allclose(ajfr.T[0], [o.value for o in fr], atol=1e-8)
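
The jackknife cross-check of the correlated fit whitens the residuals with the inverse Cholesky factor of the covariance matrix: with C = L L^T, the correlated chi^2 = r^T C^{-1} r equals the squared norm of L^{-1} r, which is exactly what chisqfunc_residuals hands to scipy's least_squares element by element. A minimal sketch of that equivalence (covariance and residuals hypothetical):

import numpy as np

cov = np.array([[0.040, 0.010, 0.000],
                [0.010, 0.050, 0.020],
                [0.000, 0.020, 0.030]])   # hypothetical positive definite covariance
res = np.array([0.10, -0.05, 0.02])       # hypothetical residuals data - model

chol = np.linalg.cholesky(cov)            # cov = chol @ chol.T
white = np.linalg.solve(chol, res)        # same as inv(chol) @ res, but numerically safer

assert np.isclose(white @ white, res @ np.linalg.inv(cov) @ res)
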
def test_fit_no_autograd():
    dim = 10
    x = np.arange(dim)

View file

@@ -354,3 +354,55 @@ def test_dobsio():
        if isinstance(ol[i], pe.Obs):
            for name in ol[i].r_values:
                assert(np.isclose(ol[i].r_values[name], rl[i].r_values[name]))
def test_reconstruct_non_linear_r_obs(tmp_path):
    to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
                ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
                idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
    to = np.log(to ** 2) / to
    to.dump((tmp_path / "test_equality").as_posix())
    ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())
    assert assert_equal_Obs(to, ro)


def test_reconstruct_non_linear_r_obs_list(tmp_path):
    to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
                ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
                idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
    to = np.log(to ** 2) / to
    for to_list in [[to, to, to], np.array([to, to, to])]:
        pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())
        ro_list = pe.input.json.load_json((tmp_path / "test_equality_list").as_posix())
        for oa, ob in zip(to_list, ro_list):
            assert assert_equal_Obs(oa, ob)
def assert_equal_Obs(to, ro):
    for kw in ["N", "cov_names", "covobs", "ddvalue", "dvalue", "e_content",
               "e_names", "idl", "mc_names", "names",
               "reweighted", "shape", "tag"]:
        if not getattr(to, kw) == getattr(ro, kw):
            print(kw, "does not match.")
            return False

    for kw in ["value"]:
        if not np.isclose(getattr(to, kw), getattr(ro, kw), atol=1e-14):
            print(kw, "does not match.")
            return False

    for kw in ["r_values", "deltas"]:
        for (k, v), (k2, v2) in zip(getattr(to, kw).items(), getattr(ro, kw).items()):
            assert k == k2
            if not np.allclose(v, v2, atol=1e-14):
                print(kw, "does not match.")
                return False

    m_to = getattr(to, "is_merged")
    m_ro = getattr(ro, "is_merged")
    if not m_to == m_ro:
        if not (all(value is False for value in m_ro.values()) and all(value is False for value in m_to.values())):
            print("is_merged", "does not match.")
            return False
    return True
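
The new io tests exercise the json round trip for observables derived through non-linear operations, where the per-replica means (r_values) are non-trivial to reconstruct, and then compare every reconstructed attribute against the original. A minimal sketch of the round-trip pattern being tested, assuming pyerrors' json interface (file name hypothetical):

import numpy as np
import pyerrors as pe

obs1 = pe.Obs([np.random.rand(500), np.random.rand(500)], ["ens|r1", "ens|r2"])
obs1 = np.log(obs1 ** 2) / obs1                         # non-linear combination, as in the tests above
obs2 = obs1 ** 2

pe.input.json.dump_to_json([obs1, obs2], "round_trip")  # writes round_trip.json.gz by default
reread = pe.input.json.load_json("round_trip")
assert np.isclose(obs1.value, reread[0].value)
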

View file

@@ -1,6 +1,7 @@
import autograd.numpy as np
import os
import copy
import matplotlib.pyplot as plt
import pyerrors as pe
import pytest
@@ -56,6 +57,7 @@ def test_Obs_exceptions():
    one.gamma_method()
    with pytest.raises(Exception):
        one.plot_piechart()
    plt.close('all')


def test_dump():
    value = np.random.normal(5, 10)
@@ -368,6 +370,7 @@ def test_utils():
    assert my_obs < (my_obs + 1)
    float(my_obs)
    str(my_obs)
    plt.close('all')


def test_cobs():
@@ -515,6 +518,35 @@ def test_merge_idx():
    assert pe.obs._merge_idx([range(500, 6050, 50), range(500, 6250, 250)]) == range(500, 6250, 50)


def test_intersection_idx():
    assert pe.obs._intersection_idx([range(1, 100), range(1, 100), range(1, 100)]) == range(1, 100)
    assert pe.obs._intersection_idx([range(1, 100, 10), range(1, 100, 2)]) == range(1, 100, 10)
    assert pe.obs._intersection_idx([range(10, 1010, 10), range(10, 1010, 50)]) == range(10, 1010, 50)
    assert pe.obs._intersection_idx([range(500, 6050, 50), range(500, 6250, 250)]) == range(500, 6050, 250)

    for ids in [[list(range(1, 80, 3)), list(range(1, 100, 2))], [range(1, 80, 3), range(1, 100, 2), range(1, 100, 7)]]:
        assert list(pe.obs._intersection_idx(ids)) == pe.obs._intersection_idx([list(o) for o in ids])


def test_merge_intersection():
    for idl_list in [[range(1, 100), range(1, 100), range(1, 100)],
                     [range(4, 80, 6), range(4, 80, 6)],
                     [[0, 2, 8, 19, 205], [0, 2, 8, 19, 205]]]:
        assert pe.obs._merge_idx(idl_list) == pe.obs._intersection_idx(idl_list)


def test_intersection_collapse():
    range1 = range(1, 2000, 2)
    range2 = range(2, 2001, 8)

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1])
    obs_merge = obs1 + pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2])

    intersection = pe.obs._intersection_idx([o.idl["ens"] for o in [obs1, obs_merge]])
    coll = pe.obs._collapse_deltas_for_merge(obs_merge.deltas["ens"], obs_merge.idl["ens"], len(obs_merge.idl["ens"]), range1)

    assert np.all(coll == obs1.deltas["ens"])


def test_irregular_error_propagation():
    obs_list = [pe.Obs([np.random.rand(100)], ['t']),
                pe.Obs([np.random.rand(50)], ['t'], idl=[range(1, 100, 2)]),
@@ -619,6 +651,26 @@ def test_covariance_is_variance():
    assert np.isclose(test_obs.dvalue ** 2, pe.covariance([test_obs, test_obs])[0, 1])


def test_covariance_vs_numpy():
    N = 1078
    data1 = np.random.normal(2.5, 0.2, N)
    data2 = np.random.normal(0.5, 0.08, N)
    data3 = np.random.normal(-178, 5, N)
    uncorr = np.row_stack([data1, data2, data3])
    corr = np.random.multivariate_normal([0.0, 17, -0.0487], [[1.0, 0.6, -0.22], [0.6, 0.8, 0.01], [-0.22, 0.01, 1.9]], N).T

    for X in [uncorr, corr]:
        obs1 = pe.Obs([X[0]], ["ens1"])
        obs2 = pe.Obs([X[1]], ["ens1"])
        obs3 = pe.Obs([X[2]], ["ens1"])
        obs1.gamma_method(S=0.0)
        obs2.gamma_method(S=0.0)
        obs3.gamma_method(S=0.0)
        pe_cov = pe.covariance([obs1, obs2, obs3])
        np_cov = np.cov(X) / N
        assert np.allclose(pe_cov, np_cov, atol=1e-14)
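
The np.cov comparison works because, for data without autocorrelation (enforced here via gamma_method(S=0.0)), the covariance of the means of N samples is the per-sample covariance divided by N. A small NumPy-only illustration of that 1/N factor (data hypothetical):

import numpy as np

N = 10000
x = np.random.normal(0.0, 1.0, N)
y = 0.7 * x + np.random.normal(0.0, 0.5, N)   # hypothetical correlated pair

cov_samples = np.cov(np.vstack([x, y]))       # covariance of the samples
cov_means = cov_samples / N                   # covariance of the means

# the diagonal reproduces the squared standard error of the mean
assert np.isclose(cov_means[0, 0], (np.std(x, ddof=1) / np.sqrt(N)) ** 2)
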
def test_covariance_symmetry():
    value1 = np.random.normal(5, 10)
    dvalue1 = np.abs(np.random.normal(0, 1))
@@ -687,6 +739,23 @@ def test_covariance_factorizing():
    assert np.isclose(pe.covariance([mt0, tt[1]])[0, 1], -pe.covariance(tt)[0, 1])


def test_covariance_smooth_eigenvalues():
    for c_coeff in range(0, 14, 2):
        length = 14
        sm = 5
        t_fac = 1.5
        tt = pe.misc.gen_correlated_data(np.zeros(length), 1 - 0.1 ** c_coeff * np.ones((length, length)) + 0.1 ** c_coeff * np.diag(np.ones(length)), 'test', tau=0.5 + t_fac * np.random.rand(length), samples=200)
        [o.gamma_method() for o in tt]
        full_corr = pe.covariance(tt, correlation=True)
        cov = pe.covariance(tt, smooth=sm, correlation=True)

        full_evals = np.linalg.eigh(full_corr)[0]
        sm_length = np.where(full_evals < np.mean(full_evals[:-sm]))[0][-1]

        evals = np.linalg.eigh(cov)[0]
        assert np.all(np.isclose(evals[:sm_length], evals[0], atol=1e-8))
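pe.covariance(..., smooth=E) applies eigenvalue smoothing to the correlation matrix, which is why the test expects the small eigenvalues to become degenerate: roughly, the E largest eigenvalues are kept while the remaining small ones are replaced by their average. A rough sketch of the idea, not necessarily the library's exact implementation:

import numpy as np

def smooth_eigenvalues_sketch(corr, E):
    # sketch: keep the E largest eigenvalues of the correlation matrix and
    # replace the remaining small ones by their average before rebuilding it
    vals, vecs = np.linalg.eigh(corr)          # eigenvalues in ascending order
    vals[:-E] = np.mean(vals[:-E])
    return vecs @ np.diag(vals) @ vecs.T

# usage on a hypothetical correlation matrix with one dominant common mode
rng = np.random.default_rng(7)
data = rng.normal(size=(400, 5)) + rng.normal(size=(400, 1))
corr = np.corrcoef(data, rowvar=False)
smoothed = smooth_eigenvalues_sketch(corr, E=2)
print(np.linalg.eigh(smoothed)[0])             # the three smallest eigenvalues now coincide
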
def test_covariance_alternation():
    length = 12
    t_fac = 2.5
@@ -729,6 +798,86 @@ def test_covariance_idl():
    pe.covariance([obs1, obs2])


def test_correlation_intersection_of_idls():
    range1 = range(1, 2000, 2)
    range2 = range(2, 2001, 2)

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1])
    obs2_a = 0.4 * pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1]) + 0.6 * obs1
    obs1.gamma_method()
    obs2_a.gamma_method()
    cov1 = pe.covariance([obs1, obs2_a])
    corr1 = pe.covariance([obs1, obs2_a], correlation=True)

    obs2_b = obs2_a + pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2])
    obs2_b.gamma_method()
    cov2 = pe.covariance([obs1, obs2_b])
    corr2 = pe.covariance([obs1, obs2_b], correlation=True)

    assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)
    assert cov1[0, 1] > cov2[0, 1]

    obs2_c = pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2])
    obs2_c.gamma_method()
    assert np.isclose(0, pe.covariance([obs1, obs2_c])[0, 1], atol=1e-14)
def test_covariance_non_identical_objects():
    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
    obs1.gamma_method()
    obs2 = obs1 + 1e-18
    obs2.gamma_method()
    assert obs1 == obs2
    assert obs1 is not obs2
    assert np.allclose(np.ones((2, 2)), pe.covariance([obs1, obs2], correlation=True), atol=1e-14)
def test_covariance_additional_non_overlapping_data():
    range1 = range(1, 20, 2)

    data2 = np.random.normal(0.0, 0.1, len(range1))
    obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1])
    obs2_a = pe.Obs([data2], ["ens"], idl=[range1])
    obs1.gamma_method()
    obs2_a.gamma_method()
    corr1 = pe.covariance([obs1, obs2_a], correlation=True)

    added_data = np.random.normal(0.0, 0.1, len(range1))
    added_data -= np.mean(added_data) - np.mean(data2)
    data2_extended = np.ravel([data2, added_data], 'F')

    obs2_b = pe.Obs([data2_extended], ["ens"])
    obs2_b.gamma_method()
    corr2 = pe.covariance([obs1, obs2_b], correlation=True)

    assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)
def test_coavariance_reorder_non_overlapping_data():
    range1 = range(1, 20, 2)
    range2 = range(1, 41, 2)

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1])
    obs2_b = pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2])
    obs1.gamma_method()
    obs2_b.gamma_method()
    corr1 = pe.covariance([obs1, obs2_b], correlation=True)

    deltas = list(obs2_b.deltas['ens'][:len(range1)]) + sorted(obs2_b.deltas['ens'][len(range1):])
    obs2_a = pe.Obs([obs2_b.value + np.array(deltas)], ["ens"], idl=[range2])
    obs2_a.gamma_method()
    corr2 = pe.covariance([obs1, obs2_a], correlation=True)

    assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)
def test_empty_obs():
    o = pe.Obs([np.random.rand(100)], ['test'])
    q = o + pe.Obs([], [], means=[])