From 359c9c06daa97e8eaee93c22c7f93538b7a1cc02 Mon Sep 17 00:00:00 2001
From: Simon Kuberski
Date: Tue, 30 Nov 2021 13:32:50 +0100
Subject: [PATCH] Bugfix for covobs, fit tests for covobs added

---
 pyerrors/covobs.py |  2 +-
 pyerrors/obs.py    | 32 ++++++++++++++---------------
 tests/fits_test.py | 51 ++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 68 insertions(+), 17 deletions(-)

diff --git a/pyerrors/covobs.py b/pyerrors/covobs.py
index 87b0526d..3615fe5c 100644
--- a/pyerrors/covobs.py
+++ b/pyerrors/covobs.py
@@ -41,7 +41,7 @@ class Covobs:
                 raise Exception('Have to specify position of cov-element belonging to mean!')
             else:
                 if pos > self.N:
-                    raise Exception('pos %d too large for covariance matrix with dimension %dx%d!' % (pos, self.N, self.N))
+                    raise Exception('pos %d too large for covariance matrix with dimension %dx%d!' % (pos, self.N, self.N))
                 self.grad = np.zeros((self.N, 1))
                 self.grad[pos] = 1.
         else:
diff --git a/pyerrors/obs.py b/pyerrors/obs.py
index 38a72a1b..d0a66ab2 100644
--- a/pyerrors/obs.py
+++ b/pyerrors/obs.py
@@ -80,7 +80,7 @@ class Obs:
                 raise TypeError('All names have to be strings.')
             if min(len(x) for x in samples) <= 4:
                 raise Exception('Samples have to have at least 5 entries.')
-
+
             if names:
                 self.names = sorted(names)
             else:
@@ -324,19 +324,19 @@ class Obs:
                         self.e_windowsize[e_name] = n
                         break

-            self._dvalue += self.e_dvalue[e_name] ** 2
-            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+            self._dvalue += self.e_dvalue[e_name] ** 2
+            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2

         for e_name in self.cov_names:
             self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
             self.e_ddvalue[e_name] = 0
             self._dvalue += self.e_dvalue[e_name]**2

-        self._dvalue = np.sqrt(self.dvalue)
+        self._dvalue = np.sqrt(self._dvalue)
         if self._dvalue == 0.0:
             self.ddvalue = 0.0
         else:
-            self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
+            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
         return

     def _calc_gamma(self, deltas, idx, shape, w_max, fft):
@@ -388,8 +388,8 @@ class Obs:
         if self.value == 0.0:
             percentage = np.nan
         else:
-            percentage = np.abs(self.dvalue / self.value) * 100
-        print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue, self.ddvalue, percentage))
+            percentage = np.abs(self._dvalue / self.value) * 100
+        print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
         if len(self.e_names) > 1:
             print(' Ensemble errors:')
             for e_name in self.mc_names:
@@ -447,7 +447,7 @@ class Obs:

         Works only properly when the gamma method was run.
         """
-        return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue
+        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue

     def is_zero(self, rtol=1.e-5, atol=1.e-8):
         """Checks whether the observable is zero within a given tolerance.
@@ -575,10 +575,10 @@ class Obs:
         ensemble to the error and returns a dictionary containing the fractions."""
         if not hasattr(self, 'e_dvalue'):
             raise Exception('Run the gamma method first.')
-        if self.dvalue == 0.0:
+        if self._dvalue == 0.0:
             raise Exception('Error is 0.0')
         labels = self.e_names
-        sizes = [i ** 2 for i in list(self.e_dvalue.values())] / self.dvalue ** 2
+        sizes = [i ** 2 for i in list(self.e_dvalue.values())] / self._dvalue ** 2
         fig1, ax1 = plt.subplots()
         ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
         ax1.axis('equal')
@@ -636,15 +636,15 @@ class Obs:
         return 'Obs[' + str(self) + ']'

     def __str__(self):
-        if self.dvalue == 0.0:
+        if self._dvalue == 0.0:
             return str(self.value)
-        fexp = np.floor(np.log10(self.dvalue))
+        fexp = np.floor(np.log10(self._dvalue))
         if fexp < 0.0:
-            return '{:{form}}({:2.0f})'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
+            return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
         elif fexp == 0.0:
-            return '{:.1f}({:1.1f})'.format(self.value, self.dvalue)
+            return '{:.1f}({:1.1f})'.format(self.value, self._dvalue)
         else:
-            return '{:.0f}({:2.0f})'.format(self.value, self.dvalue)
+            return '{:.0f}({:2.0f})'.format(self.value, self._dvalue)

     # Overload comparisons
     def __lt__(self, other):
@@ -1151,7 +1151,7 @@ def derived_observable(func, data, **kwargs):
             else:
                 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])

-        new_covobs = {name: Covobs(obs.covobs[name].value, obs.covobs[name].cov, obs.covobs[name].name, grad=new_grad[name]) for name in new_grad}
+        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}

         new_samples = []
         new_means = []
diff --git a/tests/fits_test.py b/tests/fits_test.py
index bdce9952..45e98e35 100644
--- a/tests/fits_test.py
+++ b/tests/fits_test.py
@@ -46,6 +46,19 @@ def test_least_squares():
     assert((out.fit_parameters[0] - beta[0]).is_zero())
     assert((out.fit_parameters[1] - beta[1]).is_zero())

+    oyc = []
+    for i, item in enumerate(x):
+        oyc.append(pe.cov_Obs(y[i], yerr[i]**2, 'cov' + str(i)))
+
+    outc = pe.least_squares(x, oyc, func)
+    betac = outc.fit_parameters
+
+    for i in range(2):
+        betac[i].gamma_method(S=1.0)
+        assert math.isclose(betac[i].value, popt[i], abs_tol=1e-5)
+        assert math.isclose(pcov[i, i], betac[i].dvalue ** 2, abs_tol=1e-3)
+    assert math.isclose(pe.covariance(betac[0], betac[1]), pcov[0, 1], abs_tol=1e-3)
+
     num_samples = 400
     N = 10

@@ -135,6 +135,44 @@ def test_total_least_squares():
     assert(diff / beta[0] < 1e-3 * beta[0].dvalue)
     assert((out.fit_parameters[1] - beta[1]).is_zero())

+    oxc = []
+    for i, item in enumerate(x):
+        oxc.append(pe.cov_Obs(x[i], xerr[i]**2, 'covx' + str(i)))
+
+    oyc = []
+    for i, item in enumerate(x):
+        oyc.append(pe.cov_Obs(y[i], yerr[i]**2, 'covy' + str(i)))
+
+    outc = pe.total_least_squares(oxc, oyc, func)
+    betac = outc.fit_parameters
+
+    for i in range(2):
+        betac[i].gamma_method(S=1.0)
+        assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3)
+        assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2)
+    assert math.isclose(pe.covariance(betac[0], betac[1]), output.cov_beta[0, 1], rel_tol=2.5e-1)
+
+    outc = pe.total_least_squares(oxc, oyc, func, const_par=[betac[1]])
+
+    diffc = outc.fit_parameters[0] - betac[0]
+    assert(diffc / betac[0] < 1e-3 * betac[0].dvalue)
+    assert((outc.fit_parameters[1] - betac[1]).is_zero())
+
+    outc = pe.total_least_squares(oxc, oy, func)
+    betac = outc.fit_parameters
+
+    for i in range(2):
+        betac[i].gamma_method(S=1.0)
+        assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3)
+        assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2)
+    assert math.isclose(pe.covariance(betac[0], betac[1]), output.cov_beta[0, 1], rel_tol=2.5e-1)
+
+    outc = pe.total_least_squares(oxc, oy, func, const_par=[betac[1]])
+
+    diffc = outc.fit_parameters[0] - betac[0]
+    assert(diffc / betac[0] < 1e-3 * betac[0].dvalue)
+    assert((outc.fit_parameters[1] - betac[1]).is_zero())
+

 def test_odr_derivatives():
     x = []
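
Note: a minimal, self-contained sketch of the usage pattern the new tests exercise, i.e. observables built from an explicit covariance via pe.cov_Obs entering pe.least_squares. The data values and the linear model func below are made up for illustration and are not part of the patch.

import numpy as np
import pyerrors as pe

# Hypothetical data: mean values whose errors come from an explicit
# (here diagonal) covariance rather than from Monte Carlo samples.
x = np.arange(1, 6)
y_means = [2.1, 4.0, 5.9, 8.1, 9.9]
y_errs = [0.1, 0.1, 0.2, 0.2, 0.3]

# One covariance-based observable per point; the name tags the error source.
oy = [pe.cov_Obs(m, e ** 2, 'cov' + str(i)) for i, (m, e) in enumerate(zip(y_means, y_errs))]

def func(a, x):
    # Illustrative linear model with two fit parameters.
    return a[0] + a[1] * x

out = pe.least_squares(x, oy, func)
for p in out.fit_parameters:
    p.gamma_method()  # propagates the covariance-based errors to the fit parameters
    print(p)
print(pe.covariance(out.fit_parameters[0], out.fit_parameters[1]))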