Merge branch 'develop' into documentation

fjosw 2021-12-07 08:31:54 +00:00
commit 78435f533f
4 changed files with 6 additions and 69 deletions

File 1 of 4: .gitignore

@@ -8,3 +8,4 @@ examples/B1k2_pcac_plateau.p
 examples/Untitled.*
 core.*
 *.swp
+htmlcov

File 2 of 4

@@ -678,7 +678,7 @@ def qqplot(x, o_y, func, p):
     plt.xlabel('Theoretical quantiles')
     plt.ylabel('Ordered Values')
     plt.legend()
-    plt.show()
+    plt.draw()


 def residual_plot(x, y, func, fit_res):
@@ -707,7 +707,7 @@ def residual_plot(x, y, func, fit_res):
     ax1.set_xlim([xstart, xstop])
     ax1.set_ylabel('Residuals')
     plt.subplots_adjust(wspace=None, hspace=None)
-    plt.show()
+    plt.draw()


 def covariance_matrix(y):
@@ -782,7 +782,7 @@ def ks_test(obs=None):
     loc_max_diff = np.argmax(np.abs(diffs))
     loc = Xs[loc_max_diff]
     plt.annotate(s='', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-    plt.show()
+    plt.draw()
     print(scipy.stats.kstest(Qs, 'uniform'))
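Switching qqplot, residual_plot and ks_test from plt.show() to plt.draw() means the library renders figures without blocking; displaying them becomes the caller's responsibility. A minimal sketch of the calling pattern this enables (make_diagnostic_plot is a hypothetical stand-in for these plotting helpers, not part of the library):

import numpy as np
import matplotlib.pyplot as plt

def make_diagnostic_plot(data, title):
    # Hypothetical stand-in for qqplot/residual_plot: draws into a new
    # figure with plt.draw(), which does not block like plt.show().
    plt.figure()
    plt.plot(data, 'o')
    plt.title(title)
    plt.draw()

make_diagnostic_plot(np.random.normal(size=50), 'Residuals')
make_diagnostic_plot(np.random.uniform(size=50), 'Quantiles')
plt.show()  # the caller blocks once and displays all open figures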

File 3 of 4

@@ -1546,70 +1546,6 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
     return dvalue


-def covariance3(obs1, obs2, correlation=False, **kwargs):
-    """Another alternative implementation of the covariance of two observables.
-
-    covariance2(obs, obs) is equal to obs.dvalue ** 2
-    Currently only works if ensembles are identical.
-    The gamma method has to be applied first to both observables.
-
-    If abs(covariance2(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
-    is constrained to the maximum value in order to make sure that covariance
-    matrices are positive semidefinite.
-
-    Keyword arguments
-    -----------------
-    correlation -- if true the correlation instead of the covariance is
-                   returned (default False)
-    plot -- if true, the integrated autocorrelation time for each ensemble is
-            plotted.
-    """
-
-    for name in sorted(set(obs1.names + obs2.names)):
-        if (obs1.shape.get(name) != obs2.shape.get(name)) and (obs1.shape.get(name) is not None) and (obs2.shape.get(name) is not None):
-            raise Exception('Shapes of ensemble', name, 'do not fit')
-        if (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], _merge_idx([obs1.idl[name], obs2.idl[name]])]]))):
-            raise Exception('Shapes of ensemble', name, 'do not fit')
-
-    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
-        raise Exception('The gamma method has to be applied to both Obs first.')
-
-    tau_exp = []
-    S = []
-    for e_name in sorted(set(obs1.e_names + obs2.e_names)):
-        t_1 = obs1.tau_exp.get(e_name)
-        t_2 = obs2.tau_exp.get(e_name)
-        if t_1 is None:
-            t_1 = 0
-        if t_2 is None:
-            t_2 = 0
-        tau_exp.append(max(t_1, t_2))
-        S_1 = obs1.S.get(e_name)
-        S_2 = obs2.S.get(e_name)
-        if S_1 is None:
-            S_1 = Obs.S_global
-        if S_2 is None:
-            S_2 = Obs.S_global
-        S.append(max(S_1, S_2))
-
-    check_obs = obs1 + obs2
-
-    check_obs.gamma_method(tau_exp=tau_exp, S=S)
-
-    if kwargs.get('plot'):
-        check_obs.plot_tauint()
-        check_obs.plot_rho()
-
-    cov = (check_obs.dvalue ** 2 - obs1.dvalue ** 2 - obs2.dvalue ** 2) / 2
-
-    if np.abs(cov / obs1.dvalue / obs2.dvalue) > 1.0:
-        cov = np.sign(cov) * obs1.dvalue * obs2.dvalue
-
-    if correlation:
-        cov = cov / obs1.dvalue / obs2.dvalue
-
-    return cov
-
-
 def pseudo_Obs(value, dvalue, name, samples=1000):
     """Generate a pseudo Obs with given value, dvalue and name

File 4 of 4

@@ -29,7 +29,7 @@ def test_least_squares():
         y = a[0] * np.exp(-a[1] * x)
         return y

-    out = pe.least_squares(x, oy, func)
+    out = pe.least_squares(x, oy, func, expected_chisquare=True, resplot=True, qqplot=True)
     beta = out.fit_parameters

     for i in range(2):
@@ -133,7 +133,7 @@ def test_total_least_squares():
     odr.set_job(fit_type=0, deriv=1)
     output = odr.run()

-    out = pe.total_least_squares(ox, oy, func)
+    out = pe.total_least_squares(ox, oy, func, expected_chisquare=True)
     beta = out.fit_parameters

     for i in range(2):