Merge branch 'develop' into feature/eliminate_derived_array

Fabian Joswig 2021-12-06 15:30:14 +00:00
commit 3ae6959bac
6 changed files with 72 additions and 22 deletions

View file

@@ -301,8 +301,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda x, **kwargs: x[0], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-        result[-1]._value = out.beta[i]
+        result.append(derived_observable(lambda my_var, **kwargs: my_var[0] / x.ravel()[0].value * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
     output.fit_parameters = result + const_par
@@ -419,8 +418,7 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda x, **kwargs: x[0], list(y) + list(loc_priors), man_grad=list(deriv[i])))
-        result[-1]._value = params[i]
+        result.append(derived_observable(lambda x, **kwargs: x[0] / y[0].value * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i])))
     output.fit_parameters = result
     output.chisquare = chisqfunc(np.asarray(params))
@@ -614,8 +612,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda x, **kwargs: x[0], list(y), man_grad=list(deriv[i])))
-        result[-1]._value = fit_result.x[i]
+        result.append(derived_observable(lambda x, **kwargs: x[0] / y[0].value * fit_result.x[i], list(y), man_grad=list(deriv[i])))
     output.fit_parameters = result + const_par
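
All three fit routines receive the same change: instead of building the derived observable from a trivial lambda x: x[0] and then patching its private _value attribute to the fitted parameter afterwards, the lambda is rescaled so that, evaluated at the input means, it already returns the fit parameter. A minimal sketch of the idea in plain Python (mean_value and fit_value are made-up placeholders for the first input's central value, e.g. y[0].value, and the fitted parameter, e.g. fit_result.x[i]; they are not part of the library code):

# Made-up numbers standing in for y[0].value and fit_result.x[i].
mean_value = 1.23   # central value of the first input observable
fit_value = 0.87    # central value of the fitted parameter

# Old approach: the function returns the input mean, so the resulting
# observable's value had to be overwritten with fit_value by hand.
old_func = lambda x, **kwargs: x[0]

# New approach: rescale inside the function, so evaluating it at the
# input means already yields the fit parameter.
new_func = lambda x, **kwargs: x[0] / mean_value * fit_value

assert old_func([mean_value]) == mean_value
assert new_func([mean_value]) == fit_value

The rescaled lambda has a different slope than the original one, but since every call passes man_grad explicitly, the manual gradient presumably takes precedence and the propagated error is unchanged.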

View file

@@ -74,10 +74,15 @@ class Obs:
         if idl is not None:
             if len(idl) != len(names):
                 raise Exception('Length of idl incompatible with samples and names.')
-        if len(names) != len(set(names)):
-            raise Exception('names are not unique.')
-        if not all(isinstance(x, str) for x in names):
-            raise TypeError('All names have to be strings.')
+        name_length = len(names)
+        if name_length > 1:
+            if name_length != len(set(names)):
+                raise Exception('names are not unique.')
+            if not all(isinstance(x, str) for x in names):
+                raise TypeError('All names have to be strings.')
+        else:
+            if not isinstance(names[0], str):
+                raise TypeError('All names have to be strings.')
         if min(len(x) for x in samples) <= 4:
             raise Exception('Samples have to have at least 5 entries.')
@@ -623,9 +628,9 @@ class Obs:
         name = self.names[0]
         full_data = self.deltas[name] + self.r_values[name]
         n = full_data.size
-        mean = np.mean(full_data)
+        mean = self.value
         tmp_jacks = np.zeros(n + 1)
-        tmp_jacks[0] = self.value
+        tmp_jacks[0] = mean
         tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
         return tmp_jacks
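
A quick numerical check of the jackknife construction in this hunk, using made-up data (and assuming, as the diff suggests, that self.value and np.mean(full_data) agree for a single-ensemble observable): the leave-one-out pseudo-values (n * mean - x_i) / (n - 1) average back to the mean, so writing the same mean into tmp_jacks[0] keeps the central value and the pseudo-samples consistent.

import numpy as np

# Made-up data standing in for full_data = self.deltas[name] + self.r_values[name].
full_data = np.array([0.9, 1.1, 1.0, 1.2, 0.8])
n = full_data.size
mean = np.mean(full_data)                          # stand-in for self.value

tmp_jacks = np.zeros(n + 1)
tmp_jacks[0] = mean                                # central value
tmp_jacks[1:] = (n * mean - full_data) / (n - 1)   # leave-one-out estimates

# The leave-one-out estimates average back to the central value.
assert np.isclose(np.mean(tmp_jacks[1:]), mean)
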
@@ -1259,22 +1264,22 @@ def reweight(weight, obs, **kwargs):
     for i in range(len(obs)):
         if len(obs[i].cov_names):
             raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-        if sorted(weight.names) != sorted(obs[i].names):
+        if not set(obs[i].names).issubset(weight.names):
             raise Exception('Error: Ensembles do not fit')
-        for name in weight.names:
+        for name in obs[i].names:
             if not set(obs[i].idl[name]).issubset(weight.idl[name]):
                 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
         new_samples = []
         w_deltas = {}
-        for name in sorted(weight.names):
+        for name in sorted(obs[i].names):
             w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
             new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-        tmp_obs = Obs(new_samples, sorted(weight.names), idl=[obs[i].idl[name] for name in sorted(weight.names)])
+        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
         if kwargs.get('all_configs'):
             new_weight = weight
         else:
-            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(weight.names)], sorted(weight.names), idl=[obs[i].idl[name] for name in sorted(weight.names)])
+            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
         result.append(derived_observable(lambda x, **kwargs: x[0] / x[1], [tmp_obs, new_weight], **kwargs))
         result[-1].reweighted = True
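
The relaxed compatibility check in reweight only requires the ensembles of obs[i] to be a subset of those the weight factor is defined on, instead of demanding an exact match, and the subsequent loops consistently run over obs[i].names. A minimal illustration of the old versus the new condition with plain Python sets (the ensemble names are made up):

weight_names = ['ensA', 'ensB']   # ensembles the weight factor is defined on
obs_names = ['ensA']              # ensembles of the observable to be reweighted

old_ok = sorted(weight_names) == sorted(obs_names)   # False: exact match was required
new_ok = set(obs_names).issubset(weight_names)       # True: a subset is now enough

print(old_ok, new_ok)   # False True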

View file

@@ -33,6 +33,5 @@ def find_root(d, func, guess=1.0, **kwargs):
     da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
     deriv = - da / dx
-    res = derived_observable(lambda x, **kwargs: x[0], [d], man_grad=[deriv])
-    res._value = root[0]
+    res = derived_observable(lambda x, **kwargs: x[0] / d.value * root[0], [d], man_grad=[deriv])
     return res
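
find_root gets the same _value elimination as the fit routines. A small check with placeholder numbers for d.value and root[0]: the rescaled lambda returns the root when evaluated at the input mean, while its own slope is root[0] / d.value rather than the implicit-function derivative -da / dx, which is presumably why man_grad=[deriv] is still passed.

# Placeholder numbers standing in for d.value and the root found by the solver.
d_value = 2.0
root_value = 0.5

new_func = lambda x, **kwargs: x[0] / d_value * root_value

# At the input mean the rescaled function returns the root ...
assert abs(new_func([d_value]) - root_value) < 1e-12

# ... but its own slope is root_value / d_value, not the implicit-function
# derivative, so the error propagation still relies on the manual gradient.
eps = 1e-6
num_slope = (new_func([d_value + eps]) - new_func([d_value - eps])) / (2 * eps)
assert abs(num_slope - root_value / d_value) < 1e-6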