From eedad7dedacbe952900a96b5019201f55fb6e84e Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Mon, 25 Oct 2021 21:59:12 +0100
Subject: [PATCH] docstrings and tests added to linalg module

---
 pyerrors/linalg.py   | 28 ++++++++++++----------------
 tests/test_linalg.py | 10 ++++++++--
 tests/test_roots.py  |  2 +-
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/pyerrors/linalg.py b/pyerrors/linalg.py
index 4d2aea61..0f08d72b 100644
--- a/pyerrors/linalg.py
+++ b/pyerrors/linalg.py
@@ -6,15 +6,13 @@ from autograd import jacobian
 import autograd.numpy as anp  # Thinly-wrapped numpy
 from .pyerrors import derived_observable, CObs, Obs
 
-
-# This code block is directly taken from the current master branch of autograd and remains
-# only until the new version is released on PyPi
 from functools import partial
 from autograd.extend import defvjp
 
 
 def derived_array(func, data, **kwargs):
-    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+    """Construct a derived Obs according to func(data, **kwargs) of matrix valued data
+    using automatic differentiation.
 
     Parameters
     ----------
@@ -25,20 +23,9 @@ def derived_array(func, data, **kwargs):
 
     Keyword arguments
     -----------------
-    num_grad -- if True, numerical derivatives are used instead of autograd
-                (default False). To control the numerical differentiation the
-                kwargs of numdifftools.step_generators.MaxStepGenerator
-                can be used.
     man_grad -- manually supply a list or an array which contains the jacobian
                 of func. Use cautiously, supplying the wrong derivative will
                 not be intercepted.
-
-    Notes
-    -----
-    For simple mathematical operations it can be practical to use anonymous
-    functions. For the ratio of two observables one can e.g. use
-
-    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
     """
 
     data = np.asarray(data)
@@ -125,6 +112,11 @@ def derived_array(func, data, **kwargs):
 
 
 def matmul(*operands):
+    """Matrix multiply all operands.
+
+    Supports real and complex valued matrices and is faster than standard
+    multiplication via the @ operator.
+    """
     if any(isinstance(o[0, 0], CObs) for o in operands):
         extended_operands = []
         for op in operands:
@@ -171,10 +163,12 @@ def matmul(*operands):
 
 
 def inv(x):
+    """Inverse of Obs or CObs valued matrices."""
     return _mat_mat_op(anp.linalg.inv, x)
 
 
 def cholesky(x):
+    """Cholesky decomposition of Obs or CObs valued matrices."""
     return _mat_mat_op(anp.linalg.cholesky, x)
 
 
@@ -270,7 +264,7 @@ def svd(obs, **kwargs):
     return (u, s, vh)
 
 
-def slog_det(obs, **kwargs):
+def slogdet(obs, **kwargs):
     """Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
     def _mat(x):
         dim = int(np.sqrt(len(x)))
@@ -501,6 +495,8 @@ def _num_diff_svd(obs, **kwargs):
     return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1]))
 
 
+# This code block is directly taken from the current master branch of autograd and remains
+# only until the new version is released on PyPi
 _dot = partial(anp.einsum, '...ij,...jk->...ik')
 
 
diff --git a/tests/test_linalg.py b/tests/test_linalg.py
index c441426d..6e3455f6 100644
--- a/tests/test_linalg.py
+++ b/tests/test_linalg.py
@@ -87,8 +87,7 @@ def test_complex_matrix_inverse():
 
 
 def test_matrix_functions():
-    dim = 3 + int(4 * np.random.rand())
-    print(dim)
+    dim = 4
     matrix = []
     for i in range(dim):
         row = []
@@ -125,6 +124,13 @@ def test_matrix_functions():
         for j in range(dim):
             assert tmp[j].is_zero()
 
+    # Check svd
+    u, v, vh = pe.linalg.svd(sym)
+    diff = sym - u @ np.diag(v) @ vh
+
+    for (i, j), entry in np.ndenumerate(diff):
+        assert entry.is_zero()
+
 
 def test_complex_matrix_operations():
     dimension = 4
diff --git a/tests/test_roots.py b/tests/test_roots.py
index 8d3a8191..d7c4ed1f 100644
--- a/tests/test_roots.py
+++ b/tests/test_roots.py
@@ -16,4 +16,4 @@ def test_root_linear():
     assert np.isclose(my_root.value, value)
 
     difference = my_obs - my_root
-    assert np.allclose(0.0, difference.deltas['t'])
+    assert difference.is_zero()
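For reference (not part of the patch itself), the routines documented above can be exercised along the lines of the new test. This is a minimal sketch only; it assumes pe.pseudo_Obs (used elsewhere in the pyerrors test suite) for generating synthetic observables, and the ensemble name 'ens' is arbitrary:

    import numpy as np
    import pyerrors as pe

    dim = 4
    # dim x dim matrix of pseudo observables with random central values
    matrix = np.array([[pe.pseudo_Obs(np.random.rand(), 0.1, 'ens')
                        for j in range(dim)] for i in range(dim)])

    prod = pe.linalg.matmul(matrix, matrix)  # faster than matrix @ matrix for Obs matrices
    inverse = pe.linalg.inv(matrix)          # Obs valued matrix inverse

    sym = matrix + matrix.T                  # symmetric matrix for the svd check
    u, s, vh = pe.linalg.svd(sym)
    residual = sym - u @ np.diag(s) @ vh     # each entry should satisfy entry.is_zero()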