diff --git a/pyerrors/input/__init__.py b/pyerrors/input/__init__.py index 3b585614..e8cfff08 100644 --- a/pyerrors/input/__init__.py +++ b/pyerrors/input/__init__.py @@ -10,4 +10,5 @@ from . import hadrons from . import json from . import misc from . import openQCD +from . import pandas from . import sfcf diff --git a/pyerrors/input/pandas.py b/pyerrors/input/pandas.py new file mode 100644 index 00000000..caf3e0b6 --- /dev/null +++ b/pyerrors/input/pandas.py @@ -0,0 +1,75 @@ +import warnings +import gzip +import pandas as pd +from ..obs import Obs +from ..correlators import Corr +from .json import create_json_string, import_json_string + + +def dump_df(df, fname, gz=True): + """Exports a pandas DataFrame containing Obs valued columns to a (gzipped) csv file. + + Before making use of pandas to_csv functionality Obs objects are serialized via the standardized + json format of pyerrors. + + Parameters + ---------- + df : pandas.DataFrame + Dataframe to be dumped to a file. + fname : str + Filename of the output file. + gz : bool + If True, the output is a gzipped csv file. If False, the output is a csv file. + """ + + out = df.copy() + for column in out: + if isinstance(out[column][0], (Obs, Corr)): + out[column] = out[column].transform(lambda x: create_json_string(x, indent=0)) + + if not fname.endswith('.csv'): + fname += '.csv' + + if gz is True: + if not fname.endswith('.gz'): + fname += '.gz' + out.to_csv(fname, index=False, compression='gzip') + else: + out.to_csv(fname, index=False) + + +def load_df(fname, auto_gamma=False, gz=True): + """Imports a pandas DataFrame from a csv.(gz) file in which Obs objects are serialized as json strings. + + Parameters + ---------- + fname : str + Filename of the input file. + auto_gamma : bool + If True applies the gamma_method to all imported Obs objects with the default parameters for + the error analysis. Default False. + gz : bool + If True, assumes that data is gzipped. If False, assumes JSON file. + """ + + if not fname.endswith('.csv') and not fname.endswith('.gz'): + fname += '.csv' + + if gz is True: + if not fname.endswith('.gz'): + fname += '.gz' + with gzip.open(fname) as f: + re_import = pd.read_csv(f) + else: + if fname.endswith('.gz'): + warnings.warn("Trying to read from %s without unzipping!" % fname, UserWarning) + re_import = pd.read_csv(fname) + + for column in re_import.select_dtypes(include="object"): + if isinstance(re_import[column][0], str): + if re_import[column][0][:20] == '{"program":"pyerrors': + re_import[column] = re_import[column].transform(lambda x: import_json_string(x, verbose=False)) + if auto_gamma is True: + re_import[column].apply(lambda x: x.gamma_method()) + + return re_import diff --git a/setup.py b/setup.py index 0c00aad5..33bde5bc 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ setup(name='pyerrors', license="MIT", packages=find_packages(), python_requires='>=3.6.0', - install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5py>=3', 'lxml>=4', 'python-rapidjson>=1'], + install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5py>=3', 'lxml>=4', 'python-rapidjson>=1', 'pandas>=1.1'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', diff --git a/tests/fits_test.py b/tests/fits_test.py index c578a86d..013239f5 100644 --- a/tests/fits_test.py +++ b/tests/fits_test.py @@ -61,7 +61,6 @@ def test_least_squares(): beta[i].gamma_method(S=1.0) assert math.isclose(beta[i].value, popt[i], abs_tol=1e-5) assert math.isclose(pcov[i, i], beta[i].dvalue ** 2, abs_tol=1e-3) - assert math.isclose(pe.covariance([beta[0], beta[1]])[0, 1], pcov[0, 1], abs_tol=1e-3) chi2_pyerrors = np.sum(((f(x, *[o.value for o in beta]) - y) / yerr) ** 2) / (len(x) - 2) chi2_scipy = np.sum(((f(x, *popt) - y) / yerr) ** 2) / (len(x) - 2) @@ -82,7 +81,6 @@ def test_least_squares(): betac[i].gamma_method(S=1.0) assert math.isclose(betac[i].value, popt[i], abs_tol=1e-5) assert math.isclose(pcov[i, i], betac[i].dvalue ** 2, abs_tol=1e-3) - assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], pcov[0, 1], abs_tol=1e-3) def test_alternative_solvers(): @@ -243,7 +241,6 @@ def test_total_least_squares(): beta[i].gamma_method(S=1.0) assert math.isclose(beta[i].value, output.beta[i], rel_tol=1e-5) assert math.isclose(output.cov_beta[i, i], beta[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(beta[i].dvalue ** 2) - assert math.isclose(pe.covariance([beta[0], beta[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1) out = pe.total_least_squares(ox, oy, func, const_par=[beta[1]]) @@ -266,7 +263,6 @@ def test_total_least_squares(): betac[i].gamma_method(S=1.0) assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3) assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2) - assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1) outc = pe.total_least_squares(oxc, oyc, func, const_par=[betac[1]]) @@ -281,7 +277,6 @@ def test_total_least_squares(): betac[i].gamma_method(S=1.0) assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3) assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2) - assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1) outc = pe.total_least_squares(oxc, oy, func, const_par=[betac[1]]) diff --git a/tests/pandas_test.py b/tests/pandas_test.py new file mode 100644 index 00000000..658f4375 --- /dev/null +++ b/tests/pandas_test.py @@ -0,0 +1,30 @@ +import numpy as np +import pandas as pd +import pyerrors as pe + +def test_df_export_import(tmp_path): + my_dict = {"int": 1, + "float": -0.01, + "Obs1": pe.pseudo_Obs(87, 21, "test_ensemble"), + "Obs2": pe.pseudo_Obs(-87, 21, "test_ensemble2")} + for gz in [True, False]: + my_df = pd.DataFrame([my_dict] * 10) + + pe.input.pandas.dump_df(my_df, (tmp_path / 'df_output').as_posix(), gz=gz) + reconstructed_df = pe.input.pandas.load_df((tmp_path / 'df_output').as_posix(), auto_gamma=True, gz=gz) + assert np.all(my_df == reconstructed_df) + + pe.input.pandas.load_df((tmp_path / 'df_output.csv').as_posix(), gz=gz) + + +def test_df_Corr(tmp_path): + + my_corr = pe.Corr([pe.pseudo_Obs(-0.48, 0.04, "test"), pe.pseudo_Obs(-0.154, 0.03, "test")]) + + my_dict = {"int": 1, + "float": -0.01, + "Corr": my_corr} + my_df = pd.DataFrame([my_dict] * 5) + + pe.input.pandas.dump_df(my_df, (tmp_path / 'df_output').as_posix()) + reconstructed_df = pe.input.pandas.load_df((tmp_path / 'df_output').as_posix(), auto_gamma=True)