From 3cf6bca02735b507ead339957a0038045f01c373 Mon Sep 17 00:00:00 2001 From: Tim <16023856+bayesfactor@users.noreply.github.com> Date: Tue, 10 Jul 2018 22:42:23 -0700 Subject: [PATCH 01/65] improved parsing for PEIS files I found some new column types and a new placement of the ole_timestamp for PEIS files --- galvani/BioLogic.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index e5bdb17..7fa4017 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -212,6 +212,10 @@ def VMPdata_dtype_from_colIDs(colIDs): dtype_dict['Capacitance discharge/µF'] = ' 40000 and ole_timestamp1 < 50000: ole_timestamp = ole_timestamp1 elif ole_timestamp2 > 40000 and ole_timestamp2 < 50000: ole_timestamp = ole_timestamp2 elif ole_timestamp3 > 40000 and ole_timestamp3 < 50000: ole_timestamp = ole_timestamp3 + elif ole_timestamp4 > 40000 and ole_timestamp4 < 50000: + ole_timestamp = ole_timestamp4 + else: raise ValueError("Could not find timestamp in the LOG module") From 42ff0d010e26d23da3461d591555dcabd6e92a33 Mon Sep 17 00:00:00 2001 From: bcolsen Date: Thu, 4 Oct 2018 15:17:40 -0600 Subject: [PATCH 02/65] added more column types --- galvani/BioLogic.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 7fa4017..a421539 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -186,6 +186,8 @@ def VMPdata_dtype_from_colIDs(colIDs): dtype_dict['(Q-Qo)/mA.h'] = ' Date: Thu, 4 Oct 2018 16:59:57 -0600 Subject: [PATCH 03/65] energy indexes --- galvani/BioLogic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index a421539..fa35210 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -208,6 +208,10 @@ def VMPdata_dtype_from_colIDs(colIDs): dtype_dict['I Range'] = ' Date: Sun, 10 Mar 2019 09:21:57 +0100 Subject: [PATCH 04/65] Changed res2sqlite to have a main() function --- scripts/res2sqlite.py | 74 ++++++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/scripts/res2sqlite.py b/scripts/res2sqlite.py index c42c16a..0234a7b 100755 --- a/scripts/res2sqlite.py +++ b/scripts/res2sqlite.py @@ -350,7 +350,7 @@ CREATE VIEW IF NOT EXISTS Capacity_View """ -def mdb_get_data_text(filename, table): +def mdb_get_data_text(s3db, filename, table): print("Reading %s..." % table) try: mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], @@ -373,7 +373,7 @@ def mdb_get_data_text(filename, table): mdb_sql.terminate() -def mdb_get_data_numeric(filename, table): +def mdb_get_data_numeric(s3db, filename, table): print("Reading %s..." % table) try: mdb_sql = sp.Popen(['mdb-export', filename, table], @@ -392,45 +392,55 @@ def mdb_get_data_numeric(filename, table): mdb_sql.terminate() -def mdb_get_data(filename, table): +def mdb_get_data(s3db, filename, table): if table in mdb_tables_text: - mdb_get_data_text(filename, table) + mdb_get_data_text(s3db, filename, table) elif table in mdb_tables_numeric: - mdb_get_data_numeric(filename, table) + mdb_get_data_numeric(s3db, filename, table) else: raise ValueError("'%s' is in neither mdb_tables_text nor mdb_tables_numeric" % table) -## Main part of the script +def convert_arbin_to_sqlite(input_file, output_file): + """Read data from an Arbin .res data file and write to a sqlite file. 
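+
+    A usage sketch (file names here are placeholders for illustration,
+    not part of this change):
+
+        convert_arbin_to_sqlite('cell01.res', 'cell01.s3db')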
-parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export") -parser.add_argument('input_file', type=str) # need file name to pass to sp.Popen -parser.add_argument('output_file', type=str) # need file name to pass to sqlite3.connect - -args = parser.parse_args() - -s3db = sqlite3.connect(args.output_file) - - -for table in reversed(mdb_tables + mdb_5_23_tables): - s3db.execute('DROP TABLE IF EXISTS "%s";' % table) - -for table in mdb_tables: - s3db.executescript(mdb_create_scripts[table]) - mdb_get_data(args.input_file, table) - if table in mdb_create_indices: - print("Creating indices for %s..." % table) - s3db.executescript(mdb_create_indices[table]) - -if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"): - for table in mdb_5_23_tables: + Any data currently in the sqlite file will be erased! + """ + s3db = sqlite3.connect(output_file) + + + for table in reversed(mdb_tables + mdb_5_23_tables): + s3db.execute('DROP TABLE IF EXISTS "%s";' % table) + + for table in mdb_tables: s3db.executescript(mdb_create_scripts[table]) - mdb_get_data(args.input_file, table) + mdb_get_data(s3db, input_file, table) if table in mdb_create_indices: + print("Creating indices for %s..." % table) s3db.executescript(mdb_create_indices[table]) + + if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"): + for table in mdb_5_23_tables: + s3db.executescript(mdb_create_scripts[table]) + mdb_get_data(input_file, table) + if table in mdb_create_indices: + s3db.executescript(mdb_create_indices[table]) + + print("Creating helper table for capacity and energy totals...") + s3db.executescript(helper_table_script) + + print("Vacuuming database...") + s3db.executescript("VACUUM; ANALYZE;") -print("Creating helper table for capacity and energy totals...") -s3db.executescript(helper_table_script) -print("Vacuuming database...") -s3db.executescript("VACUUM; ANALYZE;") +def main(argv=None): + parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export") + parser.add_argument('input_file', type=str) # need file name to pass to sp.Popen + parser.add_argument('output_file', type=str) # need file name to pass to sqlite3.connect + + args = parser.parse_args(argv) + convert_arbin_to_sqlite(args.input_file, args.output_file) + + +if __name__ == '__main__': + main() From e5e75ff2f0131cf87d2e8616d9f8b3ce211236d7 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 09:28:09 +0100 Subject: [PATCH 05/65] Make res2sqlite.py an entry_point rather than a script --- {scripts => galvani}/res2sqlite.py | 0 setup.py | 5 ++++- 2 files changed, 4 insertions(+), 1 deletion(-) rename {scripts => galvani}/res2sqlite.py (100%) diff --git a/scripts/res2sqlite.py b/galvani/res2sqlite.py similarity index 100% rename from scripts/res2sqlite.py rename to galvani/res2sqlite.py diff --git a/setup.py b/setup.py index 40130ce..0d26328 100644 --- a/setup.py +++ b/setup.py @@ -8,6 +8,7 @@ setup( description='Open and process battery charger log data files', url='https://github.com/chatcannon/galvani', author='Chris Kerr', + author_email='chris.kerr@mykolab.ch', license='GPLv3+', classifiers=[ 'Development Status :: 3 - Alpha', @@ -16,6 +17,8 @@ setup( 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English'], packages=['galvani'], - scripts=['scripts/res2sqlite.py'], # TODO make this use entry_points + 
entry_points={'console_scripts': [ + 'res2sqlite = galvani.res2sqlite:main', + ]}, install_requires=['numpy'] ) From ef95863735d2d33e12e99322d0293e6765846901 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 09:51:34 +0100 Subject: [PATCH 06/65] Use README.md for long_description --- setup.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/setup.py b/setup.py index 0d26328..f1dd8b7 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,18 @@ # -*- coding: utf-8 -*- +import os.path + from setuptools import setup +with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: + readme = f.read() + setup( name='galvani', version='0.0.1a1', description='Open and process battery charger log data files', + long_description=readme, + long_description_content_type="text/markdown", url='https://github.com/chatcannon/galvani', author='Chris Kerr', author_email='chris.kerr@mykolab.ch', From bf24fbfa00c253a9ec5e5562229218b8ed3fb908 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 09:56:00 +0100 Subject: [PATCH 07/65] Minor formatting tweaks --- setup.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index f1dd8b7..1553c7b 100644 --- a/setup.py +++ b/setup.py @@ -22,10 +22,11 @@ setup( 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', - 'Natural Language :: English'], + 'Natural Language :: English', + ], packages=['galvani'], entry_points={'console_scripts': [ 'res2sqlite = galvani.res2sqlite:main', ]}, - install_requires=['numpy'] + install_requires=['numpy'], ) From d5f7cfd301ff8679d3f8eb0b3568fdcfff3b3c61 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 10:30:14 +0100 Subject: [PATCH 08/65] Add python 3.7 to tox.ini and travis.yml --- .travis.yml | 1 + tox.ini | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a3627fd..c77f38c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ cache: python: - "2.7" - "3.5" + - "3.7" install: - pip install tox-travis - sh get_testdata.sh diff --git a/tox.ini b/tox.ini index 09f70f4..bb55395 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,py35 +envlist = py27,py35,py37 [testenv] deps=nose commands=nosetests From 85eb7249ad1439be091a1d97cbe1b071b24acef1 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 10:34:02 +0100 Subject: [PATCH 09/65] Use break instead of StopIteration - fixes #17 --- galvani/BioLogic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index e5bdb17..07172b5 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -234,7 +234,7 @@ def read_VMP_modules(fileobj, read_module_data=True): while True: module_magic = fileobj.read(len(b'MODULE')) if len(module_magic) == 0: # end of file - raise StopIteration + break elif module_magic != b'MODULE': raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic) From 803c4f3dc2493593af749aac122e5aed2281e651 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 10:38:26 +0100 Subject: [PATCH 10/65] Disable Python 3.7 Travis testing --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index c77f38c..e84d8f2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ cache: python: - "2.7" - "3.5" - - "3.7" +# - "3.7" # Python 3.7 is not available on travis CI yet 
install: - pip install tox-travis - sh get_testdata.sh From 757f56826bd273369c15c20882b4d24884de152c Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 10 Mar 2019 10:41:56 +0100 Subject: [PATCH 11/65] Release version 0.0.1 --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 1553c7b..8b6538b 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: setup( name='galvani', - version='0.0.1a1', + version='0.0.1', description='Open and process battery charger log data files', long_description=readme, long_description_content_type="text/markdown", @@ -18,7 +18,7 @@ setup( author_email='chris.kerr@mykolab.ch', license='GPLv3+', classifiers=[ - 'Development Status :: 3 - Alpha', + 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', From 880b4a0a2d7c2c16c8eb3b7861d5005d40bc1728 Mon Sep 17 00:00:00 2001 From: Tim <16023856+bayesfactor@users.noreply.github.com> Date: Mon, 11 Mar 2019 10:23:26 -0700 Subject: [PATCH 12/65] new column types Introduced new column types that show up in GEIS files --- galvani/BioLogic.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 004f302..0fa07f0 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -184,6 +184,8 @@ def VMPdata_dtype_from_colIDs(colIDs): dtype_dict['I/mA'] = ' Date: Sat, 16 Mar 2019 12:59:23 +0100 Subject: [PATCH 13/65] Replace `nose.eq_` with `assert x == y` --- tests/test_BioLogic.py | 52 ++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index 3cce358..409979c 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -6,7 +6,7 @@ from datetime import date, datetime import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal -from nose.tools import ok_, eq_, raises +from nose.tools import ok_, raises from galvani import MPTfile, MPRfile from galvani.BioLogic import MPTfileCSV, str3 # not exported @@ -16,11 +16,12 @@ testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata') def test_open_MPT(): mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt')) - eq_(comments, []) - eq_(mpt1.dtype.names, ("mode", "ox/red", "error", "control changes", - "Ns changes", "counter inc.", "time/s", - "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W", - "I/mA", "(Q-Qo)/mA.h", "x")) + assert comments == [] + assert mpt1.dtype.names == ( + "mode", "ox/red", "error", "control changes", "Ns changes", + "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W", + "I/mA", "(Q-Qo)/mA.h", "x", + ) @raises(ValueError) @@ -30,11 +31,12 @@ def test_open_MPT_fails_for_bad_file(): def test_open_MPT_csv(): mpt1, comments = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpt')) - eq_(comments, []) - eq_(mpt1.fieldnames, ["mode", "ox/red", "error", "control changes", - "Ns changes", "counter inc.", "time/s", - "control/V/mA", "Ewe/V", "dq/mA.h", "P/W", - "/mA", "(Q-Qo)/mA.h", "x"]) + assert comments == [] + assert mpt1.fieldnames == [ + "mode", "ox/red", "error", "control changes", "Ns changes", + "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h", "P/W", + "/mA", "(Q-Qo)/mA.h", "x", + ] @raises(ValueError) @@ -43,44 +45,44 @@ def test_open_MPT_csv_fails_for_bad_file(): def 
test_open_MPR1(): - mpr1 = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) + mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) ## Check the dates as a basic test that it has been read properly - eq_(mpr1.startdate, date(2011, 10, 29)) - eq_(mpr1.enddate, date(2011, 10, 31)) + assert mpr.startdate == date(2011, 10, 29) + assert mpr.enddate == date(2011, 10, 31) def test_open_MPR2(): - mpr2 = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr')) + mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr')) ## Check the dates as a basic test that it has been read properly - eq_(mpr2.startdate, date(2012, 9, 27)) - eq_(mpr2.enddate, date(2012, 9, 27)) + assert mpr.startdate == date(2012, 9, 27) + assert mpr.enddate == date(2012, 9, 27) def test_open_MPR3(): mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic3.mpr')) ## Check the dates as a basic test that it has been read properly - eq_(mpr.startdate, date(2013, 3, 27)) - eq_(mpr.enddate, date(2013, 3, 27)) + assert mpr.startdate == date(2013, 3, 27) + assert mpr.enddate == date(2013, 3, 27) def test_open_MPR4(): mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr')) ## Check the dates as a basic test that it has been read properly - eq_(mpr.startdate, date(2011, 11, 1)) - eq_(mpr.enddate, date(2011, 11, 2)) + assert mpr.startdate == date(2011, 11, 1) + assert mpr.enddate == date(2011, 11, 2) def test_open_MPR5(): mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr')) ## Check the dates as a basic test that it has been read properly - eq_(mpr.startdate, date(2013, 1, 28)) - eq_(mpr.enddate, date(2013, 1, 28)) + assert mpr.startdate == date(2013, 1, 28) + assert mpr.enddate == date(2013, 1, 28) def test_open_MPR6(): mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr')) ## Check the dates as a basic test that it has been read properly - eq_(mpr.startdate, date(2012, 9, 11)) + assert mpr.startdate == date(2012, 9, 11) ## no end date because no VMP LOG module @@ -139,7 +141,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments): assert_field_matches("(Q-Qo)/C", decimal=6) # 32 bit float precision try: - eq_(timestamp_from_comments(comments), mpr.timestamp) + assert timestamp_from_comments(comments) == mpr.timestamp except AttributeError: pass From b110162763ddc345ed58842460148ddaf8398bfa Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 16 Mar 2019 13:03:54 +0100 Subject: [PATCH 14/65] Replace `nose.raises` with `pytest.raises` --- tests/test_BioLogic.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index 409979c..40eb3ca 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -6,7 +6,7 @@ from datetime import date, datetime import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal -from nose.tools import ok_, raises +import pytest from galvani import MPTfile, MPRfile from galvani.BioLogic import MPTfileCSV, str3 # not exported @@ -24,9 +24,9 @@ def test_open_MPT(): ) -@raises(ValueError) def test_open_MPT_fails_for_bad_file(): - mpt1 = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) + with pytest.raises(ValueError, match='Bad first line'): + MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) def test_open_MPT_csv(): @@ -39,9 +39,9 @@ def test_open_MPT_csv(): ] -@raises(ValueError) def test_open_MPT_csv_fails_for_bad_file(): - mpt1 = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr')) + with pytest.raises((ValueError, UnicodeDecodeError)): + 
MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr')) def test_open_MPR1(): @@ -86,9 +86,9 @@ def test_open_MPR6(): ## no end date because no VMP LOG module -@raises(ValueError) def test_open_MPR_fails_for_bad_file(): - mpr1 = MPRfile(os.path.join(testdata_dir, 'arbin1.res')) + with pytest.raises(ValueError, match='Invalid magic for .mpr file'): + MPRfile(os.path.join(testdata_dir, 'arbin1.res')) def timestamp_from_comments(comments): From a618f75bb645092472fc388405997176cc53c402 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 16 Mar 2019 13:10:08 +0100 Subject: [PATCH 15/65] Change testing config to use pytest instead of nosetests Closes #5 --- setup.py | 1 + tox.ini | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 8b6538b..7821385 100644 --- a/setup.py +++ b/setup.py @@ -29,4 +29,5 @@ setup( 'res2sqlite = galvani.res2sqlite:main', ]}, install_requires=['numpy'], + tests_require=['pytest'], ) diff --git a/tox.ini b/tox.ini index bb55395..ca5b805 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] envlist = py27,py35,py37 [testenv] -deps=nose -commands=nosetests +deps=pytest +commands=pytest From d1e8616f1ed771f5d672328a70913e0075d81baa Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 16 Mar 2019 13:35:13 +0100 Subject: [PATCH 16/65] Use `pytest.mark.parametrize` to combine test cases --- tests/test_BioLogic.py | 111 ++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 74 deletions(-) diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index 40eb3ca..de0da1d 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -2,7 +2,7 @@ import os.path import re -from datetime import date, datetime +from datetime import datetime import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal @@ -44,46 +44,23 @@ def test_open_MPT_csv_fails_for_bad_file(): MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr')) -def test_open_MPR1(): - mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) - ## Check the dates as a basic test that it has been read properly - assert mpr.startdate == date(2011, 10, 29) - assert mpr.enddate == date(2011, 10, 31) - - -def test_open_MPR2(): - mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr')) - ## Check the dates as a basic test that it has been read properly - assert mpr.startdate == date(2012, 9, 27) - assert mpr.enddate == date(2012, 9, 27) - - -def test_open_MPR3(): - mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic3.mpr')) - ## Check the dates as a basic test that it has been read properly - assert mpr.startdate == date(2013, 3, 27) - assert mpr.enddate == date(2013, 3, 27) - - -def test_open_MPR4(): - mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr')) - ## Check the dates as a basic test that it has been read properly - assert mpr.startdate == date(2011, 11, 1) - assert mpr.enddate == date(2011, 11, 2) - - -def test_open_MPR5(): - mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr')) - ## Check the dates as a basic test that it has been read properly - assert mpr.startdate == date(2013, 1, 28) - assert mpr.enddate == date(2013, 1, 28) - - -def test_open_MPR6(): - mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr')) - ## Check the dates as a basic test that it has been read properly - assert mpr.startdate == date(2012, 9, 11) - ## no end date because no VMP LOG module +@pytest.mark.parametrize('filename, startdate, enddate', [ + ('bio_logic1.mpr', '2011-10-29', '2011-10-31'), + ('bio_logic2.mpr', 
'2012-09-27', '2012-09-27'), + ('bio_logic3.mpr', '2013-03-27', '2013-03-27'), + ('bio_logic4.mpr', '2011-11-01', '2011-11-02'), + ('bio_logic5.mpr', '2013-01-28', '2013-01-28'), + # bio_logic6.mpr has no end date because it does not have a VMP LOG module + ('bio_logic6.mpr', '2012-09-11', None), +]) +def test_MPR_dates(filename, startdate, enddate): + """Check that the start and end dates in .mpr files are read correctly.""" + mpr = MPRfile(os.path.join(testdata_dir, filename)) + assert mpr.startdate.strftime('%Y-%m-%d') == startdate + if enddate: + mpr.enddate.strftime('%Y-%m-%d') == enddate + else: + assert not hasattr(mpr, 'enddate') def test_open_MPR_fails_for_bad_file(): @@ -146,25 +123,26 @@ def assert_MPR_matches_MPT(mpr, mpt, comments): pass -def test_MPR1_matches_MPT1(): - mpr1 = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) - mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt')) - assert_MPR_matches_MPT(mpr1, mpt1, comments) +@pytest.mark.parametrize('basename', [ + 'bio_logic1', + 'bio_logic2', + # No bio_logic3.mpt file + 'bio_logic4', + # bio_logic5 and bio_logic6 are special cases + 'CV_C01', + '121_CA_455nm_6V_30min_C01', +]) +def test_MPR_matches_MPT(basename): + """Check the MPR parser against the MPT parser. - -def test_MPR2_matches_MPT2(): - mpr2 = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr')) - mpt2, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic2.mpt')) - assert_MPR_matches_MPT(mpr2, mpt2, comments) - - -## No bio_logic3.mpt file - - -def test_MPR4_matches_MPT4(): - mpr4 = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr')) - mpt4, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic4.mpt')) - assert_MPR_matches_MPT(mpr4, mpt4, comments) + Load a binary .mpr file and a text .mpt file which should contain + exactly the same data. Check that the loaded data actually match. 
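+
+    A sketch of running only these comparisons (node IDs follow pytest's
+    usual parametrization naming; the path is the one in this diff):
+
+        pytest tests/test_BioLogic.py -k MPR_matches_MPT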
+ """ + binpath = os.path.join(testdata_dir, basename + '.mpr') + txtpath = os.path.join(testdata_dir, basename + '.mpt') + mpr = MPRfile(binpath) + mpt, comments = MPTfile(txtpath) + assert_MPR_matches_MPT(mpr, mpt, comments) def test_MPR5_matches_MPT5(): @@ -180,18 +158,3 @@ def test_MPR6_matches_MPT6(): mpt, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic6.mpt')) mpr.data = mpr.data[:958] # .mpt file is incomplete assert_MPR_matches_MPT(mpr, mpt, comments) - - -## Tests for issue #1 -- new dtypes ## - - -def test_CV_C01(): - mpr = MPRfile(os.path.join(testdata_dir, 'CV_C01.mpr')) - mpt, comments = MPTfile(os.path.join(testdata_dir, 'CV_C01.mpt')) - assert_MPR_matches_MPT(mpr, mpt, comments) - - -def test_CA_455nm(): - mpr = MPRfile(os.path.join(testdata_dir, '121_CA_455nm_6V_30min_C01.mpr')) - mpt, comments = MPTfile(os.path.join(testdata_dir, '121_CA_455nm_6V_30min_C01.mpt')) - assert_MPR_matches_MPT(mpr, mpt, comments) From 1bcbc16bab8a69649805b3ecb0f8abd2c42246f6 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 16 Mar 2019 13:41:49 +0100 Subject: [PATCH 17/65] Use `np.frombuffer` instead of `np.fromstring` Fixes #22 --- galvani/BioLogic.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 0fa07f0..c63342d 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -275,7 +275,7 @@ def read_VMP_modules(fileobj, read_module_data=True): if len(hdr_bytes) < VMPmodule_hdr.itemsize: raise IOError("Unexpected end of file while reading module header") - hdr = np.fromstring(hdr_bytes, dtype=VMPmodule_hdr, count=1) + hdr = np.frombuffer(hdr_bytes, dtype=VMPmodule_hdr, count=1) hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names)) hdr_dict['offset'] = fileobj.tell() if read_module_data: @@ -326,17 +326,17 @@ class MPRfile: data_module, = (m for m in modules if m['shortname'] == b'VMP data ') maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG '] - n_data_points = np.fromstring(data_module['data'][:4], dtype=' 40000 and ole_timestamp1 < 50000: From b08c2f44350d29c5d6335f5b6ac4aa3dddd5bb14 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 16 Mar 2019 13:43:26 +0100 Subject: [PATCH 18/65] Use `array.item()` instead of `np.asscalar()` --- galvani/BioLogic.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index c63342d..df954d2 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -327,8 +327,7 @@ class MPRfile: maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG '] n_data_points = np.frombuffer(data_module['data'][:4], dtype=' Date: Sat, 16 Mar 2019 13:50:50 +0100 Subject: [PATCH 19/65] Fix 'invalid escape sequence' warnings --- galvani/BioLogic.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index df954d2..a34ad13 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -70,7 +70,8 @@ def MPTfile(file_or_path): if magic != b'EC-Lab ASCII FILE\r\n': raise ValueError("Bad first line for EC-Lab file: '%s'" % magic) - nb_headers_match = re.match(b'Nb header lines : (\d+)\s*$', next(mpt_file)) + # TODO use rb'string' here once Python 2 is no longer supported + nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file)) nb_headers = int(nb_headers_match.group(1)) if nb_headers < 3: raise ValueError("Too few header lines: %d" % nb_headers) @@ -107,7 +108,7 @@ def MPTfileCSV(file_or_path): if 
magic.rstrip() != 'EC-Lab ASCII FILE': raise ValueError("Bad first line for EC-Lab file: '%s'" % magic) - nb_headers_match = re.match('Nb header lines : (\d+)\s*$', next(mpt_file)) + nb_headers_match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file)) nb_headers = int(nb_headers_match.group(1)) if nb_headers < 3: raise ValueError("Too few header lines: %d" % nb_headers) From 7a5887fb381c9e960aaaa7c754d6082e22b28dab Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 30 Mar 2019 15:31:18 +0100 Subject: [PATCH 20/65] Update existing get_testdata links to HTTPS version --- get_testdata.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/get_testdata.sh b/get_testdata.sh index 639930e..c571c6f 100755 --- a/get_testdata.sh +++ b/get_testdata.sh @@ -7,20 +7,20 @@ mkdir -p tests/testdata cd tests/testdata /usr/bin/wget --continue -i - < Date: Sat, 30 Mar 2019 15:34:22 +0100 Subject: [PATCH 21/65] Add new test data file with different date format This tests for issue #20 Thanks @JBWarrington for providing this file --- get_testdata.sh | 1 + tests/test_BioLogic.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/get_testdata.sh b/get_testdata.sh index c571c6f..792223a 100755 --- a/get_testdata.sh +++ b/get_testdata.sh @@ -23,4 +23,5 @@ https://files.figshare.com/1780529/121_CA_455nm_6V_30min_C01.mpr https://files.figshare.com/1780530/121_CA_455nm_6V_30min_C01.mpt https://files.figshare.com/1780526/CV_C01.mpr https://files.figshare.com/1780527/CV_C01.mpt +https://files.figshare.com/14752538/C019P-0ppb-A_C01.mpr END_FILELIST diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index de0da1d..f42e85d 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -52,13 +52,15 @@ def test_open_MPT_csv_fails_for_bad_file(): ('bio_logic5.mpr', '2013-01-28', '2013-01-28'), # bio_logic6.mpr has no end date because it does not have a VMP LOG module ('bio_logic6.mpr', '2012-09-11', None), + # C019P-0ppb-A_C01.mpr stores the date in a different format + ('C019P-0ppb-A_C01.mpr', '2019-03-14', '2019-03-14'), ]) def test_MPR_dates(filename, startdate, enddate): """Check that the start and end dates in .mpr files are read correctly.""" mpr = MPRfile(os.path.join(testdata_dir, filename)) assert mpr.startdate.strftime('%Y-%m-%d') == startdate if enddate: - mpr.enddate.strftime('%Y-%m-%d') == enddate + assert mpr.enddate.strftime('%Y-%m-%d') == enddate else: assert not hasattr(mpr, 'enddate') From b977115d6e60b289cf8eca2b58727e9939f54d9b Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 30 Mar 2019 15:35:26 +0100 Subject: [PATCH 22/65] Also try parsing dates as '%m-%d-%y' - fixes #20 --- galvani/BioLogic.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index a34ad13..c68769c 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -360,12 +360,18 @@ class MPRfile: self.cols = column_types self.npts = n_data_points - tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y') + try: + tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y') + except ValueError: + tm = time.strptime(str3(settings_mod['date']), '%m-%d-%y') self.startdate = date(tm.tm_year, tm.tm_mon, tm.tm_mday) if maybe_log_module: log_module, = maybe_log_module - tm = time.strptime(str3(log_module['date']), '%m/%d/%y') + try: + tm = time.strptime(str3(log_module['date']), '%m/%d/%y') + except ValueError: + tm = time.strptime(str3(log_module['date']), '%m-%d-%y') self.enddate = 
date(tm.tm_year, tm.tm_mon, tm.tm_mday) ## There is a timestamp at either 465 or 469 bytes From 85cc3f523e3cc5244100046a9ee55c1de3f47374 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sat, 30 Mar 2019 15:52:44 +0100 Subject: [PATCH 23/65] Release version 0.0.2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7821385..d881fd3 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: setup( name='galvani', - version='0.0.1', + version='0.0.2', description='Open and process battery charger log data files', long_description=readme, long_description_content_type="text/markdown", From d6d6bf1ac743a811e5722e4f9cbe62de4aa80d6d Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Tue, 2 Apr 2019 21:34:34 +0200 Subject: [PATCH 24/65] Use a pytest fixture to locate the testdata directory --- tests/conftest.py | 11 +++++++++++ tests/test_BioLogic.py | 20 +++++++++----------- 2 files changed, 20 insertions(+), 11 deletions(-) create mode 100644 tests/conftest.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..1d2e5ad --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,11 @@ +"""Helpers for pytest tests.""" + +import os + +import pytest + + +@pytest.fixture(scope='session') +def testdata_dir(): + """Path to the testdata directory.""" + return os.path.join(os.path.dirname(__file__), 'testdata') diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index f42e85d..fc504e6 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -11,10 +11,8 @@ import pytest from galvani import MPTfile, MPRfile from galvani.BioLogic import MPTfileCSV, str3 # not exported -testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata') - -def test_open_MPT(): +def test_open_MPT(testdata_dir): mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt')) assert comments == [] assert mpt1.dtype.names == ( @@ -24,12 +22,12 @@ def test_open_MPT(): ) -def test_open_MPT_fails_for_bad_file(): +def test_open_MPT_fails_for_bad_file(testdata_dir): with pytest.raises(ValueError, match='Bad first line'): MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr')) -def test_open_MPT_csv(): +def test_open_MPT_csv(testdata_dir): mpt1, comments = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpt')) assert comments == [] assert mpt1.fieldnames == [ @@ -39,7 +37,7 @@ def test_open_MPT_csv(): ] -def test_open_MPT_csv_fails_for_bad_file(): +def test_open_MPT_csv_fails_for_bad_file(testdata_dir): with pytest.raises((ValueError, UnicodeDecodeError)): MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr')) @@ -55,7 +53,7 @@ def test_open_MPT_csv_fails_for_bad_file(): # C019P-0ppb-A_C01.mpr stores the date in a different format ('C019P-0ppb-A_C01.mpr', '2019-03-14', '2019-03-14'), ]) -def test_MPR_dates(filename, startdate, enddate): +def test_MPR_dates(testdata_dir, filename, startdate, enddate): """Check that the start and end dates in .mpr files are read correctly.""" mpr = MPRfile(os.path.join(testdata_dir, filename)) assert mpr.startdate.strftime('%Y-%m-%d') == startdate @@ -65,7 +63,7 @@ def test_MPR_dates(filename, startdate, enddate): assert not hasattr(mpr, 'enddate') -def test_open_MPR_fails_for_bad_file(): +def test_open_MPR_fails_for_bad_file(testdata_dir): with pytest.raises(ValueError, match='Invalid magic for .mpr file'): MPRfile(os.path.join(testdata_dir, 'arbin1.res')) @@ -134,7 +132,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments): 'CV_C01', 
'121_CA_455nm_6V_30min_C01', ]) -def test_MPR_matches_MPT(basename): +def test_MPR_matches_MPT(testdata_dir, basename): """Check the MPR parser against the MPT parser. Load a binary .mpr file and a text .mpt file which should contain @@ -147,7 +145,7 @@ def test_MPR_matches_MPT(basename): assert_MPR_matches_MPT(mpr, mpt, comments) -def test_MPR5_matches_MPT5(): +def test_MPR5_matches_MPT5(testdata_dir): mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr')) mpt, comments = MPTfile((re.sub(b'\tXXX\t', b'\t0\t', line) for line in open(os.path.join(testdata_dir, 'bio_logic5.mpt'), @@ -155,7 +153,7 @@ def test_MPR5_matches_MPT5(): assert_MPR_matches_MPT(mpr, mpt, comments) -def test_MPR6_matches_MPT6(): +def test_MPR6_matches_MPT6(testdata_dir): mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr')) mpt, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic6.mpt')) mpr.data = mpr.data[:958] # .mpt file is incomplete From 5530a7a8ff1360a0517cf1cc994283fe6f17732e Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Tue, 2 Apr 2019 22:05:41 +0200 Subject: [PATCH 25/65] Add a simple test for loading Arbin .res files --- tests/test_Arbin.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 tests/test_Arbin.py diff --git a/tests/test_Arbin.py b/tests/test_Arbin.py new file mode 100644 index 0000000..9ceec8e --- /dev/null +++ b/tests/test_Arbin.py @@ -0,0 +1,26 @@ +"""Tests for loading Arbin .res files.""" + +import os +import sqlite3 +import subprocess + +import pytest + +from galvani import res2sqlite + + +# TODO - change to subprocess.DEVNULL when python 2 support is removed +have_mdbtools = (subprocess.call(['which', 'mdb-export'], stdout=None) == 0) + + +@pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools') +@pytest.mark.parametrize('basename', ['arbin1']) +def test_convert_Arbin_to_sqlite(testdata_dir, tmpdir, basename): + """Convert an Arbin file to SQLite using the functional interface.""" + res_file = os.path.join(testdata_dir, basename + '.res') + sqlite_file = os.path.join(str(tmpdir), basename + '.s3db') + res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file) + assert os.path.isfile(sqlite_file) + with sqlite3.connect(sqlite_file) as conn: + csr = conn.execute('SELECT * FROM Channel_Normal_Table;') + csr.fetchone() From a1b73867ea9a3cc50572ad779c12630bb6651176 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Tue, 2 Apr 2019 22:06:26 +0200 Subject: [PATCH 26/65] Add a test that a sensible error is raised when MDBTools is not found This is the error that happens in issue #23 --- tests/test_Arbin.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_Arbin.py b/tests/test_Arbin.py index 9ceec8e..97c34a7 100644 --- a/tests/test_Arbin.py +++ b/tests/test_Arbin.py @@ -3,6 +3,7 @@ import os import sqlite3 import subprocess +import sys import pytest @@ -13,6 +14,19 @@ from galvani import res2sqlite have_mdbtools = (subprocess.call(['which', 'mdb-export'], stdout=None) == 0) +@pytest.mark.skipif(have_mdbtools, reason='This tests the failure when mdbtools is not installed') +def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir): + """Checks that the conversion fails with an appropriate error message.""" + res_file = os.path.join(testdata_dir, 'arbin1.res') + sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db') + if sys.version_info >= (3, 3): + expected_exception = FileNotFoundError + else: + expected_exception = OSError + with pytest.raises(expected_exception, match="No such file or 
directory: 'mdb-export'"): + res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file) + + @pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools') @pytest.mark.parametrize('basename', ['arbin1']) def test_convert_Arbin_to_sqlite(testdata_dir, tmpdir, basename): From 557e755f03e4706aca1ba3cadd043e7677d4427e Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Tue, 2 Apr 2019 23:09:53 +0200 Subject: [PATCH 27/65] Move Popen call outside the try/finally block Ensure that all variables used in the except and finally blocks are always defined - fixes #23 In Python 3, Popen objects can be used as contextmanagers, but not in Python 2.7 --- galvani/res2sqlite.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index 0234a7b..92cf630 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -352,10 +352,14 @@ CREATE VIEW IF NOT EXISTS Capacity_View def mdb_get_data_text(s3db, filename, table): print("Reading %s..." % table) + # TODO after dropping Python 2 support - use Popen as contextmanager + mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], + bufsize=-1, stdin=None, stdout=sp.PIPE, + universal_newlines=True) try: - mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], - bufsize=-1, stdin=None, stdout=sp.PIPE, - universal_newlines=True) + # Initialize values to avoid NameError in except clause + mdb_output = '' + insert_match = None mdb_output = mdb_sql.stdout.read() while len(mdb_output) > 0: insert_match = re.match(r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n', @@ -365,7 +369,8 @@ def mdb_get_data_text(s3db, filename, table): s3db.commit() except: print("Error while importing %s" % table) - print("Remaining mdb-export output:", mdb_output) + if mdb_output: + print("Remaining mdb-export output:", mdb_output) if insert_match: print("insert_re match:", insert_match) raise @@ -375,10 +380,11 @@ def mdb_get_data_text(s3db, filename, table): def mdb_get_data_numeric(s3db, filename, table): print("Reading %s..." % table) + # TODO after dropping Python 2 support - use Popen as contextmanager + mdb_sql = sp.Popen(['mdb-export', filename, table], + bufsize=-1, stdin=None, stdout=sp.PIPE, + universal_newlines=True) try: - mdb_sql = sp.Popen(['mdb-export', filename, table], - bufsize=-1, stdin=None, stdout=sp.PIPE, - universal_newlines=True) mdb_csv = csv.reader(mdb_sql.stdout) mdb_headers = next(mdb_csv) quoted_headers = ['"%s"' % h for h in mdb_headers] From 6a8fbe71a4575b48f4528a5d4b0244f1c87e0831 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Tue, 2 Apr 2019 23:01:32 +0200 Subject: [PATCH 28/65] Add some tests for the `res2sqlite` command-line tool Check that the --help option works even if mdbtools is not installed --- tests/test_Arbin.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/test_Arbin.py b/tests/test_Arbin.py index 97c34a7..9032a9b 100644 --- a/tests/test_Arbin.py +++ b/tests/test_Arbin.py @@ -14,6 +14,15 @@ from galvani import res2sqlite have_mdbtools = (subprocess.call(['which', 'mdb-export'], stdout=None) == 0) +def test_res2sqlite_help(): + """Test running `res2sqlite --help`. + + This should work even when mdbtools is not installed. 
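+
+    A sketch of the expected output (the usage line is argparse's usual
+    rendering of the parser defined in res2sqlite.py; exact wording may
+    vary between Python versions):
+
+        usage: res2sqlite [-h] input_file output_file
+
+        Convert Arbin .res files to sqlite3 databases using mdb-export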
+ """ + help_output = subprocess.check_output(['res2sqlite', '--help']) + assert b'Convert Arbin .res files to sqlite3 databases' in help_output + + @pytest.mark.skipif(have_mdbtools, reason='This tests the failure when mdbtools is not installed') def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir): """Checks that the conversion fails with an appropriate error message.""" @@ -29,7 +38,7 @@ def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir): @pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools') @pytest.mark.parametrize('basename', ['arbin1']) -def test_convert_Arbin_to_sqlite(testdata_dir, tmpdir, basename): +def test_convert_Arbin_to_sqlite_function(testdata_dir, tmpdir, basename): """Convert an Arbin file to SQLite using the functional interface.""" res_file = os.path.join(testdata_dir, basename + '.res') sqlite_file = os.path.join(str(tmpdir), basename + '.s3db') @@ -38,3 +47,15 @@ def test_convert_Arbin_to_sqlite(testdata_dir, tmpdir, basename): with sqlite3.connect(sqlite_file) as conn: csr = conn.execute('SELECT * FROM Channel_Normal_Table;') csr.fetchone() + + +@pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools') +def test_convert_cmdline(testdata_dir, tmpdir): + """Checks that the conversion fails with an appropriate error message.""" + res_file = os.path.join(testdata_dir, 'arbin1.res') + sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db') + subprocess.check_call(['res2sqlite', res_file, sqlite_file]) + assert os.path.isfile(sqlite_file) + with sqlite3.connect(sqlite_file) as conn: + csr = conn.execute('SELECT * FROM Channel_Normal_Table;') + csr.fetchone() From 846a5b31491f9bfbf1e4ba155345c4d994aaa3c4 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Wed, 3 Apr 2019 08:13:01 +0200 Subject: [PATCH 29/65] Catch FileNotFoundError from Popen and re-raise a more helpful message --- galvani/res2sqlite.py | 26 ++++++++++++++++++++------ tests/test_Arbin.py | 7 +------ 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index 92cf630..c2fab8d 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -353,9 +353,16 @@ CREATE VIEW IF NOT EXISTS Capacity_View def mdb_get_data_text(s3db, filename, table): print("Reading %s..." % table) # TODO after dropping Python 2 support - use Popen as contextmanager - mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], - bufsize=-1, stdin=None, stdout=sp.PIPE, - universal_newlines=True) + try: + mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], + bufsize=-1, stdin=None, stdout=sp.PIPE, + universal_newlines=True) + except OSError as e: + if e.errno == 2: + raise RuntimeError('Could not locate the `mdb-export` executable. ' + 'Check that mdbtools is properly installed.') + else: + raise try: # Initialize values to avoid NameError in except clause mdb_output = '' @@ -381,9 +388,16 @@ def mdb_get_data_text(s3db, filename, table): def mdb_get_data_numeric(s3db, filename, table): print("Reading %s..." % table) # TODO after dropping Python 2 support - use Popen as contextmanager - mdb_sql = sp.Popen(['mdb-export', filename, table], - bufsize=-1, stdin=None, stdout=sp.PIPE, - universal_newlines=True) + try: + mdb_sql = sp.Popen(['mdb-export', filename, table], + bufsize=-1, stdin=None, stdout=sp.PIPE, + universal_newlines=True) + except OSError as e: + if e.errno == 2: + raise RuntimeError('Could not locate the `mdb-export` executable. 
' + 'Check that mdbtools is properly installed.') + else: + raise try: mdb_csv = csv.reader(mdb_sql.stdout) mdb_headers = next(mdb_csv) diff --git a/tests/test_Arbin.py b/tests/test_Arbin.py index 9032a9b..f21bd50 100644 --- a/tests/test_Arbin.py +++ b/tests/test_Arbin.py @@ -3,7 +3,6 @@ import os import sqlite3 import subprocess -import sys import pytest @@ -28,11 +27,7 @@ def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir): """Checks that the conversion fails with an appropriate error message.""" res_file = os.path.join(testdata_dir, 'arbin1.res') sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db') - if sys.version_info >= (3, 3): - expected_exception = FileNotFoundError - else: - expected_exception = OSError - with pytest.raises(expected_exception, match="No such file or directory: 'mdb-export'"): + with pytest.raises(RuntimeError, match="Could not locate the `mdb-export` executable."): res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file) From 4381b02242673187dfd998a34d291c709d3490f6 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Wed, 3 Apr 2019 08:16:55 +0200 Subject: [PATCH 30/65] Add .pytest_cache to Travis cache --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index e84d8f2..c4301f2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,7 @@ language: python cache: directories: - .tox + - .pytest_cache - tests/testdata python: - "2.7" From 4ba61aa5d8e31bedc7c871c4466274d97b6d3eeb Mon Sep 17 00:00:00 2001 From: bcolsen Date: Tue, 9 Apr 2019 17:23:16 -0600 Subject: [PATCH 31/65] added parsing of loop_modules --- galvani/BioLogic.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index c68769c..2c2c413 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -325,6 +325,7 @@ class MPRfile: self.modules = modules settings_mod, = (m for m in modules if m['shortname'] == b'VMP Set ') data_module, = (m for m in modules if m['shortname'] == b'VMP data ') + maybe_loop_module = [m for m in modules if m['shortname'] == b'VMP loop '] maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG '] n_data_points = np.frombuffer(data_module['data'][:4], dtype=' Date: Tue, 9 Apr 2019 17:48:38 -0600 Subject: [PATCH 32/65] biologic file with duplicate columns --- galvani/BioLogic.py | 150 +++++++++++++++++--------------------------- 1 file changed, 58 insertions(+), 92 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index c68769c..3174299 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -144,14 +144,59 @@ VMPmodule_hdr = np.dtype([('shortname', 'S10'), ('version', ' ?? + 9: ('Ece/V', '/mA', '/V', ', I don't see the difference - elif colID in (6, 77): - dtype_dict['Ewe/V'] = ', 8 is either I or ?? 
- elif colID in (8, 76): - dtype_dict['I/mA'] = ' Date: Thu, 11 Apr 2019 11:59:18 -0600 Subject: [PATCH 33/65] flag fix --- galvani/BioLogic.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 3174299..a307f74 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -195,8 +195,9 @@ def VMPdata_dtype_from_colIDs(colIDs): flags2_dict = OrderedDict() for colID in colIDs: if colID in (1, 2, 3, 21, 31, 65): - type_list.append('u1') - field_list.append('flags') + if 'flags' not in field_list: + type_list.append('u1') + field_list.append('flags') if colID == 1: flags_dict['mode'] = (np.uint8(0x03), np.uint8) elif colID == 2: From ef1ea9a2f46c8e7f3bdd7c3b2338d1f4393e5ca2 Mon Sep 17 00:00:00 2001 From: bcolsen Date: Thu, 11 Apr 2019 17:30:05 -0600 Subject: [PATCH 34/65] Default loop to none and trim the trailing zeros --- galvani/BioLogic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 2c2c413..78229d7 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -311,6 +311,7 @@ class MPRfile: """ def __init__(self, file_or_path): + self.loop_index = None if isinstance(file_or_path, str): mpr_file = open(file_or_path, 'rb') else: @@ -372,6 +373,7 @@ class MPRfile: if loop_module['version'] == 0: self.loop_index = np.fromstring(loop_module['data'][4:], dtype=' Date: Fri, 3 May 2019 18:39:42 +0200 Subject: [PATCH 35/65] Formatting --- galvani/BioLogic.py | 89 +++++++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 44 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 41052f5..9bc9ce4 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -144,48 +144,49 @@ VMPmodule_hdr = np.dtype([('shortname', 'S10'), ('version', ' ?? - 9: ('Ece/V', '/mA', '/V', ' ?? + 9: ('Ece/V', '/mA', '/V', ' Date: Fri, 3 May 2019 18:47:02 +0200 Subject: [PATCH 36/65] Add a VMPdata_colID_flags_map dict --- galvani/BioLogic.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 9bc9ce4..b79f51d 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -188,6 +188,17 @@ VMPdata_colID_dtype_map = { 480: ('NSR I/%', ' Date: Fri, 3 May 2019 19:12:18 +0200 Subject: [PATCH 37/65] Refactor the VMPdata_dtype_from_colIDs function --- galvani/BioLogic.py | 49 ++++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index b79f51d..dbee311 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -9,7 +9,7 @@ import csv from os import SEEK_SET import time from datetime import date, datetime, timedelta -from collections import OrderedDict +from collections import defaultdict, OrderedDict import numpy as np @@ -201,29 +201,46 @@ VMPdata_colID_flag_map = { def VMPdata_dtype_from_colIDs(colIDs): + """Get a numpy record type from a list of column ID numbers. + + The binary layout of the data in the MPR file is described by the sequence + of column ID numbers in the file header. This function converts that + sequence into a numpy dtype which can then be used to load data from the + file with np.frombuffer(). + + Some column IDs refer to small values which are packed into a single byte. + The second return value is a dict describing the bit masks with which to + extract these columns from the flags byte. 
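+
+    A minimal sketch of the intended use (column IDs 1 and 4 are the
+    'mode' flag and 'time/s' entries from the maps above; the exact
+    dtypes are those in VMPdata_colID_dtype_map):
+
+        dtype, flags, flags2 = VMPdata_dtype_from_colIDs([1, 4])
+        # dtype.names -> ('flags', 'time/s')
+        # flags['mode'] -> (np.uint8(0x03), np.uint8)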
+ + """ type_list = [] - field_list = [] + field_name_counts = defaultdict(int) flags_dict = OrderedDict() flags2_dict = OrderedDict() for colID in colIDs: if colID in VMPdata_colID_flag_map: - if 'flags' not in field_list: - type_list.append('u1') - field_list.append('flags') + # Some column IDs represent boolean flags or small integers + # These are all packed into a single 'flags' byte whose position + # in the overall record is determined by the position of the first + # column ID of flag type. If there are several flags present, + # there is still only one 'flags' int + if 'flags' not in field_name_counts: + type_list.append(('flags', 'u1')) + field_name_counts['flags'] = 1 flag_name, flag_mask, flag_type = VMPdata_colID_flag_map[colID] flags_dict[flag_name] = (np.uint8(flag_mask), flag_type) + elif colID in VMPdata_colID_dtype_map: + field_name, field_type = VMPdata_colID_dtype_map[colID] + field_name_counts[field_name] += 1 + count = field_name_counts[field_name] + if count > 1: + unique_field_name = '%s %d' % (field_name, count) + else: + unique_field_name = field_name + type_list.append((unique_field_name, field_type)) else: - try: - field = VMPdata_colID_dtype_map[colID][0] - if field in field_list: - field += str(len(field_list)) - field_list.append(field) - type_list.append(VMPdata_colID_dtype_map[colID][1]) - except KeyError: - print(list(zip(field_list, type_list))) - raise NotImplementedError("column type %d not implemented" - % colID) - return np.dtype(list(zip(field_list, type_list))), flags_dict, flags2_dict + raise NotImplementedError("column type %d not implemented" % colID) + return np.dtype(type_list), flags_dict, flags2_dict def read_VMP_modules(fileobj, read_module_data=True): From c401aca741ab57a0eae3d9431ad1972b12a2f9a0 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Fri, 3 May 2019 19:15:34 +0200 Subject: [PATCH 38/65] Get rid of flags2_dict as flags2 doesn't actually exist --- galvani/BioLogic.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index dbee311..e644e68 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -216,7 +216,6 @@ def VMPdata_dtype_from_colIDs(colIDs): type_list = [] field_name_counts = defaultdict(int) flags_dict = OrderedDict() - flags2_dict = OrderedDict() for colID in colIDs: if colID in VMPdata_colID_flag_map: # Some column IDs represent boolean flags or small integers @@ -228,6 +227,9 @@ def VMPdata_dtype_from_colIDs(colIDs): type_list.append(('flags', 'u1')) field_name_counts['flags'] = 1 flag_name, flag_mask, flag_type = VMPdata_colID_flag_map[colID] + # TODO what happens if a flag colID has already been seen + # i.e. if flag_name is already present in flags_dict? + # Does it create a second 'flags' byte in the record? 
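+            # (Illustration, not part of this change: a packed column is
+            # later recovered as
+            #     np.array(data['flags'] & flag_mask, dtype=flag_type)
+            # -- see MPRfile.get_flag below.)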
flags_dict[flag_name] = (np.uint8(flag_mask), flag_type) elif colID in VMPdata_colID_dtype_map: field_name, field_type = VMPdata_colID_dtype_map[colID] @@ -240,7 +242,7 @@ def VMPdata_dtype_from_colIDs(colIDs): type_list.append((unique_field_name, field_type)) else: raise NotImplementedError("column type %d not implemented" % colID) - return np.dtype(type_list), flags_dict, flags2_dict + return np.dtype(type_list), flags_dict def read_VMP_modules(fileobj, read_module_data=True): @@ -336,7 +338,7 @@ class MPRfile: else: assert(not any(remaining_headers)) - self.dtype, self.flags_dict, self.flags2_dict = VMPdata_dtype_from_colIDs(column_types) + self.dtype, self.flags_dict = VMPdata_dtype_from_colIDs(column_types) self.data = np.frombuffer(main_data, dtype=self.dtype) assert(self.data.shape[0] == n_data_points) @@ -407,8 +409,5 @@ class MPRfile: if flagname in self.flags_dict: mask, dtype = self.flags_dict[flagname] return np.array(self.data['flags'] & mask, dtype=dtype) - elif flagname in self.flags2_dict: - mask, dtype = self.flags2_dict[flagname] - return np.array(self.data['flags2'] & mask, dtype=dtype) else: raise AttributeError("Flag '%s' not present" % flagname) From 61e2ac8f5778af49126c398dc8f8909594ece70a Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Fri, 3 May 2019 19:46:45 +0200 Subject: [PATCH 39/65] Added unit tests for the VMPdata_dtype_from_colIDs function --- tests/test_BioLogic.py | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index f42e85d..c8bf163 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -8,7 +8,7 @@ import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest -from galvani import MPTfile, MPRfile +from galvani import BioLogic, MPTfile, MPRfile from galvani.BioLogic import MPTfileCSV, str3 # not exported testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata') @@ -44,6 +44,37 @@ def test_open_MPT_csv_fails_for_bad_file(): MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr')) +def test_colID_map_uniqueness(): + """Check some uniqueness properties of the VMPdata_colID_xyz maps.""" + field_colIDs = set(BioLogic.VMPdata_colID_dtype_map.keys()) + flag_colIDs = set(BioLogic.VMPdata_colID_flag_map.keys()) + field_names = [v[0] for v in BioLogic.VMPdata_colID_dtype_map.values()] + flag_names = [v[0] for v in BioLogic.VMPdata_colID_flag_map.values()] + assert not field_colIDs.intersection(flag_colIDs) + # 'I/mA' and 'dQ/mA.h' are duplicated + # assert len(set(field_names)) == len(field_names) + assert len(set(flag_names)) == len(flag_names) + assert not set(field_names).intersection(flag_names) + + +@pytest.mark.parametrize('colIDs, expected', [ + ([1, 2, 3], [('flags', 'u1')]), + ([4, 6], [('time/s', ' Date: Fri, 3 May 2019 20:30:12 +0200 Subject: [PATCH 40/65] Fixed some flake8 warnings in BioLogic.py --- galvani/BioLogic.py | 47 ++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 78229d7..da3519c 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -71,19 +71,20 @@ def MPTfile(file_or_path): raise ValueError("Bad first line for EC-Lab file: '%s'" % magic) # TODO use rb'string' here once Python 2 is no longer supported - nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file)) + nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', + next(mpt_file)) 
nb_headers = int(nb_headers_match.group(1)) if nb_headers < 3: raise ValueError("Too few header lines: %d" % nb_headers) - ## The 'magic number' line, the 'Nb headers' line and the column headers - ## make three lines. Every additional line is a comment line. + # The 'magic number' line, the 'Nb headers' line and the column headers + # make three lines. Every additional line is a comment line. comments = [next(mpt_file) for i in range(nb_headers - 3)] fieldnames = str3(next(mpt_file)).strip().split('\t') record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames))) - ## Must be able to parse files where commas are used for decimal points + # Must be able to parse files where commas are used for decimal points converter_dict = dict(((i, comma_converter) for i in range(len(fieldnames)))) mpt_array = np.loadtxt(mpt_file, dtype=record_type, @@ -113,8 +114,8 @@ def MPTfileCSV(file_or_path): if nb_headers < 3: raise ValueError("Too few header lines: %d" % nb_headers) - ## The 'magic number' line, the 'Nb headers' line and the column headers - ## make three lines. Every additional line is a comment line. + # The 'magic number' line, the 'Nb headers' line and the column headers + # make three lines. Every additional line is a comment line. comments = [next(mpt_file) for i in range(nb_headers - 3)] mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab') @@ -270,7 +271,8 @@ def read_VMP_modules(fileobj, read_module_data=True): if len(module_magic) == 0: # end of file break elif module_magic != b'MODULE': - raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic) + raise ValueError("Found %r, expecting start of new VMP MODULE" + % module_magic) hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize) if len(hdr_bytes) < VMPmodule_hdr.itemsize: @@ -294,6 +296,9 @@ def read_VMP_modules(fileobj, read_module_data=True): fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET) +MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00' + + class MPRfile: """Bio-Logic .mpr file @@ -316,10 +321,8 @@ class MPRfile: mpr_file = open(file_or_path, 'rb') else: mpr_file = file_or_path - - mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a \x00\x00\x00\x00' - magic = mpr_file.read(len(mpr_magic)) - if magic != mpr_magic: + magic = mpr_file.read(len(MPR_MAGIC)) + if magic != MPR_MAGIC: raise ValueError('Invalid magic for .mpr file: %s' % magic) modules = list(read_VMP_modules(mpr_file)) @@ -340,7 +343,7 @@ class MPRfile: elif data_module['version'] == 2: column_types = np.frombuffer(data_module['data'][5:], dtype=' 40000 and ole_timestamp4 < 50000: ole_timestamp = ole_timestamp4 - + else: raise ValueError("Could not find timestamp in the LOG module") @@ -414,10 +417,10 @@ class MPRfile: ole_timedelta = timedelta(days=ole_timestamp[0]) self.timestamp = ole_base + ole_timedelta if self.startdate != self.timestamp.date(): - raise ValueError("""Date mismatch: - Start date: %s - End date: %s - Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp)) + raise ValueError("Date mismatch:\n" + + " Start date: %s\n" % self.startdate + + " End date: %s\n" % self.enddate + + " Timestamp: %s\n" % self.timestamp) def get_flag(self, flagname): if flagname in self.flags_dict: From 2a36713b064777e5ca94975c5041bf447abf3f66 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Fri, 3 May 2019 20:37:27 +0200 Subject: [PATCH 41/65] Fixed some flake8 warnings in res2sqlite.py --- galvani/res2sqlite.py | 71 ++++++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 28 deletions(-) diff 
--git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index 0234a7b..843129c 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -7,8 +7,8 @@ import csv import argparse -## The following scripts are adapted from the result of running -## $ mdb-schema oracle +# The following scripts are adapted from the result of running +# $ mdb-schema oracle mdb_tables = ["Version_Table", "Global_Table", "Resume_Table", "Channel_Normal_Table", "Channel_Statistic_Table", @@ -126,7 +126,8 @@ CREATE TABLE Channel_Statistic_Table -- Version 1.14 ends here, version 5.23 continues Charge_Time REAL DEFAULT NULL, Discharge_Time REAL DEFAULT NULL, - FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point) + FOREIGN KEY (Test_ID, Data_Point) + REFERENCES Channel_Normal_Table (Test_ID, Data_Point) ); """, "Auxiliary_Table": """ CREATE TABLE Auxiliary_Table @@ -137,7 +138,8 @@ CREATE TABLE Auxiliary_Table Data_Type INTEGER, X REAL, "dX/dt" REAL, - FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point) + FOREIGN KEY (Test_ID, Data_Point) + REFERENCES Channel_Normal_Table (Test_ID, Data_Point) ); """, "Event_Table": """ CREATE TABLE Event_Table @@ -220,7 +222,8 @@ CREATE TABLE Smart_Battery_Data_Table ChargingCurrent REAL DEFAULT NULL, ChargingVoltage REAL DEFAULT NULL, ManufacturerData REAL DEFAULT NULL, - FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point) + FOREIGN KEY (Test_ID, Data_Point) + REFERENCES Channel_Normal_Table (Test_ID, Data_Point) ); """, ## The following tables are not present in version 1.14 'MCell_Aci_Data_Table': """ @@ -233,7 +236,8 @@ CREATE TABLE MCell_Aci_Data_Table Phase_Shift REAL, Voltage REAL, Current REAL, - FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point) + FOREIGN KEY (Test_ID, Data_Point) + REFERENCES Channel_Normal_Table (Test_ID, Data_Point) );""", 'Aux_Global_Data_Table': """ CREATE TABLE Aux_Global_Data_Table @@ -288,7 +292,8 @@ CREATE TABLE Smart_Battery_Clock_Stretch_Table VCELL3 INTEGER, VCELL2 INTEGER, VCELL1 INTEGER, - FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point) + FOREIGN KEY (Test_ID, Data_Point) + REFERENCES Channel_Normal_Table (Test_ID, Data_Point) );"""} mdb_create_indices = { @@ -306,18 +311,21 @@ CREATE TEMPORARY TABLE capacity_helper( Discharge_Capacity REAL NOT NULL, Charge_Energy REAL NOT NULL, Discharge_Energy REAL NOT NULL, - FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index) + FOREIGN KEY (Test_ID, Cycle_Index) + REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index) ); -INSERT INTO capacity_helper - SELECT Test_ID, Cycle_Index, max(Charge_Capacity), max(Discharge_Capacity), max(Charge_Energy), max(Discharge_Energy) - FROM Channel_Normal_Table +INSERT INTO capacity_helper + SELECT Test_ID, Cycle_Index, + max(Charge_Capacity), max(Discharge_Capacity), + max(Charge_Energy), max(Discharge_Energy) + FROM Channel_Normal_Table GROUP BY Test_ID, Cycle_Index; --- ## Alternative way of selecting ## --- select * --- from Channel_Normal_Table as a join Channel_Normal_Table as b --- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1 +-- ## Alternative way of selecting ## +-- select * +-- from Channel_Normal_Table as a join Channel_Normal_Table as b +-- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1 -- and a.Charge_Capacity < b.Charge_Capacity); DROP TABLE IF EXISTS Capacity_Sum_Table; @@ -328,12 +336,15 @@ CREATE TABLE 
Capacity_Sum_Table( Discharge_Capacity_Sum REAL NOT NULL, Charge_Energy_Sum REAL NOT NULL, Discharge_Energy_Sum REAL NOT NULL, - FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index) + FOREIGN KEY (Test_ID, Cycle_Index) + REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index) ); -INSERT INTO Capacity_Sum_Table - SELECT a.Test_ID, a.Cycle_Index, total(b.Charge_Capacity), total(b.Discharge_Capacity), total(b.Charge_Energy), total(b.Discharge_Energy) - FROM capacity_helper AS a LEFT JOIN capacity_helper AS b +INSERT INTO Capacity_Sum_Table + SELECT a.Test_ID, a.Cycle_Index, + total(b.Charge_Capacity), total(b.Discharge_Capacity), + total(b.Charge_Energy), total(b.Discharge_Energy) + FROM capacity_helper AS a LEFT JOIN capacity_helper AS b ON (a.Test_ID = b.Test_ID AND a.Cycle_Index > b.Cycle_Index) GROUP BY a.Test_ID, a.Cycle_Index; @@ -352,14 +363,17 @@ CREATE VIEW IF NOT EXISTS Capacity_View def mdb_get_data_text(s3db, filename, table): print("Reading %s..." % table) + insert_pattern = re.compile( + r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n', + re.IGNORECASE + ) try: mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], bufsize=-1, stdin=None, stdout=sp.PIPE, universal_newlines=True) mdb_output = mdb_sql.stdout.read() while len(mdb_output) > 0: - insert_match = re.match(r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n', - mdb_output, re.IGNORECASE) + insert_match = insert_pattern.match(mdb_output) s3db.execute(insert_match.group()) mdb_output = mdb_output[insert_match.end():] s3db.commit() @@ -407,28 +421,29 @@ def convert_arbin_to_sqlite(input_file, output_file): Any data currently in the sqlite file will be erased! """ s3db = sqlite3.connect(output_file) - - + for table in reversed(mdb_tables + mdb_5_23_tables): s3db.execute('DROP TABLE IF EXISTS "%s";' % table) - + for table in mdb_tables: s3db.executescript(mdb_create_scripts[table]) mdb_get_data(s3db, input_file, table) if table in mdb_create_indices: print("Creating indices for %s..." 
% table) s3db.executescript(mdb_create_indices[table]) - - if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"): + + csr = s3db.execute("SELECT Version_Schema_Field FROM Version_Table;") + version_text, = csr.fetchone() + if (version_text == "Results File 5.23"): for table in mdb_5_23_tables: s3db.executescript(mdb_create_scripts[table]) mdb_get_data(input_file, table) if table in mdb_create_indices: s3db.executescript(mdb_create_indices[table]) - + print("Creating helper table for capacity and energy totals...") s3db.executescript(helper_table_script) - + print("Vacuuming database...") s3db.executescript("VACUUM; ANALYZE;") From 1c8335289acbcf85083eb808278dfc0943050884 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 12 May 2019 08:33:00 +0200 Subject: [PATCH 42/65] Add flake8 configuration --- .flake8 | 2 ++ tox.ini | 3 +++ 2 files changed, 5 insertions(+) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..ac8f3fb --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +# This file will be ignored - see http://flake8.pycqa.org/en/2.6.0/config.html#per-project +# Edit the [flake8] section in tox.ini instead diff --git a/tox.ini b/tox.ini index ca5b805..1657e13 100644 --- a/tox.ini +++ b/tox.ini @@ -3,3 +3,6 @@ envlist = py27,py35,py37 [testenv] deps=pytest commands=pytest + +[flake8] +exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__ From ed43de132601b842cc09ad40801152fa8d2f52cd Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 12 May 2019 08:35:41 +0200 Subject: [PATCH 43/65] Fix flake8 warnings on comments style --- galvani/res2sqlite.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index 843129c..1ad5314 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -225,7 +225,7 @@ CREATE TABLE Smart_Battery_Data_Table FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point) ); """, - ## The following tables are not present in version 1.14 + # The following tables are not present in version 1.14 'MCell_Aci_Data_Table': """ CREATE TABLE MCell_Aci_Data_Table ( From d137bfccef2ee8411fa3ff2cdfe8e02564aa2a61 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 12 May 2019 09:10:41 +0200 Subject: [PATCH 44/65] Add flake8 to tox.ini --- tox.ini | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 1657e13..a6ded79 100644 --- a/tox.ini +++ b/tox.ini @@ -1,8 +1,12 @@ [tox] envlist = py27,py35,py37 [testenv] -deps=pytest -commands=pytest +deps = + flake8 + pytest +commands = + flake8 + pytest [flake8] exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__ From 3440047dc271369efcd4b8d3972a20afe4f1311d Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 12 May 2019 08:44:53 +0200 Subject: [PATCH 45/65] Fixed flake8 warning about lambda assignment --- galvani/BioLogic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 4870330..4eeac7f 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -10,6 +10,7 @@ from os import SEEK_SET import time from datetime import date, datetime, timedelta from collections import defaultdict, OrderedDict +import functools import numpy as np @@ -18,7 +19,7 @@ if sys.version_info.major <= 2: str3 = str from string import maketrans else: - str3 = lambda b: str(b, encoding='ascii') + str3 = functools.partial(str, encoding='ascii') maketrans = 
bytes.maketrans From 8abab57c06ad18d5afaeaba54909841a6d266650 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 12 May 2019 09:20:33 +0200 Subject: [PATCH 46/65] Fixed some more flake8 warnings --- galvani/__init__.py | 4 +++- galvani/res2sqlite.py | 9 ++++++--- tests/test_BioLogic.py | 6 +++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/galvani/__init__.py b/galvani/__init__.py index 1949033..ce98e1d 100644 --- a/galvani/__init__.py +++ b/galvani/__init__.py @@ -1 +1,3 @@ -from .BioLogic import MPTfile, MPRfile +from .BioLogic import MPRfile, MPTfile + +__all__ = ['MPRfile', 'MPTfile'] diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index 1ad5314..793092a 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -377,7 +377,7 @@ def mdb_get_data_text(s3db, filename, table): s3db.execute(insert_match.group()) mdb_output = mdb_output[insert_match.end():] s3db.commit() - except: + except BaseException: print("Error while importing %s" % table) print("Remaining mdb-export output:", mdb_output) if insert_match: @@ -398,8 +398,11 @@ def mdb_get_data_numeric(s3db, filename, table): quoted_headers = ['"%s"' % h for h in mdb_headers] joined_headers = ', '.join(quoted_headers) joined_placemarks = ', '.join(['?' for h in mdb_headers]) - insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(table, - joined_headers, joined_placemarks) + insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format( + table, + joined_headers, + joined_placemarks, + ) s3db.executemany(insert_stmt, mdb_csv) s3db.commit() finally: diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index c8bf163..7fe2cb6 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -129,7 +129,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments): assert_array_equal(mpr.get_flag("control changes"), mpt["control changes"]) if "Ns changes" in mpt.dtype.fields: assert_array_equal(mpr.get_flag("Ns changes"), mpt["Ns changes"]) - ## Nothing uses the 0x40 bit of the flags + # Nothing uses the 0x40 bit of the flags assert_array_equal(mpr.get_flag("counter inc."), mpt["counter inc."]) assert_array_almost_equal(mpr.data["time/s"], @@ -146,10 +146,10 @@ def assert_MPR_matches_MPT(mpr, mpt, comments): assert_field_matches("dQ/mA.h", decimal=17) # 64 bit float precision assert_field_matches("P/W", decimal=10) # 32 bit float precision for 1.xxE-5 assert_field_matches("I/mA", decimal=6) # 32 bit float precision - + assert_field_exact("cycle number") assert_field_matches("(Q-Qo)/C", decimal=6) # 32 bit float precision - + try: assert timestamp_from_comments(comments) == mpr.timestamp except AttributeError: From aab135391a377c616e9886cc676f8bab05baf342 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Wed, 15 May 2019 07:09:11 +0200 Subject: [PATCH 47/65] Change max-line-length to 100 and refactor all longer lines --- galvani/res2sqlite.py | 16 +++++++++++----- tox.ini | 1 + 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index 793092a..f31193f 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -353,10 +353,14 @@ DROP TABLE capacity_helper; CREATE VIEW IF NOT EXISTS Capacity_View AS SELECT Test_ID, Data_Point, Test_Time, Step_Time, DateTime, Step_Index, Cycle_Index, Current, Voltage, "dV/dt", - Discharge_Capacity + Discharge_Capacity_Sum - Charge_Capacity - Charge_Capacity_Sum AS Net_Capacity, - Discharge_Capacity + Discharge_Capacity_Sum + Charge_Capacity + Charge_Capacity_Sum AS Gross_Capacity, - Discharge_Energy + 
Discharge_Energy_Sum - Charge_Energy - Charge_Energy_Sum AS Net_Energy, - Discharge_Energy + Discharge_Energy_Sum + Charge_Energy + Charge_Energy_Sum AS Gross_Energy + ( (Discharge_Capacity + Discharge_Capacity_Sum) + - (Charge_Capacity + Charge_Capacity_Sum) ) AS Net_Capacity, + ( (Discharge_Capacity + Discharge_Capacity_Sum) + + (Charge_Capacity + Charge_Capacity_Sum) ) AS Gross_Capacity, + ( (Discharge_Energy + Discharge_Energy_Sum) + - (Charge_Energy + Charge_Energy_Sum) ) AS Net_Energy, + ( (Discharge_Energy + Discharge_Energy_Sum) + + (Charge_Energy + Charge_Energy_Sum) ) AS Gross_Energy FROM Channel_Normal_Table NATURAL JOIN Capacity_Sum_Table; """ @@ -452,7 +456,9 @@ def convert_arbin_to_sqlite(input_file, output_file): def main(argv=None): - parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export") + parser = argparse.ArgumentParser( + description="Convert Arbin .res files to sqlite3 databases using mdb-export", + ) parser.add_argument('input_file', type=str) # need file name to pass to sp.Popen parser.add_argument('output_file', type=str) # need file name to pass to sqlite3.connect diff --git a/tox.ini b/tox.ini index a6ded79..8ccfc09 100644 --- a/tox.ini +++ b/tox.ini @@ -10,3 +10,4 @@ commands = [flake8] exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__ +max-line-length = 100 From 0c0b48ddcc0f01534455f8fa840537b6a0ea8d05 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 2 Jun 2019 13:34:39 +0200 Subject: [PATCH 48/65] Release version 0.1.0 (#33) Update package URL to the new echemdata repo --- setup.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index d881fd3..226bb44 100644 --- a/setup.py +++ b/setup.py @@ -9,11 +9,11 @@ with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f: setup( name='galvani', - version='0.0.2', + version='0.1.0', description='Open and process battery charger log data files', long_description=readme, long_description_content_type="text/markdown", - url='https://github.com/chatcannon/galvani', + url='https://github.com/echemdata/galvani', author='Chris Kerr', author_email='chris.kerr@mykolab.ch', license='GPLv3+', @@ -25,9 +25,11 @@ setup( 'Natural Language :: English', ], packages=['galvani'], - entry_points={'console_scripts': [ + entry_points={ + 'console_scripts': [ 'res2sqlite = galvani.res2sqlite:main', - ]}, + ], + }, install_requires=['numpy'], tests_require=['pytest'], ) From 72d79146e65b4f7fc598d78cb3df07824bd65b92 Mon Sep 17 00:00:00 2001 From: bcolsen Date: Tue, 8 Oct 2019 13:23:18 -0600 Subject: [PATCH 49/65] Added initial support for VMP data module version 3 --- galvani/BioLogic.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 4eeac7f..be2996f 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -180,6 +180,7 @@ VMPdata_colID_dtype_map = { 172: ('Cp/µF', '/V', ' Date: Sun, 16 Feb 2020 09:33:24 +0200 Subject: [PATCH 50/65] Remove Python 2 from tox and travis tests --- .travis.yml | 1 - tox.ini | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c4301f2..7f81f49 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,7 +6,6 @@ cache: - .pytest_cache - tests/testdata python: - - "2.7" - "3.5" # - "3.7" # Python 3.7 is not available on travis CI yet install: diff --git a/tox.ini b/tox.ini index 8ccfc09..5573e87 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = 
py27,py35,py37 +envlist = py35,py37 [testenv] deps = flake8 From 9ba43ecc2e48bbf8564a4e7006ee3095cc8032c5 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 16 Feb 2020 09:46:06 +0200 Subject: [PATCH 51/65] Add python_requires to setup.py --- setup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.py b/setup.py index 226bb44..9b3fa91 100644 --- a/setup.py +++ b/setup.py @@ -23,6 +23,8 @@ setup( 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Natural Language :: English', + 'Programming Language :: Python :: 3 :: Only', + 'Topic :: Scientific/Engineering :: Chemistry', ], packages=['galvani'], entry_points={ @@ -30,6 +32,7 @@ setup( 'res2sqlite = galvani.res2sqlite:main', ], }, + python_requires='>=3.5', install_requires=['numpy'], tests_require=['pytest'], ) From c2e7a1602f153cb08c908e96f10578bbce731bb3 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 16 Feb 2020 09:49:40 +0200 Subject: [PATCH 52/65] Remove maketrans compatibility code --- galvani/BioLogic.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index be2996f..3191c07 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -17,10 +17,8 @@ import numpy as np if sys.version_info.major <= 2: str3 = str - from string import maketrans else: str3 = functools.partial(str, encoding='ascii') - maketrans = bytes.maketrans def fieldname_to_dtype(fieldname): @@ -49,10 +47,10 @@ def fieldname_to_dtype(fieldname): raise ValueError("Invalid column header: %s" % fieldname) -def comma_converter(float_string): - """Convert numbers to floats whether the decimal point is '.' or ','""" - trans_table = maketrans(b',', b'.') - return float(float_string.translate(trans_table)) +def comma_converter(float_text): + """Convert text to float whether the decimal point is '.' or ','""" + trans_table = bytes.maketrans(b',', b'.') + return float(float_text.translate(trans_table)) def MPTfile(file_or_path): From 87825b78917b1a6c17f7722aec393cdda6678c97 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 16 Feb 2020 09:59:46 +0200 Subject: [PATCH 53/65] Remove str3 compatibility function --- galvani/BioLogic.py | 19 ++++++------------- tests/test_BioLogic.py | 4 ++-- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 3191c07..58c7b51 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -10,17 +10,10 @@ from os import SEEK_SET import time from datetime import date, datetime, timedelta from collections import defaultdict, OrderedDict -import functools import numpy as np -if sys.version_info.major <= 2: - str3 = str -else: - str3 = functools.partial(str, encoding='ascii') - - def fieldname_to_dtype(fieldname): """Converts a column header from the MPT file into a tuple of canonical name and appropriate numpy dtype""" @@ -53,7 +46,7 @@ def comma_converter(float_text): return float(float_text.translate(trans_table)) -def MPTfile(file_or_path): +def MPTfile(file_or_path, encoding='ascii'): """Opens .mpt files as numpy record arrays Checks for the correct headings, skips any comments and returns a @@ -80,7 +73,7 @@ def MPTfile(file_or_path): # make three lines. Every additional line is a comment line. 
comments = [next(mpt_file) for i in range(nb_headers - 3)] - fieldnames = str3(next(mpt_file)).strip().split('\t') + fieldnames = next(mpt_file).decode(encoding).strip().split('\t') record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames))) # Must be able to parse files where commas are used for decimal points @@ -356,9 +349,9 @@ class MPRfile: self.npts = n_data_points try: - tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y') + tm = time.strptime(settings_mod['date'].decode('ascii'), '%m/%d/%y') except ValueError: - tm = time.strptime(str3(settings_mod['date']), '%m-%d-%y') + tm = time.strptime(settings_mod['date'].decode('ascii'), '%m-%d-%y') self.startdate = date(tm.tm_year, tm.tm_mon, tm.tm_mday) if maybe_loop_module: @@ -374,9 +367,9 @@ class MPRfile: if maybe_log_module: log_module, = maybe_log_module try: - tm = time.strptime(str3(log_module['date']), '%m/%d/%y') + tm = time.strptime(log_module['date'].decode('ascii'), '%m/%d/%y') except ValueError: - tm = time.strptime(str3(log_module['date']), '%m-%d-%y') + tm = time.strptime(log_module['date'].decode('ascii'), '%m-%d-%y') self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday) # There is a timestamp at either 465 or 469 bytes diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py index ac0694b..4ee5f94 100644 --- a/tests/test_BioLogic.py +++ b/tests/test_BioLogic.py @@ -9,7 +9,7 @@ from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest from galvani import BioLogic, MPTfile, MPRfile -from galvani.BioLogic import MPTfileCSV, str3 # not exported +from galvani.BioLogic import MPTfileCSV # not exported def test_open_MPT(testdata_dir): @@ -103,7 +103,7 @@ def timestamp_from_comments(comments): for line in comments: time_match = re.match(b'Acquisition started on : ([0-9/]+ [0-9:]+)', line) if time_match: - timestamp = datetime.strptime(str3(time_match.group(1)), + timestamp = datetime.strptime(time_match.group(1).decode('ascii'), '%m/%d/%Y %H:%M:%S') return timestamp raise AttributeError("No timestamp in comments") From 599413c42f494345ee1609557c91f97202a77f11 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 16 Feb 2020 10:01:53 +0200 Subject: [PATCH 54/65] Remove remaining Python 2 compatibility from BioLogic code --- galvani/BioLogic.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 58c7b51..29e50f0 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -3,7 +3,6 @@ __all__ = ['MPTfileCSV', 'MPTfile'] -import sys import re import csv from os import SEEK_SET @@ -62,8 +61,7 @@ def MPTfile(file_or_path, encoding='ascii'): if magic != b'EC-Lab ASCII FILE\r\n': raise ValueError("Bad first line for EC-Lab file: '%s'" % magic) - # TODO use rb'string' here once Python 2 is no longer supported - nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', + nb_headers_match = re.match(rb'Nb header lines : (\d+)\s*$', next(mpt_file)) nb_headers = int(nb_headers_match.group(1)) if nb_headers < 3: @@ -333,10 +331,7 @@ class MPRfile: raise ValueError("Unrecognised version for data module: %d" % data_module['version']) - if sys.version_info.major <= 2: - assert(all((b == '\x00' for b in remaining_headers))) - else: - assert(not any(remaining_headers)) + assert(not any(remaining_headers)) self.dtype, self.flags_dict = VMPdata_dtype_from_colIDs(column_types) self.data = np.frombuffer(main_data, dtype=self.dtype) From b2fb092ea3246ebb70d367f4b144dc4b61ef3599 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 16 
Feb 2020 14:28:57 +0200 Subject: [PATCH 55/65] Remove compability code for Python 2 subprocess module --- galvani/res2sqlite.py | 65 ++++++++++++++++++++----------------------- tests/test_Arbin.py | 4 +-- 2 files changed, 32 insertions(+), 37 deletions(-) diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py index a72fa84..eff978a 100755 --- a/galvani/res2sqlite.py +++ b/galvani/res2sqlite.py @@ -371,27 +371,28 @@ def mdb_get_data_text(s3db, filename, table): r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n', re.IGNORECASE ) - # TODO after dropping Python 2 support - use Popen as contextmanager try: - mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table], - bufsize=-1, stdin=None, stdout=sp.PIPE, - universal_newlines=True) + # Initialize values to avoid NameError in except clause + mdb_output = '' + insert_match = None + with sp.Popen(['mdb-export', '-I', 'postgres', filename, table], + bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE, + universal_newlines=True) as mdb_sql: + + mdb_output = mdb_sql.stdout.read() + while len(mdb_output) > 0: + insert_match = insert_pattern.match(mdb_output) + s3db.execute(insert_match.group()) + mdb_output = mdb_output[insert_match.end():] + mdb_output += mdb_sql.stdout.read() + s3db.commit() + except OSError as e: if e.errno == 2: raise RuntimeError('Could not locate the `mdb-export` executable. ' 'Check that mdbtools is properly installed.') else: raise - try: - # Initialize values to avoid NameError in except clause - mdb_output = '' - insert_match = None - mdb_output = mdb_sql.stdout.read() - while len(mdb_output) > 0: - insert_match = insert_pattern.match(mdb_output) - s3db.execute(insert_match.group()) - mdb_output = mdb_output[insert_match.end():] - s3db.commit() except BaseException: print("Error while importing %s" % table) if mdb_output: @@ -399,38 +400,32 @@ def mdb_get_data_text(s3db, filename, table): if insert_match: print("insert_re match:", insert_match) raise - finally: - mdb_sql.terminate() def mdb_get_data_numeric(s3db, filename, table): print("Reading %s..." % table) - # TODO after dropping Python 2 support - use Popen as contextmanager try: - mdb_sql = sp.Popen(['mdb-export', filename, table], - bufsize=-1, stdin=None, stdout=sp.PIPE, - universal_newlines=True) + with sp.Popen(['mdb-export', filename, table], + bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE, + universal_newlines=True) as mdb_sql: + mdb_csv = csv.reader(mdb_sql.stdout) + mdb_headers = next(mdb_csv) + quoted_headers = ['"%s"' % h for h in mdb_headers] + joined_headers = ', '.join(quoted_headers) + joined_placemarks = ', '.join(['?' for h in mdb_headers]) + insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format( + table, + joined_headers, + joined_placemarks, + ) + s3db.executemany(insert_stmt, mdb_csv) + s3db.commit() except OSError as e: if e.errno == 2: raise RuntimeError('Could not locate the `mdb-export` executable. ' 'Check that mdbtools is properly installed.') else: raise - try: - mdb_csv = csv.reader(mdb_sql.stdout) - mdb_headers = next(mdb_csv) - quoted_headers = ['"%s"' % h for h in mdb_headers] - joined_headers = ', '.join(quoted_headers) - joined_placemarks = ', '.join(['?' 
for h in mdb_headers]) - insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format( - table, - joined_headers, - joined_placemarks, - ) - s3db.executemany(insert_stmt, mdb_csv) - s3db.commit() - finally: - mdb_sql.terminate() def mdb_get_data(s3db, filename, table): diff --git a/tests/test_Arbin.py b/tests/test_Arbin.py index f21bd50..c2210eb 100644 --- a/tests/test_Arbin.py +++ b/tests/test_Arbin.py @@ -9,8 +9,8 @@ import pytest from galvani import res2sqlite -# TODO - change to subprocess.DEVNULL when python 2 support is removed -have_mdbtools = (subprocess.call(['which', 'mdb-export'], stdout=None) == 0) +have_mdbtools = (subprocess.call(['which', 'mdb-export'], + stdout=subprocess.DEVNULL) == 0) def test_res2sqlite_help(): From 81fbb3dde366ca0a6acb1aa06388d0049d6bad18 Mon Sep 17 00:00:00 2001 From: Chris Kerr Date: Sun, 16 Feb 2020 14:35:34 +0200 Subject: [PATCH 56/65] Test with all Python versions 3.5-3.8 --- .travis.yml | 4 +++- tox.ini | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7f81f49..8f313d3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,9 @@ cache: - tests/testdata python: - "3.5" -# - "3.7" # Python 3.7 is not available on travis CI yet + - "3.6" + - "3.7" + - "3.8" install: - pip install tox-travis - sh get_testdata.sh diff --git a/tox.ini b/tox.ini index 5573e87..591010a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py35,py37 +envlist = py35,py36,py37,py38 [testenv] deps = flake8 From c703f31da271c52b64f787a21dbbbfa5f498828d Mon Sep 17 00:00:00 2001 From: nhshetty-99 <61260096+nhshetty-99@users.noreply.github.com> Date: Sat, 27 Jun 2020 22:40:31 -0400 Subject: [PATCH 57/65] Added colIDs 74 and 462 to VMPdata_colID_dtype_map --- galvani/BioLogic.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index be2996f..c67a2d6 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -176,6 +176,8 @@ VMPdata_colID_dtype_map = { 125: ('Capacitance charge/µF', ' Date: Sun, 28 Jun 2020 13:06:11 -0400 Subject: [PATCH 58/65] Changed colID 74 and 462 order from original addition --- galvani/BioLogic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index c67a2d6..455a4e7 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -169,6 +169,7 @@ VMPdata_colID_dtype_map = { 38: ('-Im(Z)/Ohm', '/mA', '/V', '/V', ' Date: Fri, 3 Jul 2020 14:15:37 -0400 Subject: [PATCH 59/65] Added column 469 to BioLogic.py --- galvani/BioLogic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 455a4e7..0696058 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -185,6 +185,7 @@ VMPdata_colID_dtype_map = { 462: ('Temperature/°C', ' Date: Mon, 21 Oct 2019 15:28:06 -0600 Subject: [PATCH 60/65] Added RCMP column and column debug info --- galvani/BioLogic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 3bffec9..bb36234 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -166,6 +166,7 @@ VMPdata_colID_dtype_map = { 125: ('Capacitance charge/µF', ' Date: Tue, 7 Jul 2020 17:54:21 -0600 Subject: [PATCH 61/65] changed error to report previous column name --- galvani/BioLogic.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index bb36234..1758ab5 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -237,10 +237,8 @@ 
def VMPdata_dtype_from_colIDs(colIDs): unique_field_name = field_name type_list.append((unique_field_name, field_type)) else: - print('ColIDs: ', - [colID for colID in colIDs if colID not in VMPdata_colID_flag_map]) - print('Types: ', type_list[1:]) - raise NotImplementedError("column type %d not implemented" % colID) + raise NotImplementedError("Column ID {cid} after column {prev} " + "is unknown".format(cid=colID, prev=type_list[-1][0])) return np.dtype(type_list), flags_dict From 18e8a450fa5e8ea95c53cf80c945114f84a8191b Mon Sep 17 00:00:00 2001 From: bcolsen Date: Tue, 7 Jul 2020 18:07:30 -0600 Subject: [PATCH 62/65] pep8 fix --- galvani/BioLogic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 1758ab5..0a0db5f 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -238,7 +238,7 @@ def VMPdata_dtype_from_colIDs(colIDs): type_list.append((unique_field_name, field_type)) else: raise NotImplementedError("Column ID {cid} after column {prev} " - "is unknown".format(cid=colID, prev=type_list[-1][0])) + "is unknown".format(cid=colID, prev=type_list[-1][0])) return np.dtype(type_list), flags_dict From 18a1ce6848e342b6275bde7a7d4ecbe1373b4cc9 Mon Sep 17 00:00:00 2001 From: bcolsen Date: Tue, 7 Jul 2020 18:13:53 -0600 Subject: [PATCH 63/65] fix pep --- galvani/BioLogic.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 0a0db5f..3654b02 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -238,7 +238,9 @@ def VMPdata_dtype_from_colIDs(colIDs): type_list.append((unique_field_name, field_type)) else: raise NotImplementedError("Column ID {cid} after column {prev} " - "is unknown".format(cid=colID, prev=type_list[-1][0])) + "is unknown" + .format(cid=colID, + prev=type_list[-1][0])) return np.dtype(type_list), flags_dict From 0c5348deebf04e6a4bcb67a7f644a795fe107603 Mon Sep 17 00:00:00 2001 From: Peter Attia Date: Mon, 14 Sep 2020 20:52:30 -0400 Subject: [PATCH 64/65] Update BioLogic.py Add more impedance-related entries to `VMPdata_colID_dtype_map` --- galvani/BioLogic.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py index 3654b02..5e90f19 100644 --- a/galvani/BioLogic.py +++ b/galvani/BioLogic.py @@ -161,6 +161,11 @@ VMPdata_colID_dtype_map = { 74: ('Energy/W.h', '/mA', '/V', '/V', '/V', ' Date: Thu, 8 Oct 2020 09:12:38 -0700 Subject: [PATCH 65/65] Update README.md Show quick example of how to use --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index c31ed89..d3f2eaf 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,14 @@ Read proprietary file formats from electrochemical test stations Use the `MPRfile` class from BioLogic.py (exported in the main package) +```` +from galvani import BioLogic +import pandas as pd + +mpr_file = BioLogic.MPRfile('test.mpr') +df = pd.DataFrame(mpr_file.data) +```` + ## Arbin .res files ## Use the res2sqlite.py script to convert the .res file to a sqlite3 database