From f2b62265b97d19ed6bfb3f788d24ffc095595453 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Fri, 3 May 2019 20:30:12 +0200
Subject: [PATCH 1/8] Fixed some flake8 warnings in BioLogic.py

---
 galvani/BioLogic.py | 47 ++++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py
index 78229d7..da3519c 100644
--- a/galvani/BioLogic.py
+++ b/galvani/BioLogic.py
@@ -71,19 +71,20 @@ def MPTfile(file_or_path):
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
 
     # TODO use rb'string' here once Python 2 is no longer supported
-    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file))
+    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$',
+                                next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)
 
-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]
 
     fieldnames = str3(next(mpt_file)).strip().split('\t')
     record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
 
-    ## Must be able to parse files where commas are used for decimal points
+    # Must be able to parse files where commas are used for decimal points
     converter_dict = dict(((i, comma_converter)
                            for i in range(len(fieldnames))))
     mpt_array = np.loadtxt(mpt_file, dtype=record_type,
@@ -113,8 +114,8 @@ def MPTfileCSV(file_or_path):
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)
 
-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]
 
     mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')
@@ -270,7 +271,8 @@ def read_VMP_modules(fileobj, read_module_data=True):
         if len(module_magic) == 0:  # end of file
             break
         elif module_magic != b'MODULE':
-            raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic)
+            raise ValueError("Found %r, expecting start of new VMP MODULE"
+                             % module_magic)
 
         hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
         if len(hdr_bytes) < VMPmodule_hdr.itemsize:
@@ -294,6 +296,9 @@ def read_VMP_modules(fileobj, read_module_data=True):
             fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)
 
 
+MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
+
+
 class MPRfile:
     """Bio-Logic .mpr file
 
@@ -316,10 +321,8 @@ class MPRfile:
             mpr_file = open(file_or_path, 'rb')
         else:
             mpr_file = file_or_path
-
-        mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a                         \x00\x00\x00\x00'
-        magic = mpr_file.read(len(mpr_magic))
-        if magic != mpr_magic:
+        magic = mpr_file.read(len(MPR_MAGIC))
+        if magic != MPR_MAGIC:
             raise ValueError('Invalid magic for .mpr file: %s' % magic)
 
         modules = list(read_VMP_modules(mpr_file))
@@ -340,7 +343,7 @@ class MPRfile:
         elif data_module['version'] == 2:
             column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
 [...]
             elif ole_timestamp4 > 40000 and ole_timestamp4 < 50000:
                 ole_timestamp = ole_timestamp4
-
+
             else:
                 raise ValueError("Could not find timestamp in the LOG module")
@@ -414,10 +417,10 @@ class MPRfile:
         ole_timedelta = timedelta(days=ole_timestamp[0])
         self.timestamp = ole_base + ole_timedelta
         if self.startdate != self.timestamp.date():
-            raise ValueError("""Date mismatch:
-                Start date: %s
-                End date: %s
-                Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp))
+            raise ValueError("Date mismatch:\n" +
+                             "  Start date: %s\n" % self.startdate +
+                             "  End date: %s\n" % self.enddate +
+                             "  Timestamp: %s\n" % self.timestamp)
 
     def get_flag(self, flagname):
         if flagname in self.flags_dict:
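
A short, self-contained sketch of the magic-byte check that PATCH 1/8 factors out into MPR_MAGIC (the io.BytesIO below stands in for a real open .mpr file; in galvani the comparison lives inside the MPRfile constructor):

    import io

    MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'

    def check_mpr_magic(fileobj):
        # Read exactly len(MPR_MAGIC) bytes and compare against the header.
        magic = fileobj.read(len(MPR_MAGIC))
        if magic != MPR_MAGIC:
            raise ValueError('Invalid magic for .mpr file: %s' % magic)

    check_mpr_magic(io.BytesIO(MPR_MAGIC + b'MODULE'))  # a good header passes

Building the 48-byte padding with .ljust(48) instead of literal spaces also protects the constant from editors or transports that collapse runs of whitespace, which the old inline literal (23 magic bytes followed by 25 spaces) was vulnerable to.
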
From 2a36713b064777e5ca94975c5041bf447abf3f66 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Fri, 3 May 2019 20:37:27 +0200
Subject: [PATCH 2/8] Fixed some flake8 warnings in res2sqlite.py

---
 galvani/res2sqlite.py | 71 ++++++++++++++++++++++++-----------------
 1 file changed, 43 insertions(+), 28 deletions(-)

diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py
index 0234a7b..843129c 100755
--- a/galvani/res2sqlite.py
+++ b/galvani/res2sqlite.py
@@ -7,8 +7,8 @@
 import csv
 import argparse
 
-## The following scripts are adapted from the result of running
-## $ mdb-schema oracle
+# The following scripts are adapted from the result of running
+# $ mdb-schema oracle
 
 mdb_tables = ["Version_Table", "Global_Table", "Resume_Table",
               "Channel_Normal_Table", "Channel_Statistic_Table",
@@ -126,7 +126,8 @@ CREATE TABLE Channel_Statistic_Table
 -- Version 1.14 ends here, version 5.23 continues
     Charge_Time REAL DEFAULT NULL,
     Discharge_Time REAL DEFAULT NULL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );
 """,
     "Auxiliary_Table": """
 CREATE TABLE Auxiliary_Table
 (
     Test_ID INTEGER,
     Data_Point INTEGER,
     Auxiliary_Index INTEGER,
     Data_Type INTEGER,
     X REAL,
     "dX/dt" REAL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );
 """,
     "Event_Table": """
 CREATE TABLE Event_Table
@@ -220,7 +222,8 @@ CREATE TABLE Smart_Battery_Data_Table
     ChargingCurrent REAL DEFAULT NULL,
     ChargingVoltage REAL DEFAULT NULL,
     ManufacturerData REAL DEFAULT NULL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );
 """,
 ## The following tables are not present in version 1.14
 'MCell_Aci_Data_Table': """
 CREATE TABLE MCell_Aci_Data_Table
 (
     Test_ID INTEGER,
     Data_Point INTEGER,
     ACI_Data_Type INTEGER,
     Frequency REAL,
     Amplitude REAL,
     Phase_Shift REAL,
     Voltage REAL,
     Current REAL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );""",
 'Aux_Global_Data_Table': """
 CREATE TABLE Aux_Global_Data_Table
 (
     Channel_Index INTEGER,
     Auxiliary_Index INTEGER,
     Data_Type INTEGER,
     Nickname TEXT,
     Unit TEXT
 );""",
 'Smart_Battery_Clock_Stretch_Table': """
@@ -288,7 +292,8 @@ CREATE TABLE Smart_Battery_Clock_Stretch_Table
     VCELL3 INTEGER,
     VCELL2 INTEGER,
     VCELL1 INTEGER,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );"""}
 
 mdb_create_indices = {
@@ -306,18 +311,21 @@ CREATE TEMPORARY TABLE capacity_helper(
     Discharge_Capacity REAL NOT NULL,
     Charge_Energy REAL NOT NULL,
     Discharge_Energy REAL NOT NULL,
-    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
+    FOREIGN KEY (Test_ID, Cycle_Index)
+        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
 );
 
-INSERT INTO capacity_helper
-    SELECT Test_ID, Cycle_Index, max(Charge_Capacity), max(Discharge_Capacity), max(Charge_Energy), max(Discharge_Energy)
-    FROM Channel_Normal_Table
+INSERT INTO capacity_helper
+    SELECT Test_ID, Cycle_Index,
+           max(Charge_Capacity), max(Discharge_Capacity),
+           max(Charge_Energy), max(Discharge_Energy)
+    FROM Channel_Normal_Table
     GROUP BY Test_ID, Cycle_Index;
 
--- ## Alternative way of selecting ##
--- select *
--- from Channel_Normal_Table as a join Channel_Normal_Table as b
--- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1
+-- ## Alternative way of selecting ##
+-- select *
+-- from Channel_Normal_Table as a join Channel_Normal_Table as b
+-- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1
 -- and a.Charge_Capacity < b.Charge_Capacity);
 
 DROP TABLE IF EXISTS Capacity_Sum_Table;
@@ -328,12 +336,15 @@ CREATE TABLE Capacity_Sum_Table(
     Discharge_Capacity_Sum REAL NOT NULL,
     Charge_Energy_Sum REAL NOT NULL,
     Discharge_Energy_Sum REAL NOT NULL,
-    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
+    FOREIGN KEY (Test_ID, Cycle_Index)
+        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
 );
 
-INSERT INTO Capacity_Sum_Table
-    SELECT a.Test_ID, a.Cycle_Index, total(b.Charge_Capacity), total(b.Discharge_Capacity), total(b.Charge_Energy), total(b.Discharge_Energy)
-    FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
+INSERT INTO Capacity_Sum_Table
+    SELECT a.Test_ID, a.Cycle_Index,
+           total(b.Charge_Capacity), total(b.Discharge_Capacity),
+           total(b.Charge_Energy), total(b.Discharge_Energy)
+    FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
     ON (a.Test_ID = b.Test_ID AND a.Cycle_Index > b.Cycle_Index)
     GROUP BY a.Test_ID, a.Cycle_Index;
 
@@ -352,14 +363,17 @@ CREATE VIEW IF NOT EXISTS Capacity_View
 
 def mdb_get_data_text(s3db, filename, table):
     print("Reading %s..." % table)
+    insert_pattern = re.compile(
+        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
+        re.IGNORECASE
+    )
     try:
         mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
                            bufsize=-1, stdin=None, stdout=sp.PIPE,
                            universal_newlines=True)
         mdb_output = mdb_sql.stdout.read()
         while len(mdb_output) > 0:
-            insert_match = re.match(r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
-                                    mdb_output, re.IGNORECASE)
+            insert_match = insert_pattern.match(mdb_output)
             s3db.execute(insert_match.group())
             mdb_output = mdb_output[insert_match.end():]
             s3db.commit()
@@ -407,28 +421,29 @@ def convert_arbin_to_sqlite(input_file, output_file):
 
     Any data currently in the sqlite file will be erased!
     """
     s3db = sqlite3.connect(output_file)
-
-
+
     for table in reversed(mdb_tables + mdb_5_23_tables):
         s3db.execute('DROP TABLE IF EXISTS "%s";' % table)
-
+
     for table in mdb_tables:
         s3db.executescript(mdb_create_scripts[table])
         mdb_get_data(s3db, input_file, table)
         if table in mdb_create_indices:
             print("Creating indices for %s..." % table)
             s3db.executescript(mdb_create_indices[table])
-
-    if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"):
+
+    csr = s3db.execute("SELECT Version_Schema_Field FROM Version_Table;")
+    version_text, = csr.fetchone()
+    if (version_text == "Results File 5.23"):
         for table in mdb_5_23_tables:
             s3db.executescript(mdb_create_scripts[table])
             mdb_get_data(input_file, table)
             if table in mdb_create_indices:
                 s3db.executescript(mdb_create_indices[table])
-
+
     print("Creating helper table for capacity and energy totals...")
     s3db.executescript(helper_table_script)
-
+
     print("Vacuuming database...")
     s3db.executescript("VACUUM; ANALYZE;")
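
PATCH 2/8 also hoists the INSERT-matching regex out of the read loop in mdb_get_data_text (the hunk at line 352 above), compiling it once instead of once per statement. A runnable sketch of that loop's behaviour, with a canned two-statement string standing in for real mdb-export output (the table and columns here are invented for illustration):

    import re
    import sqlite3

    insert_pattern = re.compile(
        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
        re.IGNORECASE
    )

    s3db = sqlite3.connect(':memory:')
    s3db.execute('CREATE TABLE "T" (a, b);')
    mdb_output = ('INSERT INTO "T" (a, b) VALUES (1, "x");\n'
                  'INSERT INTO "T" (a, b) VALUES (2, "y");\n')
    while len(mdb_output) > 0:
        # Peel one INSERT statement off the front of the buffer and run it.
        insert_match = insert_pattern.match(mdb_output)
        s3db.execute(insert_match.group())
        mdb_output = mdb_output[insert_match.end():]
    s3db.commit()
    print(s3db.execute('SELECT count(*) FROM "T";').fetchone())  # (2,)

Executing one statement at a time, rather than handing the whole dump to executescript, is what lets the except branch later in the function report exactly which statement failed.
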
From 1c8335289acbcf85083eb808278dfc0943050884 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Sun, 12 May 2019 08:33:00 +0200
Subject: [PATCH 3/8] Add flake8 configuration

---
 .flake8 | 2 ++
 tox.ini | 3 +++
 2 files changed, 5 insertions(+)
 create mode 100644 .flake8

diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..ac8f3fb
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,2 @@
+# This file will be ignored - see http://flake8.pycqa.org/en/2.6.0/config.html#per-project
+# Edit the [flake8] section in tox.ini instead

diff --git a/tox.ini b/tox.ini
index ca5b805..1657e13 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,3 +3,6 @@ envlist = py27,py35,py37
 [testenv]
 deps=pytest
 commands=pytest
+
+[flake8]
+exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__

From ed43de132601b842cc09ad40801152fa8d2f52cd Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Sun, 12 May 2019 08:35:41 +0200
Subject: [PATCH 4/8] Fix flake8 warnings on comments style

---
 galvani/res2sqlite.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py
index 843129c..1ad5314 100755
--- a/galvani/res2sqlite.py
+++ b/galvani/res2sqlite.py
@@ -225,7 +225,7 @@ CREATE TABLE Smart_Battery_Data_Table
     FOREIGN KEY (Test_ID, Data_Point)
         REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );
 """,
-## The following tables are not present in version 1.14
+# The following tables are not present in version 1.14
 'MCell_Aci_Data_Table': """
 CREATE TABLE MCell_Aci_Data_Table
 (
From d137bfccef2ee8411fa3ff2cdfe8e02564aa2a61 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Sun, 12 May 2019 09:10:41 +0200
Subject: [PATCH 5/8] Add flake8 to tox.ini

---
 tox.ini | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/tox.ini b/tox.ini
index 1657e13..a6ded79 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,8 +1,12 @@
 [tox]
 envlist = py27,py35,py37
 [testenv]
-deps=pytest
-commands=pytest
+deps =
+    flake8
+    pytest
+commands =
+    flake8
+    pytest
 
 [flake8]
 exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__

From 3440047dc271369efcd4b8d3972a20afe4f1311d Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Sun, 12 May 2019 08:44:53 +0200
Subject: [PATCH 6/8] Fixed flake8 warning about lambda assignment

---
 galvani/BioLogic.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py
index 4870330..4eeac7f 100644
--- a/galvani/BioLogic.py
+++ b/galvani/BioLogic.py
@@ -10,6 +10,7 @@ from os import SEEK_SET
 import time
 from datetime import date, datetime, timedelta
 from collections import defaultdict, OrderedDict
+import functools
 
 import numpy as np
 
@@ -18,7 +19,7 @@ if sys.version_info.major <= 2:
     str3 = str
     from string import maketrans
 else:
-    str3 = lambda b: str(b, encoding='ascii')
+    str3 = functools.partial(str, encoding='ascii')
     maketrans = bytes.maketrans
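
PATCH 6/8 swaps a lambda assignment for functools.partial without changing behaviour: both build a callable that decodes ASCII bytes to str on Python 3. A two-line check of the equivalence:

    import functools

    str3 = functools.partial(str, encoding='ascii')
    assert str3(b'Nb header lines : 3') == 'Nb header lines : 3'
    assert str3(b'abc') == (lambda b: str(b, encoding='ascii'))(b'abc')

flake8 reports assignments of lambdas to names as E731; functools.partial expresses the same intent without tripping that check.
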
From 8abab57c06ad18d5afaeaba54909841a6d266650 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Sun, 12 May 2019 09:20:33 +0200
Subject: [PATCH 7/8] Fixed some more flake8 warnings

---
 galvani/__init__.py    | 4 +++-
 galvani/res2sqlite.py  | 9 ++++++---
 tests/test_BioLogic.py | 6 +++---
 3 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/galvani/__init__.py b/galvani/__init__.py
index 1949033..ce98e1d 100644
--- a/galvani/__init__.py
+++ b/galvani/__init__.py
@@ -1 +1,3 @@
-from .BioLogic import MPTfile, MPRfile
+from .BioLogic import MPRfile, MPTfile
+
+__all__ = ['MPRfile', 'MPTfile']

diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py
index 1ad5314..793092a 100755
--- a/galvani/res2sqlite.py
+++ b/galvani/res2sqlite.py
@@ -377,7 +377,7 @@ def mdb_get_data_text(s3db, filename, table):
             s3db.execute(insert_match.group())
             mdb_output = mdb_output[insert_match.end():]
             s3db.commit()
-    except:
+    except BaseException:
         print("Error while importing %s" % table)
         print("Remaining mdb-export output:", mdb_output)
         if insert_match:
@@ -398,8 +398,11 @@ def mdb_get_data_numeric(s3db, filename, table):
         quoted_headers = ['"%s"' % h for h in mdb_headers]
         joined_headers = ', '.join(quoted_headers)
         joined_placemarks = ', '.join(['?' for h in mdb_headers])
-        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(table,
-                joined_headers, joined_placemarks)
+        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
+            table,
+            joined_headers,
+            joined_placemarks,
+        )
         s3db.executemany(insert_stmt, mdb_csv)
         s3db.commit()
     finally:

diff --git a/tests/test_BioLogic.py b/tests/test_BioLogic.py
index c8bf163..7fe2cb6 100644
--- a/tests/test_BioLogic.py
+++ b/tests/test_BioLogic.py
@@ -129,7 +129,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     assert_array_equal(mpr.get_flag("control changes"), mpt["control changes"])
     if "Ns changes" in mpt.dtype.fields:
         assert_array_equal(mpr.get_flag("Ns changes"), mpt["Ns changes"])
-    ## Nothing uses the 0x40 bit of the flags
+    # Nothing uses the 0x40 bit of the flags
     assert_array_equal(mpr.get_flag("counter inc."), mpt["counter inc."])
 
     assert_array_almost_equal(mpr.data["time/s"],
@@ -146,10 +146,10 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     assert_field_matches("dQ/mA.h", decimal=17)  # 64 bit float precision
     assert_field_matches("P/W", decimal=10)  # 32 bit float precision for 1.xxE-5
     assert_field_matches("I/mA", decimal=6)  # 32 bit float precision
-
+
     assert_field_exact("cycle number")
     assert_field_matches("(Q-Qo)/C", decimal=6)  # 32 bit float precision
-
+
     try:
         assert timestamp_from_comments(comments) == mpr.timestamp
     except AttributeError:
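
The insert_stmt reflowed in PATCH 7/8 builds a standard qmark-style parameterized INSERT; s3db.executemany then binds every row in one call. A minimal sketch with invented headers and rows (in galvani the rows come from a csv reader over mdb-export output; Global_Table is a real Arbin table name, the columns here are placeholders):

    import sqlite3

    table = 'Global_Table'
    mdb_headers = ['Test_ID', 'Test_Name']        # invented column names
    mdb_csv = [(1, 'cell_A'), (2, 'cell_B')]      # stands in for the csv rows

    quoted_headers = ['"%s"' % h for h in mdb_headers]
    joined_headers = ', '.join(quoted_headers)
    joined_placemarks = ', '.join(['?' for h in mdb_headers])
    insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
        table,
        joined_headers,
        joined_placemarks,
    )
    # insert_stmt: INSERT INTO "Global_Table" ("Test_ID", "Test_Name") VALUES (?, ?);

    s3db = sqlite3.connect(':memory:')
    s3db.execute('CREATE TABLE "Global_Table" ("Test_ID", "Test_Name");')
    s3db.executemany(insert_stmt, mdb_csv)
    s3db.commit()

Only the table and column names are string-formatted; the values themselves go through parameter binding, which avoids the quoting problems that formatting values into the SQL would reintroduce.
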
From aab135391a377c616e9886cc676f8bab05baf342 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Wed, 15 May 2019 07:09:11 +0200
Subject: [PATCH 8/8] Change max-line-length to 100 and refactor all longer lines

---
 galvani/res2sqlite.py | 16 +++++++++++-----
 tox.ini               |  1 +
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/galvani/res2sqlite.py b/galvani/res2sqlite.py
index 793092a..f31193f 100755
--- a/galvani/res2sqlite.py
+++ b/galvani/res2sqlite.py
@@ -353,10 +353,14 @@ DROP TABLE capacity_helper;
 
 CREATE VIEW IF NOT EXISTS Capacity_View AS
     SELECT Test_ID, Data_Point, Test_Time, Step_Time, DateTime, Step_Index,
            Cycle_Index, Current, Voltage, "dV/dt",
-           Discharge_Capacity + Discharge_Capacity_Sum - Charge_Capacity - Charge_Capacity_Sum AS Net_Capacity,
-           Discharge_Capacity + Discharge_Capacity_Sum + Charge_Capacity + Charge_Capacity_Sum AS Gross_Capacity,
-           Discharge_Energy + Discharge_Energy_Sum - Charge_Energy - Charge_Energy_Sum AS Net_Energy,
-           Discharge_Energy + Discharge_Energy_Sum + Charge_Energy + Charge_Energy_Sum AS Gross_Energy
+           ( (Discharge_Capacity + Discharge_Capacity_Sum)
+             - (Charge_Capacity + Charge_Capacity_Sum) ) AS Net_Capacity,
+           ( (Discharge_Capacity + Discharge_Capacity_Sum)
+             + (Charge_Capacity + Charge_Capacity_Sum) ) AS Gross_Capacity,
+           ( (Discharge_Energy + Discharge_Energy_Sum)
+             - (Charge_Energy + Charge_Energy_Sum) ) AS Net_Energy,
+           ( (Discharge_Energy + Discharge_Energy_Sum)
+             + (Charge_Energy + Charge_Energy_Sum) ) AS Gross_Energy
     FROM Channel_Normal_Table NATURAL JOIN Capacity_Sum_Table;
 """
 
@@ -452,7 +456,9 @@ def convert_arbin_to_sqlite(input_file, output_file):
 
 
 def main(argv=None):
-    parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export")
+    parser = argparse.ArgumentParser(
+        description="Convert Arbin .res files to sqlite3 databases using mdb-export",
+    )
     parser.add_argument('input_file', type=str)  # need file name to pass to sp.Popen
     parser.add_argument('output_file', type=str)  # need file name to pass to sqlite3.connect

diff --git a/tox.ini b/tox.ini
index a6ded79..8ccfc09 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,3 +10,4 @@ commands =
 
 [flake8]
 exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__
+max-line-length = 100
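
A closing note on the Capacity_View rewrite in PATCH 8/8: the regrouped expressions are algebraically identical to the old single-line forms; the parentheses only pair each discharge total with the matching charge total for readability. A spot check of the arithmetic in Python, using invented, binary-exact numbers so that the old and new groupings agree bit for bit:

    # Invented per-cycle values: current cycle plus running sums
    # from Capacity_Sum_Table.
    discharge_capacity, discharge_capacity_sum = 1.5, 10.0
    charge_capacity, charge_capacity_sum = 1.75, 10.25

    net_old = (discharge_capacity + discharge_capacity_sum
               - charge_capacity - charge_capacity_sum)
    net_new = ((discharge_capacity + discharge_capacity_sum)
               - (charge_capacity + charge_capacity_sum))
    gross_new = ((discharge_capacity + discharge_capacity_sum)
                 + (charge_capacity + charge_capacity_sum))

    assert net_old == net_new == -0.5
    assert gross_new == 23.5

With arbitrary REAL values the two groupings can differ in the last floating-point bits, since addition is not associative in IEEE 754, but any such difference is far below the precision of the underlying measurements.
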