Merge pull request #32 from chatcannon/flake8

Add code style checking with flake8
Committed by GitHub on 2019-05-16 17:25:36 +02:00
6 changed files with 106 additions and 66 deletions

.flake8 (new file, +2)

@@ -0,0 +1,2 @@
+# This file will be ignored - see http://flake8.pycqa.org/en/2.6.0/config.html#per-project
+# Edit the [flake8] section in tox.ini instead

galvani/BioLogic.py

@@ -10,6 +10,7 @@ from os import SEEK_SET
 import time
 from datetime import date, datetime, timedelta
 from collections import defaultdict, OrderedDict
+import functools
 
 import numpy as np
@@ -18,7 +19,7 @@ if sys.version_info.major <= 2:
     str3 = str
     from string import maketrans
 else:
-    str3 = lambda b: str(b, encoding='ascii')
+    str3 = functools.partial(str, encoding='ascii')
     maketrans = bytes.maketrans
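
Note: the lambda assignment is what flake8 flags as E731 ("do not assign a lambda expression, use a def"); `functools.partial(str, encoding='ascii')` is an equivalent callable. A quick illustrative check, not part of the diff:

    import functools

    str3 = functools.partial(str, encoding='ascii')
    assert str3(b'Nb header lines') == 'Nb header lines'  # bytes -> str, as the lambda did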
@@ -71,19 +72,20 @@ def MPTfile(file_or_path):
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
     # TODO use rb'string' here once Python 2 is no longer supported
-    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file))
+    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$',
+                                next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)
-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]
     fieldnames = str3(next(mpt_file)).strip().split('\t')
     record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
-    ## Must be able to parse files where commas are used for decimal points
+    # Must be able to parse files where commas are used for decimal points
     converter_dict = dict(((i, comma_converter)
                            for i in range(len(fieldnames))))
     mpt_array = np.loadtxt(mpt_file, dtype=record_type,
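
Note: the per-column converters are what let np.loadtxt cope with EC-Lab exports that write decimal commas. The real comma_converter is defined elsewhere in this file; a minimal sketch of the idea, assuming fields arrive as bytes as they did with the numpy of this era:

    def comma_converter(raw_value):
        # Accept b'1,23' as well as b'1.23' for a float field
        return float(raw_value.replace(b',', b'.'))

    assert comma_converter(b'3,14') == 3.14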
@@ -113,8 +115,8 @@ def MPTfileCSV(file_or_path):
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)
-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]
     mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')
@@ -256,7 +258,8 @@ def read_VMP_modules(fileobj, read_module_data=True):
         if len(module_magic) == 0:  # end of file
             break
         elif module_magic != b'MODULE':
-            raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic)
+            raise ValueError("Found %r, expecting start of new VMP MODULE"
+                             % module_magic)
         hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
         if len(hdr_bytes) < VMPmodule_hdr.itemsize:
@@ -280,6 +283,9 @@ def read_VMP_modules(fileobj, read_module_data=True):
             fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)
 
 
+MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
+
+
 class MPRfile:
     """Bio-Logic .mpr file
@@ -302,10 +308,8 @@ class MPRfile:
             mpr_file = open(file_or_path, 'rb')
         else:
             mpr_file = file_or_path
-        mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a                         \x00\x00\x00\x00'
-        magic = mpr_file.read(len(mpr_magic))
-        if magic != mpr_magic:
+        magic = mpr_file.read(len(MPR_MAGIC))
+        if magic != MPR_MAGIC:
             raise ValueError('Invalid magic for .mpr file: %s' % magic)
 
         modules = list(read_VMP_modules(mpr_file))
@@ -326,7 +330,7 @@ class MPRfile:
         elif data_module['version'] == 2:
             column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
                                          count=n_columns)
-            ## There is 405 bytes of data before the main array starts
+            # There is 405 bytes of data before the main array starts
             remaining_headers = data_module['data'][5 + 2 * n_columns:405]
             main_data = data_module['data'][405:]
         else:
@@ -342,8 +346,8 @@ class MPRfile:
         self.data = np.frombuffer(main_data, dtype=self.dtype)
         assert(self.data.shape[0] == n_data_points)
 
-        ## No idea what these 'column types' mean or even if they are actually
-        ## column types at all
+        # No idea what these 'column types' mean or even if they are actually
+        # column types at all
         self.version = int(data_module['version'])
         self.cols = column_types
         self.npts = n_data_points
@@ -372,9 +376,9 @@ class MPRfile:
             tm = time.strptime(str3(log_module['date']), '%m-%d-%y')
             self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
-            ## There is a timestamp at either 465 or 469 bytes
-            ## I can't find any reason why it is one or the other in any
-            ## given file
+            # There is a timestamp at either 465 or 469 bytes
+            # I can't find any reason why it is one or the other in any
+            # given file
             ole_timestamp1 = np.frombuffer(log_module['data'][465:],
                                            dtype='<f8', count=1)
             ole_timestamp2 = np.frombuffer(log_module['data'][469:],
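
Note: the value at either offset is an OLE automation date, a little-endian float64 counting days (with fractions) since the OLE epoch, which is presumably what `ole_base` below holds. Decoding one by hand, for illustration:

    from datetime import datetime, timedelta

    ole_base = datetime(1899, 12, 30)  # standard OLE automation epoch
    assert ole_base + timedelta(days=43601.5) == datetime(2019, 5, 16, 12, 0)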
@@ -400,10 +404,10 @@ class MPRfile:
             ole_timedelta = timedelta(days=ole_timestamp[0])
             self.timestamp = ole_base + ole_timedelta
             if self.startdate != self.timestamp.date():
-                raise ValueError("""Date mismatch:
-    Start date: %s
-    End date: %s
-    Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp))
+                raise ValueError("Date mismatch:\n"
+                                 + "  Start date: %s\n" % self.startdate
+                                 + "  End date: %s\n" % self.enddate
+                                 + "  Timestamp: %s\n" % self.timestamp)
 
     def get_flag(self, flagname):
         if flagname in self.flags_dict:
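
Note: in the rewritten message, `%` binds tighter than `+`, so each piece is interpolated before the lines are joined. A sketch of an alternative with a single format call (f-strings were out while Python 2 was still supported), using stand-in variables:

    raise ValueError(
        "Date mismatch:\n"
        "  Start date: {0}\n"
        "  End date: {1}\n"
        "  Timestamp: {2}".format(startdate, enddate, timestamp)
    )

Adjacent string literals concatenate at compile time, so no runtime `+` is needed.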

galvani/__init__.py

@@ -1 +1,3 @@
-from .BioLogic import MPTfile, MPRfile
+from .BioLogic import MPRfile, MPTfile
+
+__all__ = ['MPRfile', 'MPTfile']
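
Note: `__all__` both documents the public API and marks the re-exports as intentional, so pyflakes stops reporting F401 ("imported but unused") for them. Downstream code is unaffected, assuming the package is imported as galvani:

    from galvani import MPRfile, MPTfile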

galvani/res2sqlite.py

@@ -7,8 +7,8 @@ import csv
 import argparse
 
-## The following scripts are adapted from the result of running
-## $ mdb-schema <result.res> oracle
+# The following scripts are adapted from the result of running
+# $ mdb-schema <result.res> oracle
 
 mdb_tables = ["Version_Table", "Global_Table", "Resume_Table",
               "Channel_Normal_Table", "Channel_Statistic_Table",
@@ -126,7 +126,8 @@ CREATE TABLE Channel_Statistic_Table
 -- Version 1.14 ends here, version 5.23 continues
     Charge_Time REAL DEFAULT NULL,
     Discharge_Time REAL DEFAULT NULL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
     "Auxiliary_Table": """
 CREATE TABLE Auxiliary_Table
@@ -137,7 +138,8 @@ CREATE TABLE Auxiliary_Table
     Data_Type INTEGER,
     X REAL,
     "dX/dt" REAL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
     "Event_Table": """
 CREATE TABLE Event_Table
@@ -220,9 +222,10 @@ CREATE TABLE Smart_Battery_Data_Table
     ChargingCurrent REAL DEFAULT NULL,
     ChargingVoltage REAL DEFAULT NULL,
     ManufacturerData REAL DEFAULT NULL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
-## The following tables are not present in version 1.14
+# The following tables are not present in version 1.14
     'MCell_Aci_Data_Table': """
 CREATE TABLE MCell_Aci_Data_Table
 (
@@ -233,7 +236,8 @@ CREATE TABLE MCell_Aci_Data_Table
     Phase_Shift REAL,
     Voltage REAL,
     Current REAL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );""",
     'Aux_Global_Data_Table': """
 CREATE TABLE Aux_Global_Data_Table
@@ -288,7 +292,8 @@ CREATE TABLE Smart_Battery_Clock_Stretch_Table
     VCELL3 INTEGER,
     VCELL2 INTEGER,
     VCELL1 INTEGER,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );"""}
 
 mdb_create_indices = {
@@ -306,11 +311,14 @@ CREATE TEMPORARY TABLE capacity_helper(
     Discharge_Capacity REAL NOT NULL,
     Charge_Energy REAL NOT NULL,
     Discharge_Energy REAL NOT NULL,
-    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
+    FOREIGN KEY (Test_ID, Cycle_Index)
+        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
 );
 
 INSERT INTO capacity_helper
-SELECT Test_ID, Cycle_Index, max(Charge_Capacity), max(Discharge_Capacity), max(Charge_Energy), max(Discharge_Energy)
+SELECT Test_ID, Cycle_Index,
+       max(Charge_Capacity), max(Discharge_Capacity),
+       max(Charge_Energy), max(Discharge_Energy)
 FROM Channel_Normal_Table
 GROUP BY Test_ID, Cycle_Index;
@@ -328,11 +336,14 @@ CREATE TABLE Capacity_Sum_Table(
     Discharge_Capacity_Sum REAL NOT NULL,
     Charge_Energy_Sum REAL NOT NULL,
     Discharge_Energy_Sum REAL NOT NULL,
-    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
+    FOREIGN KEY (Test_ID, Cycle_Index)
+        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
 );
 
 INSERT INTO Capacity_Sum_Table
-SELECT a.Test_ID, a.Cycle_Index, total(b.Charge_Capacity), total(b.Discharge_Capacity), total(b.Charge_Energy), total(b.Discharge_Energy)
+SELECT a.Test_ID, a.Cycle_Index,
+       total(b.Charge_Capacity), total(b.Discharge_Capacity),
+       total(b.Charge_Energy), total(b.Discharge_Energy)
 FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
     ON (a.Test_ID = b.Test_ID AND a.Cycle_Index > b.Cycle_Index)
 GROUP BY a.Test_ID, a.Cycle_Index;
@@ -342,28 +353,35 @@ DROP TABLE capacity_helper;
 
 CREATE VIEW IF NOT EXISTS Capacity_View
 AS SELECT Test_ID, Data_Point, Test_Time, Step_Time, DateTime,
           Step_Index, Cycle_Index, Current, Voltage, "dV/dt",
-          Discharge_Capacity + Discharge_Capacity_Sum - Charge_Capacity - Charge_Capacity_Sum AS Net_Capacity,
-          Discharge_Capacity + Discharge_Capacity_Sum + Charge_Capacity + Charge_Capacity_Sum AS Gross_Capacity,
-          Discharge_Energy + Discharge_Energy_Sum - Charge_Energy - Charge_Energy_Sum AS Net_Energy,
-          Discharge_Energy + Discharge_Energy_Sum + Charge_Energy + Charge_Energy_Sum AS Gross_Energy
+          ( (Discharge_Capacity + Discharge_Capacity_Sum)
+           - (Charge_Capacity + Charge_Capacity_Sum) ) AS Net_Capacity,
+          ( (Discharge_Capacity + Discharge_Capacity_Sum)
+           + (Charge_Capacity + Charge_Capacity_Sum) ) AS Gross_Capacity,
+          ( (Discharge_Energy + Discharge_Energy_Sum)
+           - (Charge_Energy + Charge_Energy_Sum) ) AS Net_Energy,
+          ( (Discharge_Energy + Discharge_Energy_Sum)
+           + (Charge_Energy + Charge_Energy_Sum) ) AS Gross_Energy
 FROM Channel_Normal_Table NATURAL JOIN Capacity_Sum_Table;
 """
 
 
 def mdb_get_data_text(s3db, filename, table):
     print("Reading %s..." % table)
+    insert_pattern = re.compile(
+        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
+        re.IGNORECASE
+    )
     try:
         mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
                            bufsize=-1, stdin=None, stdout=sp.PIPE,
                            universal_newlines=True)
         mdb_output = mdb_sql.stdout.read()
         while len(mdb_output) > 0:
-            insert_match = re.match(r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
-                                    mdb_output, re.IGNORECASE)
+            insert_match = insert_pattern.match(mdb_output)
             s3db.execute(insert_match.group())
             mdb_output = mdb_output[insert_match.end():]
         s3db.commit()
-    except:
+    except BaseException:
         print("Error while importing %s" % table)
         print("Remaining mdb-export output:", mdb_output)
         if insert_match:
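
Note: two fixes share this hunk. The regex literal moves out of the loop into a pattern compiled once up front, which keeps the line length down, and the bare `except:` (flake8 E722) becomes `except BaseException:`, which catches exactly the same exceptions but says so explicitly. A self-contained check that the pattern consumes one INSERT statement at a time (the sample input is made up):

    import re

    insert_pattern = re.compile(
        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
        re.IGNORECASE,
    )
    sample = 'INSERT INTO "T" (a, b) VALUES (1, "x");\n' * 2
    match = insert_pattern.match(sample)
    assert match is not None
    assert match.end() == len(sample) // 2  # exactly one statement consumed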
@@ -384,8 +402,11 @@ def mdb_get_data_numeric(s3db, filename, table):
         quoted_headers = ['"%s"' % h for h in mdb_headers]
         joined_headers = ', '.join(quoted_headers)
         joined_placemarks = ', '.join(['?' for h in mdb_headers])
-        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(table,
-            joined_headers, joined_placemarks)
+        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
+            table,
+            joined_headers,
+            joined_placemarks,
+        )
         s3db.executemany(insert_stmt, mdb_csv)
         s3db.commit()
     finally:
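
Note: listing the `format` arguments one per line fixes the continuation-indent warnings without changing the statement. The pattern itself, one parametrised INSERT handed to `executemany`, leaves all quoting to sqlite3. A runnable miniature with a made-up table:

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE "t" ("a" INTEGER, "b" REAL);')
    insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
        't', '"a", "b"', '?, ?')
    db.executemany(insert_stmt, [(1, 0.5), (2, 0.75)])
    assert db.execute('SELECT count(*) FROM "t";').fetchone() == (2,)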
@@ -408,7 +429,6 @@ def convert_arbin_to_sqlite(input_file, output_file):
     """
     s3db = sqlite3.connect(output_file)
-
     for table in reversed(mdb_tables + mdb_5_23_tables):
         s3db.execute('DROP TABLE IF EXISTS "%s";' % table)
@@ -419,7 +439,9 @@ def convert_arbin_to_sqlite(input_file, output_file):
             print("Creating indices for %s..." % table)
             s3db.executescript(mdb_create_indices[table])
 
-    if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"):
+    csr = s3db.execute("SELECT Version_Schema_Field FROM Version_Table;")
+    version_text, = csr.fetchone()
+    if (version_text == "Results File 5.23"):
         for table in mdb_5_23_tables:
             s3db.executescript(mdb_create_scripts[table])
             mdb_get_data(input_file, table)
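
Note: the trailing comma in `version_text, = csr.fetchone()` is single-element tuple unpacking; unlike `.fetchone()[0]` it also fails loudly if the row does not contain exactly one column. For illustration:

    row = ('Results File 5.23',)
    version_text, = row
    assert version_text == 'Results File 5.23'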
@@ -434,7 +456,9 @@ def convert_arbin_to_sqlite(input_file, output_file):
 
 def main(argv=None):
-    parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export")
+    parser = argparse.ArgumentParser(
+        description="Convert Arbin .res files to sqlite3 databases using mdb-export",
+    )
     parser.add_argument('input_file', type=str)  # need file name to pass to sp.Popen
     parser.add_argument('output_file', type=str)  # need file name to pass to sqlite3.connect
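
Note: both positionals stay plain strings because one is handed to mdb-export via sp.Popen and the other to sqlite3.connect. Assuming main() forwards argv to parser.parse_args (the usual reason for the argv=None signature; the call is not shown in this hunk), the converter can be driven without a shell:

    main(['input.res', 'output.s3db'])  # hypothetical file names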

tests/test_BioLogic.py

@@ -129,7 +129,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     assert_array_equal(mpr.get_flag("control changes"), mpt["control changes"])
     if "Ns changes" in mpt.dtype.fields:
         assert_array_equal(mpr.get_flag("Ns changes"), mpt["Ns changes"])
-    ## Nothing uses the 0x40 bit of the flags
+    # Nothing uses the 0x40 bit of the flags
     assert_array_equal(mpr.get_flag("counter inc."), mpt["counter inc."])
     assert_array_almost_equal(mpr.data["time/s"],

tox.ini (+10, -2)

@@ -1,5 +1,13 @@
 [tox]
 envlist = py27,py35,py37
 [testenv]
-deps=pytest
-commands=pytest
+deps =
+    flake8
+    pytest
+commands =
+    flake8
+    pytest
+
+[flake8]
+exclude = build,dist,*.egg-info,.cache,.git,.tox,__pycache__
+max-line-length = 100
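
Note: with this layout every tox environment installs flake8 alongside pytest and runs both, so a style violation fails the build exactly like a failing test. The [flake8] section raises the line-length limit from flake8's default 79 to 100 and skips generated and tool directories. Typical invocations, for illustration:

    tox            # all of py27, py35, py37: flake8, then pytest
    tox -e py37    # a single environment
    flake8         # style check only, using the [flake8] section above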