1 Commit
0.2.0 ... 0.1.0

Author SHA1 Message Date
aa4ca29c22 Release version 0.1.0
Update package URL to the new echemdata repo
2019-06-02 13:26:20 +02:00
9 changed files with 107 additions and 260 deletions

View File

@@ -6,10 +6,9 @@ cache:
   - .pytest_cache
   - tests/testdata
 python:
-  - "3.6"
-  - "3.7"
-  - "3.8"
-  - "3.9"
+  - "2.7"
+  - "3.5"
+# - "3.7" # Python 3.7 is not available on travis CI yet
 install:
   - pip install tox-travis
   - sh get_testdata.sh

View File

@@ -7,14 +7,6 @@ Read proprietary file formats from electrochemical test stations
 
 Use the `MPRfile` class from BioLogic.py (exported in the main package)
 
-````
-from galvani import BioLogic
-import pandas as pd
-
-mpr_file = BioLogic.MPRfile('test.mpr')
-df = pd.DataFrame(mpr_file.data)
-````
-
 ## Arbin .res files ##
 
 Use the res2sqlite.py script to convert the .res file to a sqlite3 database
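
Note: the conversion the README points to can also be driven from Python rather than the installed console script. A minimal sketch using the function this release touches; `input.res` and `output.sqlite` are placeholder paths:

````python
# convert_arbin_to_sqlite is defined in galvani/res2sqlite.py and erases
# any existing data in the output file before writing.
from galvani.res2sqlite import convert_arbin_to_sqlite

convert_arbin_to_sqlite('input.res', 'output.sqlite')
````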

View File

@@ -3,16 +3,26 @@
 
 __all__ = ['MPTfileCSV', 'MPTfile']
 
+import sys
 import re
 import csv
 from os import SEEK_SET
 import time
 from datetime import date, datetime, timedelta
 from collections import defaultdict, OrderedDict
+import functools
 
 import numpy as np
 
+if sys.version_info.major <= 2:
+    str3 = str
+    from string import maketrans
+else:
+    str3 = functools.partial(str, encoding='ascii')
+    maketrans = bytes.maketrans
+
 
 def fieldname_to_dtype(fieldname):
     """Converts a column header from the MPT file into a tuple of
     canonical name and appropriate numpy dtype"""
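
The `str3` shim on the 0.1.0 side gives both interpreters one bytes-to-text call signature; on Python 3 it reduces to the following sketch:

````python
import functools

# str3(b) == str(b, encoding='ascii') == b.decode('ascii') on Python 3.
str3 = functools.partial(str, encoding='ascii')

assert str3(b'EC-Lab ASCII FILE') == 'EC-Lab ASCII FILE'
````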
@@ -39,13 +49,13 @@ def fieldname_to_dtype(fieldname):
         raise ValueError("Invalid column header: %s" % fieldname)
 
 
-def comma_converter(float_text):
-    """Convert text to float whether the decimal point is '.' or ','"""
-    trans_table = bytes.maketrans(b',', b'.')
-    return float(float_text.translate(trans_table))
+def comma_converter(float_string):
+    """Convert numbers to floats whether the decimal point is '.' or ','"""
+    trans_table = maketrans(b',', b'.')
+    return float(float_string.translate(trans_table))
 
 
-def MPTfile(file_or_path, encoding='ascii'):
+def MPTfile(file_or_path):
     """Opens .mpt files as numpy record arrays
 
     Checks for the correct headings, skips any comments and returns a
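
Both forms of `comma_converter` behave identically on byte-string input; the only differences are the parameter name and where `maketrans` comes from. Sample values:

````python
def comma_converter(float_text):
    """Convert text to float whether the decimal point is '.' or ','"""
    trans_table = bytes.maketrans(b',', b'.')  # 0.2.0 form of the table
    return float(float_text.translate(trans_table))

assert comma_converter(b'3,1415') == 3.1415
assert comma_converter(b'3.1415') == 3.1415
````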
@@ -61,7 +71,8 @@ def MPTfile(file_or_path, encoding='ascii'):
     if magic != b'EC-Lab ASCII FILE\r\n':
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
 
-    nb_headers_match = re.match(rb'Nb header lines : (\d+)\s*$',
+    # TODO use rb'string' here once Python 2 is no longer supported
+    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$',
                                 next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
@@ -71,7 +82,7 @@ def MPTfile(file_or_path, encoding='ascii'):
     # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]
 
-    fieldnames = next(mpt_file).decode(encoding).strip().split('\t')
+    fieldnames = str3(next(mpt_file)).strip().split('\t')
     record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))
 
     # Must be able to parse files where commas are used for decimal points
@@ -145,7 +156,6 @@ VMPdata_colID_dtype_map = {
     9: ('Ece/V', '<f4'),
     11: ('I/mA', '<f8'),
     13: ('(Q-Qo)/mA.h', '<f8'),
-    16: ('Analog IN 1/V', '<f4'),
     19: ('control/V', '<f4'),
     20: ('control/mA', '<f4'),
     23: ('dQ/mA.h', '<f8'),  # Same as 7?
@@ -159,73 +169,25 @@ VMPdata_colID_dtype_map = {
     38: ('-Im(Z)/Ohm', '<f4'),
     39: ('I Range', '<u2'),
     70: ('P/W', '<f4'),
-    74: ('Energy/W.h', '<f8'),
-    75: ('Analog OUT/V', '<f4'),
     76: ('<I>/mA', '<f4'),
     77: ('<Ewe>/V', '<f4'),
-    78: ('Cs-2/µF-2', '<f4'),
-    96: ('|Ece|/V', '<f4'),
-    98: ('Phase(Zce)/deg', '<f4'),
-    99: ('|Zce|/Ohm', '<f4'),
-    100: ('Re(Zce)/Ohm', '<f4'),
-    101: ('-Im(Zce)/Ohm', '<f4'),
     123: ('Energy charge/W.h', '<f8'),
     124: ('Energy discharge/W.h', '<f8'),
     125: ('Capacitance charge/µF', '<f8'),
     126: ('Capacitance discharge/µF', '<f8'),
     131: ('Ns', '<u2'),
-    163: ('|Estack|/V', '<f4'),
-    168: ('Rcmp/Ohm', '<f4'),
     169: ('Cs/µF', '<f4'),
     172: ('Cp/µF', '<f4'),
-    173: ('Cp-2/µF-2', '<f4'),
-    241: ('|E1|/V', '<f4'),
-    242: ('|E2|/V', '<f4'),
-    271: ('Phase(Z1) / deg', '<f4'),
-    272: ('Phase(Z2) / deg', '<f4'),
-    301: ('|Z1|/Ohm', '<f4'),
-    302: ('|Z2|/Ohm', '<f4'),
-    331: ('Re(Z1)/Ohm', '<f4'),
-    332: ('Re(Z2)/Ohm', '<f4'),
-    361: ('-Im(Z1)/Ohm', '<f4'),
-    362: ('-Im(Z2)/Ohm', '<f4'),
-    391: ('<E1>/V', '<f4'),
-    392: ('<E2>/V', '<f4'),
-    422: ('Phase(Zstack)/deg', '<f4'),
-    423: ('|Zstack|/Ohm', '<f4'),
-    424: ('Re(Zstack)/Ohm', '<f4'),
-    425: ('-Im(Zstack)/Ohm', '<f4'),
-    426: ('<Estack>/V', '<f4'),
-    430: ('Phase(Zwe-ce)/deg', '<f4'),
-    431: ('|Zwe-ce|/Ohm', '<f4'),
-    432: ('Re(Zwe-ce)/Ohm', '<f4'),
-    433: ('-Im(Zwe-ce)/Ohm', '<f4'),
     434: ('(Q-Qo)/C', '<f4'),
     435: ('dQ/C', '<f4'),
-    441: ('<Ecv>/V', '<f4'),
-    462: ('Temperature/°C', '<f4'),
     467: ('Q charge/discharge/mA.h', '<f8'),
     468: ('half cycle', '<u4'),
-    469: ('z cycle', '<u4'),
-    471: ('<Ece>/V', '<f4'),
     473: ('THD Ewe/%', '<f4'),
     474: ('THD I/%', '<f4'),
     476: ('NSD Ewe/%', '<f4'),
     477: ('NSD I/%', '<f4'),
     479: ('NSR Ewe/%', '<f4'),
     480: ('NSR I/%', '<f4'),
-    486: ('|Ewe h2|/V', '<f4'),
-    487: ('|Ewe h3|/V', '<f4'),
-    488: ('|Ewe h4|/V', '<f4'),
-    489: ('|Ewe h5|/V', '<f4'),
-    490: ('|Ewe h6|/V', '<f4'),
-    491: ('|Ewe h7|/V', '<f4'),
-    492: ('|I h2|/A', '<f4'),
-    493: ('|I h3|/A', '<f4'),
-    494: ('|I h4|/A', '<f4'),
-    495: ('|I h5|/A', '<f4'),
-    496: ('|I h6|/A', '<f4'),
-    497: ('|I h7|/A', '<f4'),
 }
 
 # These column IDs define flags which are all stored packed in a single byte
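
For orientation: `VMPdata_dtype_from_colIDs` (next hunk) folds this map into a numpy record dtype, one `(name, dtype)` pair per column ID found in the file. An abridged sketch using entries copied from the map above:

````python
import numpy as np

# Abridged copy of the map; the real one covers many more column IDs.
VMPdata_colID_dtype_map = {
    11: ('I/mA', '<f8'),
    13: ('(Q-Qo)/mA.h', '<f8'),
    131: ('Ns', '<u2'),
}

colIDs = [11, 13, 131]  # as read from the .mpr data module
dtype = np.dtype([VMPdata_colID_dtype_map[cid] for cid in colIDs])
assert dtype.names == ('I/mA', '(Q-Qo)/mA.h', 'Ns')
````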
@@ -281,10 +243,7 @@ def VMPdata_dtype_from_colIDs(colIDs):
             unique_field_name = field_name
             type_list.append((unique_field_name, field_type))
         else:
-            raise NotImplementedError("Column ID {cid} after column {prev} "
-                                      "is unknown"
-                                      .format(cid=colID,
-                                              prev=type_list[-1][0]))
+            raise NotImplementedError("column type %d not implemented" % colID)
     return np.dtype(type_list), flags_dict
 
 
@@ -368,21 +327,20 @@ class MPRfile:
                                              count=n_columns)
             remaining_headers = data_module['data'][5 + n_columns:100]
             main_data = data_module['data'][100:]
-        elif data_module['version'] in [2, 3]:
+        elif data_module['version'] == 2:
             column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
                                          count=n_columns)
-            # There are bytes of data before the main array starts
-            if data_module['version'] == 3:
-                num_bytes_before = 406  # version 3 added `\x01` to the start
-            else:
-                num_bytes_before = 405
+            # There is 405 bytes of data before the main array starts
             remaining_headers = data_module['data'][5 + 2 * n_columns:405]
-            main_data = data_module['data'][num_bytes_before:]
+            main_data = data_module['data'][405:]
         else:
             raise ValueError("Unrecognised version for data module: %d" %
                              data_module['version'])
 
-        assert(not any(remaining_headers))
+        if sys.version_info.major <= 2:
+            assert(all((b == '\x00' for b in remaining_headers)))
+        else:
+            assert(not any(remaining_headers))
 
         self.dtype, self.flags_dict = VMPdata_dtype_from_colIDs(column_types)
         self.data = np.frombuffer(main_data, dtype=self.dtype)
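
A sketch of the version-3 layout the 0.2.0 side handles: column IDs start at byte 5, the header region is zero-padded up to byte 405, and version 3 carries one extra byte so the flat data array starts at 406. The blob below is synthetic, purely for illustration:

````python
import numpy as np

n_columns = 2
blob = (bytes(5)                                     # module preamble
        + np.array([11, 13], dtype='<u2').tobytes()  # two column IDs
        + bytes(396)                                 # zero padding up to 405
        + b'\x01'                                    # extra version-3 byte
        + np.zeros(2, dtype='<f8').tobytes())        # fake data row
column_types = np.frombuffer(blob[5:], dtype='<u2', count=n_columns)
remaining_headers = blob[5 + 2 * n_columns:405]
assert not any(remaining_headers)
main_data = blob[406:]                               # 405 for version 2
````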
@@ -395,9 +353,9 @@ class MPRfile:
         self.npts = n_data_points
 
         try:
-            tm = time.strptime(settings_mod['date'].decode('ascii'), '%m/%d/%y')
+            tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y')
         except ValueError:
-            tm = time.strptime(settings_mod['date'].decode('ascii'), '%m-%d-%y')
+            tm = time.strptime(str3(settings_mod['date']), '%m-%d-%y')
         self.startdate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
 
         if maybe_loop_module:
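
Both branches parse the same two date spellings that these files use; a self-contained sketch with sample values:

````python
import time
from datetime import date

# Settings and log modules store dates as either '%m/%d/%y' or '%m-%d-%y'.
for text in ('06/02/19', '06-02-19'):
    try:
        tm = time.strptime(text, '%m/%d/%y')
    except ValueError:
        tm = time.strptime(text, '%m-%d-%y')
    assert date(tm.tm_year, tm.tm_mon, tm.tm_mday) == date(2019, 6, 2)
````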
@@ -413,9 +371,9 @@ class MPRfile:
         if maybe_log_module:
             log_module, = maybe_log_module
             try:
-                tm = time.strptime(log_module['date'].decode('ascii'), '%m/%d/%y')
+                tm = time.strptime(str3(log_module['date']), '%m/%d/%y')
             except ValueError:
-                tm = time.strptime(log_module['date'].decode('ascii'), '%m-%d-%y')
+                tm = time.strptime(str3(log_module['date']), '%m-%d-%y')
             self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
 
             # There is a timestamp at either 465 or 469 bytes

View File

@@ -5,51 +5,26 @@ import sqlite3
 import re
 import csv
 import argparse
-from copy import copy
 
 # The following scripts are adapted from the result of running
 # $ mdb-schema <result.res> oracle
-mdb_tables = [
-    'Version_Table',
-    'Global_Table',
-    'Resume_Table',
-    'Channel_Normal_Table',
-    'Channel_Statistic_Table',
-    'Auxiliary_Table',
-    'Event_Table',
-    'Smart_Battery_Info_Table',
-    'Smart_Battery_Data_Table',
-]
-mdb_5_23_tables = [
-    'MCell_Aci_Data_Table',
-    'Aux_Global_Data_Table',
-    'Smart_Battery_Clock_Stretch_Table',
-]
-mdb_5_26_tables = [
-    'Can_BMS_Info_Table',
-    'Can_BMS_Data_Table',
-]
-
-mdb_tables_text = {
-    'Version_Table',
-    'Global_Table',
-    'Event_Table',
-    'Smart_Battery_Info_Table',
-    'Can_BMS_Info_Table',
-}
-mdb_tables_numeric = {
-    'Resume_Table',
-    'Channel_Normal_Table',
-    'Channel_Statistic_Table',
-    'Auxiliary_Table',
-    'Smart_Battery_Data_Table',
-    'MCell_Aci_Data_Table',
-    'Aux_Global_Data_Table',
-    'Smart_Battery_Clock_Stretch_Table',
-    'Can_BMS_Data_Table',
-}
+mdb_tables = ["Version_Table", "Global_Table", "Resume_Table",
+              "Channel_Normal_Table", "Channel_Statistic_Table",
+              "Auxiliary_Table", "Event_Table",
+              "Smart_Battery_Info_Table", "Smart_Battery_Data_Table"]
+mdb_tables_text = ["Version_Table", "Global_Table", "Event_Table",
+                   "Smart_Battery_Info_Table"]
+mdb_tables_numeric = ["Resume_Table", "Channel_Normal_Table",
+                      "Channel_Statistic_Table", "Auxiliary_Table",
+                      "Smart_Battery_Data_Table", 'MCell_Aci_Data_Table',
+                      'Aux_Global_Data_Table',
+                      'Smart_Battery_Clock_Stretch_Table']
+
+mdb_5_23_tables = ['MCell_Aci_Data_Table', 'Aux_Global_Data_Table',
+                   'Smart_Battery_Clock_Stretch_Table']
 
 mdb_create_scripts = {
     "Version_Table": """
@@ -81,17 +56,8 @@ CREATE TABLE Global_Table
     Log_Aux_Data_Flag INTEGER,
     Log_Event_Data_Flag INTEGER,
     Log_Smart_Battery_Data_Flag INTEGER,
--- The following items are in 5.26 but not in 5.23
-    Log_Can_BMS_Data_Flag INTEGER DEFAULT NULL,
-    Software_Version TEXT DEFAULT NULL,
-    Serial_Number TEXT DEFAULT NULL,
-    Schedule_Version TEXT DEFAULT NULL,
-    MASS REAL DEFAULT NULL,
-    Specific_Capacity REAL DEFAULT NULL,
-    Capacity REAL DEFAULT NULL,
--- Item_ID exists in all versions
     Item_ID TEXT,
--- These items are in 5.26 and 5.23 but not in 1.14
+-- Version 1.14 ends here, version 5.23 continues
     Mapped_Aux_Conc_CNumber INTEGER DEFAULT NULL,
     Mapped_Aux_DI_CNumber INTEGER DEFAULT NULL,
     Mapped_Aux_DO_CNumber INTEGER DEFAULT NULL
@@ -99,7 +65,7 @@ CREATE TABLE Global_Table
 "Resume_Table": """
 CREATE TABLE Resume_Table
 (
-    Test_ID INTEGER PRIMARY KEY REFERENCES Global_Table(Test_ID),
+    Test_ID INTEGER REFERENCES Global_Table(Test_ID),
     Step_Index INTEGER,
     Cycle_Index INTEGER,
     Channel_Status INTEGER,
@@ -149,8 +115,7 @@ CREATE TABLE Channel_Normal_Table
     "dV/dt" REAL,
     Internal_Resistance REAL,
     AC_Impedance REAL,
-    ACI_Phase_Angle REAL,
-    PRIMARY KEY (Test_ID, Data_Point)
+    ACI_Phase_Angle REAL
 ); """,
 "Channel_Statistic_Table": """
 CREATE TABLE Channel_Statistic_Table
@@ -161,7 +126,6 @@ CREATE TABLE Channel_Statistic_Table
 -- Version 1.14 ends here, version 5.23 continues
     Charge_Time REAL DEFAULT NULL,
     Discharge_Time REAL DEFAULT NULL,
-    PRIMARY KEY (Test_ID, Data_Point),
     FOREIGN KEY (Test_ID, Data_Point)
         REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
@@ -174,7 +138,6 @@ CREATE TABLE Auxiliary_Table
     Data_Type INTEGER,
     X REAL,
     "dX/dt" REAL,
-    PRIMARY KEY (Test_ID, Data_Point, Auxiliary_Index),
     FOREIGN KEY (Test_ID, Data_Point)
         REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
@@ -190,7 +153,7 @@ CREATE TABLE Event_Table
 "Smart_Battery_Info_Table": """
 CREATE TABLE Smart_Battery_Info_Table
 (
-    Test_ID INTEGER PRIMARY KEY REFERENCES Global_Table(Test_ID),
+    Test_ID INTEGER REFERENCES Global_Table(Test_ID),
     ManufacturerDate REAL,
     ManufacturerAccess TEXT,
     SpecificationInfo TEXT,
@@ -259,14 +222,10 @@ CREATE TABLE Smart_Battery_Data_Table
     ChargingCurrent REAL DEFAULT NULL,
     ChargingVoltage REAL DEFAULT NULL,
     ManufacturerData REAL DEFAULT NULL,
--- Version 5.23 ends here, version 5.26 continues
-    BATMAN_Status INTEGER DEFAULT NULL,
-    DTM_PDM_Status INTEGER DEFAULT NULL,
-    PRIMARY KEY (Test_ID, Data_Point),
     FOREIGN KEY (Test_ID, Data_Point)
         REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
-# The following tables are not present in version 1.14, but are in 5.23
+# The following tables are not present in version 1.14
 'MCell_Aci_Data_Table': """
 CREATE TABLE MCell_Aci_Data_Table
 (
@@ -277,7 +236,6 @@ CREATE TABLE MCell_Aci_Data_Table
     Phase_Shift REAL,
     Voltage REAL,
     Current REAL,
-    PRIMARY KEY (Test_ID, Data_Point, Cell_Index),
     FOREIGN KEY (Test_ID, Data_Point)
         REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );""",
@@ -288,8 +246,7 @@ CREATE TABLE Aux_Global_Data_Table
     Auxiliary_Index INTEGER,
     Data_Type INTEGER,
     Nickname TEXT,
-    Unit TEXT,
-    PRIMARY KEY (Channel_Index, Auxiliary_Index, Data_Type)
+    Unit TEXT
 );""",
 'Smart_Battery_Clock_Stretch_Table': """
 CREATE TABLE Smart_Battery_Clock_Stretch_Table
@@ -335,32 +292,9 @@ CREATE TABLE Smart_Battery_Clock_Stretch_Table
     VCELL3 INTEGER,
     VCELL2 INTEGER,
     VCELL1 INTEGER,
-    PRIMARY KEY (Test_ID, Data_Point),
     FOREIGN KEY (Test_ID, Data_Point)
         REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
-);""",
-# The following tables are not present in version 5.23, but are in 5.26
-'Can_BMS_Info_Table': """
-CREATE TABLE "Can_BMS_Info_Table"
-(
-    Channel_Index INTEGER PRIMARY KEY,
-    CAN_Cfg_File_Name TEXT,
-    CAN_Configuration TEXT
-);
-""",
-'Can_BMS_Data_Table': """
-CREATE TABLE "Can_BMS_Data_Table"
-(
-    Test_ID INTEGER,
-    Data_Point INTEGER,
-    CAN_MV_Index INTEGER,
-    Signal_Value_X REAL,
-    PRIMARY KEY (Test_ID, Data_Point, CAN_MV_Index),
-    FOREIGN KEY (Test_ID, Data_Point)
-        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
-);
-""",
-}
+);"""}
 
 mdb_create_indices = {
     "Channel_Normal_Table": """
@@ -437,28 +371,27 @@ def mdb_get_data_text(s3db, filename, table):
         r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
         re.IGNORECASE
     )
+    # TODO after dropping Python 2 support - use Popen as contextmanager
     try:
-        # Initialize values to avoid NameError in except clause
-        mdb_output = ''
-        insert_match = None
-        with sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
-                      bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE,
-                      universal_newlines=True) as mdb_sql:
-            mdb_output = mdb_sql.stdout.read()
-            while len(mdb_output) > 0:
-                insert_match = insert_pattern.match(mdb_output)
-                s3db.execute(insert_match.group())
-                mdb_output = mdb_output[insert_match.end():]
-                mdb_output += mdb_sql.stdout.read()
-            s3db.commit()
+        mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
+                           bufsize=-1, stdin=None, stdout=sp.PIPE,
+                           universal_newlines=True)
     except OSError as e:
         if e.errno == 2:
             raise RuntimeError('Could not locate the `mdb-export` executable. '
                                'Check that mdbtools is properly installed.')
         else:
             raise
+    try:
+        # Initialize values to avoid NameError in except clause
+        mdb_output = ''
+        insert_match = None
+        mdb_output = mdb_sql.stdout.read()
+        while len(mdb_output) > 0:
+            insert_match = insert_pattern.match(mdb_output)
+            s3db.execute(insert_match.group())
+            mdb_output = mdb_output[insert_match.end():]
+        s3db.commit()
     except BaseException:
         print("Error while importing %s" % table)
         if mdb_output:
@@ -466,32 +399,38 @@ def mdb_get_data_text(s3db, filename, table):
             if insert_match:
                 print("insert_re match:", insert_match)
         raise
+    finally:
+        mdb_sql.terminate()
 
 
 def mdb_get_data_numeric(s3db, filename, table):
     print("Reading %s..." % table)
+    # TODO after dropping Python 2 support - use Popen as contextmanager
     try:
-        with sp.Popen(['mdb-export', filename, table],
-                      bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE,
-                      universal_newlines=True) as mdb_sql:
-            mdb_csv = csv.reader(mdb_sql.stdout)
-            mdb_headers = next(mdb_csv)
-            quoted_headers = ['"%s"' % h for h in mdb_headers]
-            joined_headers = ', '.join(quoted_headers)
-            joined_placemarks = ', '.join(['?' for h in mdb_headers])
-            insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
-                table,
-                joined_headers,
-                joined_placemarks,
-            )
-            s3db.executemany(insert_stmt, mdb_csv)
-            s3db.commit()
+        mdb_sql = sp.Popen(['mdb-export', filename, table],
+                           bufsize=-1, stdin=None, stdout=sp.PIPE,
+                           universal_newlines=True)
     except OSError as e:
         if e.errno == 2:
             raise RuntimeError('Could not locate the `mdb-export` executable. '
                                'Check that mdbtools is properly installed.')
         else:
             raise
+    try:
+        mdb_csv = csv.reader(mdb_sql.stdout)
+        mdb_headers = next(mdb_csv)
+        quoted_headers = ['"%s"' % h for h in mdb_headers]
+        joined_headers = ', '.join(quoted_headers)
+        joined_placemarks = ', '.join(['?' for h in mdb_headers])
+        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
+            table,
+            joined_headers,
+            joined_placemarks,
+        )
+        s3db.executemany(insert_stmt, mdb_csv)
+        s3db.commit()
+    finally:
+        mdb_sql.terminate()
 
 
 def mdb_get_data(s3db, filename, table):
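
The 0.2.0 side relies on `Popen` being usable as a context manager (Python 3.2+): `__exit__` closes the pipe and waits for `mdb-export` to exit, which replaces the explicit `finally: mdb_sql.terminate()` bookkeeping on the 0.1.0 side. A minimal sketch, using `echo` as a stand-in for `mdb-export` (assumes a POSIX system):

````python
import subprocess as sp

with sp.Popen(['echo', 'INSERT INTO "t" (x) VALUES (1);'],
              stdout=sp.PIPE, universal_newlines=True) as proc:
    output = proc.stdout.read()
# Here the pipe is closed and the child process has been reaped.
````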
@@ -503,69 +442,32 @@ def mdb_get_data(s3db, filename, table):
         raise ValueError("'%s' is in neither mdb_tables_text nor mdb_tables_numeric" % table)
 
 
-def mdb_get_version(filename):
-    """Get the version number from an Arbin .res file.
-
-    Reads the Version_Table and parses the version from Version_Schema_Field.
-    """
-    print("Reading version number...")
-    try:
-        with sp.Popen(['mdb-export', filename, 'Version_Table'],
-                      bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE,
-                      universal_newlines=True) as mdb_sql:
-            mdb_csv = csv.reader(mdb_sql.stdout)
-            mdb_headers = next(mdb_csv)
-            mdb_values = next(mdb_csv)
-            try:
-                next(mdb_csv)
-            except StopIteration:
-                pass
-            else:
-                raise ValueError('Version_Table of %s lists multiple versions' % filename)
-    except OSError as e:
-        if e.errno == 2:
-            raise RuntimeError('Could not locate the `mdb-export` executable. '
-                               'Check that mdbtools is properly installed.')
-        else:
-            raise
-    if 'Version_Schema_Field' not in mdb_headers:
-        raise ValueError('Version_Table of %s does not contain a Version_Schema_Field column'
-                         % filename)
-    version_fields = dict(zip(mdb_headers, mdb_values))
-    version_text = version_fields['Version_Schema_Field']
-    version_match = re.fullmatch('Results File ([.0-9]+)', version_text)
-    if not version_match:
-        raise ValueError('File version "%s" did not match expected format' % version_text)
-    version_string = version_match.group(1)
-    version_tuple = tuple(map(int, version_string.split('.')))
-    return version_tuple
-
-
 def convert_arbin_to_sqlite(input_file, output_file):
     """Read data from an Arbin .res data file and write to a sqlite file.
 
     Any data currently in the sqlite file will be erased!
     """
-    arbin_version = mdb_get_version(input_file)
     s3db = sqlite3.connect(output_file)
-    tables_to_convert = copy(mdb_tables)
-    if arbin_version >= (5, 23):
-        tables_to_convert.extend(mdb_5_23_tables)
-    if arbin_version >= (5, 26):
-        tables_to_convert.extend(mdb_5_26_tables)
-    for table in reversed(tables_to_convert):
+    for table in reversed(mdb_tables + mdb_5_23_tables):
         s3db.execute('DROP TABLE IF EXISTS "%s";' % table)
-    for table in tables_to_convert:
+    for table in mdb_tables:
         s3db.executescript(mdb_create_scripts[table])
         mdb_get_data(s3db, input_file, table)
         if table in mdb_create_indices:
             print("Creating indices for %s..." % table)
             s3db.executescript(mdb_create_indices[table])
+    csr = s3db.execute("SELECT Version_Schema_Field FROM Version_Table;")
+    version_text, = csr.fetchone()
+    if (version_text == "Results File 5.23"):
+        for table in mdb_5_23_tables:
+            s3db.executescript(mdb_create_scripts[table])
+            mdb_get_data(input_file, table)
+            if table in mdb_create_indices:
+                s3db.executescript(mdb_create_indices[table])
     print("Creating helper table for capacity and energy totals...")
     s3db.executescript(helper_table_script)
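
For reference, the removed `mdb_get_version` reduces the `Version_Schema_Field` text to a comparable tuple, which is what drives the table gating above; a sketch with a sample value:

````python
import re

version_text = 'Results File 5.26'  # sample Version_Schema_Field content
version_match = re.fullmatch('Results File ([.0-9]+)', version_text)
if not version_match:
    raise ValueError('File version "%s" did not match expected format'
                     % version_text)
version_tuple = tuple(map(int, version_match.group(1).split('.')))
assert version_tuple == (5, 26)
assert version_tuple >= (5, 23)  # tuple comparison selects the extra tables
````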

View File

@@ -24,5 +24,4 @@ https://files.figshare.com/1780530/121_CA_455nm_6V_30min_C01.mpt
 https://files.figshare.com/1780526/CV_C01.mpr
 https://files.figshare.com/1780527/CV_C01.mpt
 https://files.figshare.com/14752538/C019P-0ppb-A_C01.mpr
-https://files.figshare.com/25331510/UM34_Test005E.res
 END_FILELIST

View File

@@ -9,7 +9,7 @@ with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
 setup(
     name='galvani',
-    version='0.2.0',
+    version='0.1.0',
     description='Open and process battery charger log data files',
     long_description=readme,
     long_description_content_type="text/markdown",
@@ -23,8 +23,6 @@ setup(
         'Intended Audience :: Science/Research',
         'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
         'Natural Language :: English',
-        'Programming Language :: Python :: 3 :: Only',
-        'Topic :: Scientific/Engineering :: Chemistry',
     ],
     packages=['galvani'],
     entry_points={
@@ -32,7 +30,6 @@ setup(
             'res2sqlite = galvani.res2sqlite:main',
         ],
     },
-    python_requires='>=3.6',
     install_requires=['numpy'],
     tests_require=['pytest'],
 )

View File

@@ -9,8 +9,8 @@ import pytest
 
 from galvani import res2sqlite
 
-have_mdbtools = (subprocess.call(['which', 'mdb-export'],
-                                 stdout=subprocess.DEVNULL) == 0)
+# TODO - change to subprocess.DEVNULL when python 2 support is removed
+have_mdbtools = (subprocess.call(['which', 'mdb-export'], stdout=None) == 0)
 
 
 def test_res2sqlite_help():
@@ -32,7 +32,7 @@ def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir):
 
 @pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools')
-@pytest.mark.parametrize('basename', ['arbin1', 'UM34_Test005E'])
+@pytest.mark.parametrize('basename', ['arbin1'])
 def test_convert_Arbin_to_sqlite_function(testdata_dir, tmpdir, basename):
     """Convert an Arbin file to SQLite using the functional interface."""
     res_file = os.path.join(testdata_dir, basename + '.res')

View File

@@ -9,7 +9,7 @@ from numpy.testing import assert_array_almost_equal, assert_array_equal
 import pytest
 
 from galvani import BioLogic, MPTfile, MPRfile
-from galvani.BioLogic import MPTfileCSV  # not exported
+from galvani.BioLogic import MPTfileCSV, str3  # not exported
 
 
 def test_open_MPT(testdata_dir):
@@ -103,7 +103,7 @@ def timestamp_from_comments(comments):
     for line in comments:
         time_match = re.match(b'Acquisition started on : ([0-9/]+ [0-9:]+)', line)
         if time_match:
-            timestamp = datetime.strptime(time_match.group(1).decode('ascii'),
+            timestamp = datetime.strptime(str3(time_match.group(1)),
                                           '%m/%d/%Y %H:%M:%S')
             return timestamp
     raise AttributeError("No timestamp in comments")

View File

@@ -1,5 +1,5 @@
 [tox]
-envlist = py36,py37,py38,py39
+envlist = py27,py35,py37
 [testenv]
 deps =
     flake8