Mirror of https://github.com/echemdata/galvani.git (synced 2025-12-14 01:15:34 +00:00)

Merge branch 'master' into test-and-fix-Arbin
.flake8 (new file, 2 lines added)
@@ -0,0 +1,2 @@
+# This file will be ignored - see http://flake8.pycqa.org/en/2.6.0/config.html#per-project
+# Edit the [flake8] section in tox.ini instead
@@ -10,6 +10,7 @@ from os import SEEK_SET
 import time
 from datetime import date, datetime, timedelta
 from collections import defaultdict, OrderedDict
+import functools

 import numpy as np

@@ -18,7 +19,7 @@ if sys.version_info.major <= 2:
     str3 = str
     from string import maketrans
 else:
-    str3 = lambda b: str(b, encoding='ascii')
+    str3 = functools.partial(str, encoding='ascii')
     maketrans = bytes.maketrans

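The lambda-to-partial change keeps str3 doing the same thing on Python 3 (decode ASCII bytes to str) while avoiding the flake8 warning about assigning a lambda to a name. A minimal sketch of the equivalence, outside the commit:

    import functools

    str3 = functools.partial(str, encoding='ascii')
    assert str3(b'Nb header lines : 5') == 'Nb header lines : 5'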
@@ -71,19 +72,20 @@ def MPTfile(file_or_path):
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

     # TODO use rb'string' here once Python 2 is no longer supported
-    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file))
+    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$',
+                                next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)

-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]

     fieldnames = str3(next(mpt_file)).strip().split('\t')
     record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))

-    ## Must be able to parse files where commas are used for decimal points
+    # Must be able to parse files where commas are used for decimal points
     converter_dict = dict(((i, comma_converter)
                            for i in range(len(fieldnames))))
     mpt_array = np.loadtxt(mpt_file, dtype=record_type,
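For context, the converters handed to np.loadtxt here swap decimal commas for points before float conversion. A rough sketch of what such a converter does (the real comma_converter is defined elsewhere in BioLogic.py and may differ in detail):

    def comma_converter(number_bytes):
        # Turn text such as b'3,14' into the float 3.14.
        return float(number_bytes.decode('ascii').replace(',', '.'))

    assert comma_converter(b'3,14') == 3.14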
@@ -113,8 +115,8 @@ def MPTfileCSV(file_or_path):
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)

-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]

     mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')
@@ -256,7 +258,8 @@ def read_VMP_modules(fileobj, read_module_data=True):
         if len(module_magic) == 0:  # end of file
             break
         elif module_magic != b'MODULE':
-            raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic)
+            raise ValueError("Found %r, expecting start of new VMP MODULE"
+                             % module_magic)

         hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
         if len(hdr_bytes) < VMPmodule_hdr.itemsize:
@@ -280,6 +283,9 @@ def read_VMP_modules(fileobj, read_module_data=True):
             fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)


+MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
+
+
 class MPRfile:
     """Bio-Logic .mpr file

@@ -302,10 +308,8 @@ class MPRfile:
             mpr_file = open(file_or_path, 'rb')
         else:
             mpr_file = file_or_path
-        mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a                         \x00\x00\x00\x00'
-        magic = mpr_file.read(len(mpr_magic))
-        if magic != mpr_magic:
+        magic = mpr_file.read(len(MPR_MAGIC))
+        if magic != MPR_MAGIC:
             raise ValueError('Invalid magic for .mpr file: %s' % magic)

         modules = list(read_VMP_modules(mpr_file))
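The new module-level MPR_MAGIC is meant to reproduce the padded literal that the constructor previously built inline: 'BIO-LOGIC MODULAR FILE' plus 0x1a, space-padded to 48 bytes, followed by four NUL bytes. A standalone check of that reading:

    MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
    assert len(MPR_MAGIC) == 52   # 48 padded bytes plus four NUL bytes
    assert MPR_MAGIC.startswith(b'BIO-LOGIC MODULAR FILE\x1a')
    assert MPR_MAGIC.endswith(b'\x00\x00\x00\x00')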
@@ -326,7 +330,7 @@ class MPRfile:
         elif data_module['version'] == 2:
             column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
                                          count=n_columns)
-            ## There is 405 bytes of data before the main array starts
+            # There is 405 bytes of data before the main array starts
             remaining_headers = data_module['data'][5 + 2 * n_columns:405]
             main_data = data_module['data'][405:]
         else:
@@ -342,8 +346,8 @@ class MPRfile:
         self.data = np.frombuffer(main_data, dtype=self.dtype)
         assert(self.data.shape[0] == n_data_points)

-        ## No idea what these 'column types' mean or even if they are actually
-        ## column types at all
+        # No idea what these 'column types' mean or even if they are actually
+        # column types at all
         self.version = int(data_module['version'])
         self.cols = column_types
         self.npts = n_data_points
@@ -372,9 +376,9 @@ class MPRfile:
         tm = time.strptime(str3(log_module['date']), '%m-%d-%y')
         self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)

-        ## There is a timestamp at either 465 or 469 bytes
-        ## I can't find any reason why it is one or the other in any
-        ## given file
+        # There is a timestamp at either 465 or 469 bytes
+        # I can't find any reason why it is one or the other in any
+        # given file
        ole_timestamp1 = np.frombuffer(log_module['data'][465:],
                                       dtype='<f8', count=1)
        ole_timestamp2 = np.frombuffer(log_module['data'][469:],
@@ -392,7 +396,7 @@ class MPRfile:
             ole_timestamp = ole_timestamp3
         elif ole_timestamp4 > 40000 and ole_timestamp4 < 50000:
             ole_timestamp = ole_timestamp4

         else:
             raise ValueError("Could not find timestamp in the LOG module")

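The 40000-50000 window used to pick a plausible timestamp corresponds to OLE automation dates, i.e. days counted from 1899-12-30, landing roughly between 2009 and 2036. A quick check of that assumption:

    from datetime import datetime, timedelta

    ole_base = datetime(1899, 12, 30)
    assert (ole_base + timedelta(days=40000)).year == 2009
    assert (ole_base + timedelta(days=50000)).year == 2036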
@@ -400,10 +404,10 @@ class MPRfile:
         ole_timedelta = timedelta(days=ole_timestamp[0])
         self.timestamp = ole_base + ole_timedelta
         if self.startdate != self.timestamp.date():
-            raise ValueError("""Date mismatch:
-                Start date: %s
-                End date: %s
-                Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp))
+            raise ValueError("Date mismatch:\n"
+                             + "  Start date: %s\n" % self.startdate
+                             + "  End date: %s\n" % self.enddate
+                             + "  Timestamp: %s\n" % self.timestamp)

     def get_flag(self, flagname):
         if flagname in self.flags_dict:
@@ -1 +1,3 @@
-from .BioLogic import MPTfile, MPRfile
+from .BioLogic import MPRfile, MPTfile
+
+__all__ = ['MPRfile', 'MPTfile']
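Defining __all__ pins down what a star import exposes from the package; with this change `from galvani import *` brings in exactly the two readers. For example:

    import galvani

    assert galvani.__all__ == ['MPRfile', 'MPTfile']
    from galvani import *   # imports MPRfile and MPTfile only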
@@ -7,8 +7,8 @@ import csv
 import argparse


-## The following scripts are adapted from the result of running
-## $ mdb-schema <result.res> oracle
+# The following scripts are adapted from the result of running
+# $ mdb-schema <result.res> oracle

 mdb_tables = ["Version_Table", "Global_Table", "Resume_Table",
               "Channel_Normal_Table", "Channel_Statistic_Table",
@@ -126,7 +126,8 @@ CREATE TABLE Channel_Statistic_Table
 -- Version 1.14 ends here, version 5.23 continues
     Charge_Time REAL DEFAULT NULL,
     Discharge_Time REAL DEFAULT NULL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
 "Auxiliary_Table": """
 CREATE TABLE Auxiliary_Table
@@ -137,7 +138,8 @@ CREATE TABLE Auxiliary_Table
     Data_Type INTEGER,
     X REAL,
     "dX/dt" REAL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
 "Event_Table": """
 CREATE TABLE Event_Table
@@ -220,9 +222,10 @@ CREATE TABLE Smart_Battery_Data_Table
     ChargingCurrent REAL DEFAULT NULL,
     ChargingVoltage REAL DEFAULT NULL,
     ManufacturerData REAL DEFAULT NULL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 ); """,
-## The following tables are not present in version 1.14
+# The following tables are not present in version 1.14
 'MCell_Aci_Data_Table': """
 CREATE TABLE MCell_Aci_Data_Table
 (
@@ -233,7 +236,8 @@ CREATE TABLE MCell_Aci_Data_Table
     Phase_Shift REAL,
     Voltage REAL,
     Current REAL,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );""",
 'Aux_Global_Data_Table': """
 CREATE TABLE Aux_Global_Data_Table
@@ -288,7 +292,8 @@ CREATE TABLE Smart_Battery_Clock_Stretch_Table
     VCELL3 INTEGER,
     VCELL2 INTEGER,
     VCELL1 INTEGER,
-    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
+    FOREIGN KEY (Test_ID, Data_Point)
+        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
 );"""}

 mdb_create_indices = {
@@ -306,18 +311,21 @@ CREATE TEMPORARY TABLE capacity_helper(
     Discharge_Capacity REAL NOT NULL,
     Charge_Energy REAL NOT NULL,
     Discharge_Energy REAL NOT NULL,
-    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
+    FOREIGN KEY (Test_ID, Cycle_Index)
+        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
 );

 INSERT INTO capacity_helper
-SELECT Test_ID, Cycle_Index, max(Charge_Capacity), max(Discharge_Capacity), max(Charge_Energy), max(Discharge_Energy)
-FROM Channel_Normal_Table
+SELECT Test_ID, Cycle_Index,
+       max(Charge_Capacity), max(Discharge_Capacity),
+       max(Charge_Energy), max(Discharge_Energy)
+FROM Channel_Normal_Table
 GROUP BY Test_ID, Cycle_Index;

 -- ## Alternative way of selecting ##
 -- select *
 -- from Channel_Normal_Table as a join Channel_Normal_Table as b
 -- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1
 --     and a.Charge_Capacity < b.Charge_Capacity);

 DROP TABLE IF EXISTS Capacity_Sum_Table;
@@ -328,12 +336,15 @@ CREATE TABLE Capacity_Sum_Table(
     Discharge_Capacity_Sum REAL NOT NULL,
     Charge_Energy_Sum REAL NOT NULL,
     Discharge_Energy_Sum REAL NOT NULL,
-    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
+    FOREIGN KEY (Test_ID, Cycle_Index)
+        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
 );

 INSERT INTO Capacity_Sum_Table
-SELECT a.Test_ID, a.Cycle_Index, total(b.Charge_Capacity), total(b.Discharge_Capacity), total(b.Charge_Energy), total(b.Discharge_Energy)
-FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
+SELECT a.Test_ID, a.Cycle_Index,
+       total(b.Charge_Capacity), total(b.Discharge_Capacity),
+       total(b.Charge_Energy), total(b.Discharge_Energy)
+FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
 ON (a.Test_ID = b.Test_ID AND a.Cycle_Index > b.Cycle_Index)
 GROUP BY a.Test_ID, a.Cycle_Index;

@@ -342,16 +353,24 @@ DROP TABLE capacity_helper;
 CREATE VIEW IF NOT EXISTS Capacity_View
 AS SELECT Test_ID, Data_Point, Test_Time, Step_Time, DateTime,
           Step_Index, Cycle_Index, Current, Voltage, "dV/dt",
-          Discharge_Capacity + Discharge_Capacity_Sum - Charge_Capacity - Charge_Capacity_Sum AS Net_Capacity,
-          Discharge_Capacity + Discharge_Capacity_Sum + Charge_Capacity + Charge_Capacity_Sum AS Gross_Capacity,
-          Discharge_Energy + Discharge_Energy_Sum - Charge_Energy - Charge_Energy_Sum AS Net_Energy,
-          Discharge_Energy + Discharge_Energy_Sum + Charge_Energy + Charge_Energy_Sum AS Gross_Energy
+          ( (Discharge_Capacity + Discharge_Capacity_Sum)
+            - (Charge_Capacity + Charge_Capacity_Sum) ) AS Net_Capacity,
+          ( (Discharge_Capacity + Discharge_Capacity_Sum)
+            + (Charge_Capacity + Charge_Capacity_Sum) ) AS Gross_Capacity,
+          ( (Discharge_Energy + Discharge_Energy_Sum)
+            - (Charge_Energy + Charge_Energy_Sum) ) AS Net_Energy,
+          ( (Discharge_Energy + Discharge_Energy_Sum)
+            + (Charge_Energy + Charge_Energy_Sum) ) AS Gross_Energy
 FROM Channel_Normal_Table NATURAL JOIN Capacity_Sum_Table;
 """


 def mdb_get_data_text(s3db, filename, table):
     print("Reading %s..." % table)
+    insert_pattern = re.compile(
+        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
+        re.IGNORECASE
+    )
     # TODO after dropping Python 2 support - use Popen as contextmanager
     try:
         mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
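The parenthesised rewrite of the Capacity_View columns is purely a reflow: grouping the discharge terms and the charge terms first does not change the arithmetic. A throwaway numeric check, with made-up values standing in for the capacity columns and their running sums:

    D, D_sum, C, C_sum = 5.0, 3.0, 2.0, 1.0
    assert (D + D_sum) - (C + C_sum) == D + D_sum - C - C_sum   # Net_Capacity
    assert (D + D_sum) + (C + C_sum) == D + D_sum + C + C_sum   # Gross_Capacity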
@@ -369,12 +388,11 @@ def mdb_get_data_text(s3db, filename, table):
         insert_match = None
         mdb_output = mdb_sql.stdout.read()
         while len(mdb_output) > 0:
-            insert_match = re.match(r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
-                                    mdb_output, re.IGNORECASE)
+            insert_match = insert_pattern.match(mdb_output)
             s3db.execute(insert_match.group())
             mdb_output = mdb_output[insert_match.end():]
         s3db.commit()
-    except:
+    except BaseException:
         print("Error while importing %s" % table)
         if mdb_output:
             print("Remaining mdb-export output:", mdb_output)
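Compiling the INSERT-matching regex once per table read, instead of on every loop iteration, avoids repeated pattern parsing while matching exactly the same statements. A rough sketch of how the compiled pattern consumes mdb-export output, using a made-up single-row example:

    import re

    insert_pattern = re.compile(
        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
        re.IGNORECASE
    )
    output = 'INSERT INTO "Version_Table" ("Version_Schema_Field") VALUES ("Results File 5.23");\n'
    match = insert_pattern.match(output)
    assert match is not None and match.end() == len(output)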
@@ -404,8 +422,11 @@ def mdb_get_data_numeric(s3db, filename, table):
         quoted_headers = ['"%s"' % h for h in mdb_headers]
         joined_headers = ', '.join(quoted_headers)
         joined_placemarks = ', '.join(['?' for h in mdb_headers])
-        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(table,
-                                                                     joined_headers, joined_placemarks)
+        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
+            table,
+            joined_headers,
+            joined_placemarks,
+        )
         s3db.executemany(insert_stmt, mdb_csv)
         s3db.commit()
     finally:
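The reflowed .format() call builds the same parameterised statement as before; a sketch with a hypothetical subset of Channel_Normal_Table columns showing what executemany ends up receiving:

    table = 'Channel_Normal_Table'
    mdb_headers = ['Test_ID', 'Data_Point', 'Voltage']
    quoted_headers = ['"%s"' % h for h in mdb_headers]
    insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
        table,
        ', '.join(quoted_headers),
        ', '.join(['?' for h in mdb_headers]),
    )
    assert insert_stmt == ('INSERT INTO "Channel_Normal_Table" '
                           '("Test_ID", "Data_Point", "Voltage") VALUES (?, ?, ?);')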
@@ -427,34 +448,37 @@ def convert_arbin_to_sqlite(input_file, output_file):
     Any data currently in the sqlite file will be erased!
     """
     s3db = sqlite3.connect(output_file)


     for table in reversed(mdb_tables + mdb_5_23_tables):
         s3db.execute('DROP TABLE IF EXISTS "%s";' % table)

     for table in mdb_tables:
         s3db.executescript(mdb_create_scripts[table])
         mdb_get_data(s3db, input_file, table)
         if table in mdb_create_indices:
             print("Creating indices for %s..." % table)
             s3db.executescript(mdb_create_indices[table])

-    if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"):
+    csr = s3db.execute("SELECT Version_Schema_Field FROM Version_Table;")
+    version_text, = csr.fetchone()
+    if (version_text == "Results File 5.23"):
         for table in mdb_5_23_tables:
             s3db.executescript(mdb_create_scripts[table])
             mdb_get_data(input_file, table)
             if table in mdb_create_indices:
                 s3db.executescript(mdb_create_indices[table])

     print("Creating helper table for capacity and energy totals...")
     s3db.executescript(helper_table_script)

     print("Vacuuming database...")
     s3db.executescript("VACUUM; ANALYZE;")


 def main(argv=None):
-    parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export")
+    parser = argparse.ArgumentParser(
+        description="Convert Arbin .res files to sqlite3 databases using mdb-export",
+    )
     parser.add_argument('input_file', type=str)  # need file name to pass to sp.Popen
     parser.add_argument('output_file', type=str)  # need file name to pass to sqlite3.connect

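For context, the converter can be driven either through the command-line entry point or imported directly; a usage sketch with placeholder file names, assuming mdb-export from mdbtools is available on the PATH:

    from galvani.res2sqlite import convert_arbin_to_sqlite

    convert_arbin_to_sqlite('experiment.res', 'experiment.s3db')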
@@ -127,7 +127,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     assert_array_equal(mpr.get_flag("control changes"), mpt["control changes"])
     if "Ns changes" in mpt.dtype.fields:
         assert_array_equal(mpr.get_flag("Ns changes"), mpt["Ns changes"])
-    ## Nothing uses the 0x40 bit of the flags
+    # Nothing uses the 0x40 bit of the flags
     assert_array_equal(mpr.get_flag("counter inc."), mpt["counter inc."])

     assert_array_almost_equal(mpr.data["time/s"],
@@ -144,10 +144,10 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     assert_field_matches("dQ/mA.h", decimal=17)  # 64 bit float precision
     assert_field_matches("P/W", decimal=10)  # 32 bit float precision for 1.xxE-5
     assert_field_matches("I/mA", decimal=6)  # 32 bit float precision

     assert_field_exact("cycle number")
     assert_field_matches("(Q-Qo)/C", decimal=6)  # 32 bit float precision

     try:
         assert timestamp_from_comments(comments) == mpr.timestamp
     except AttributeError: