Mirror of https://github.com/echemdata/galvani.git (synced 2025-12-14 09:15:34 +00:00)

Compare commits

88 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 74413c231e | |
| | ca8845bcc9 | |
| | db9b0c0669 | |
| | 995bcf3d71 | |
| | 63f674d897 | |
| | 21085454fd | |
| | 23761dd5bf | |
| | 8cfc84922b | |
| | 5baa6f6a7f | |
| | 0757306be4 | |
| | 68e00a30ce | |
| | a1a056d304 | |
| | c90d604096 | |
| | c25e755296 | |
| | 60639299b8 | |
| | a0700b0276 | |
| | f0c3c6f6c5 | |
| | 2a75b3bb19 | |
| | a2b3b26917 | |
| | 0c5348deeb | |
| | 90b113a0ab | |
| | 18a1ce6848 | |
| | 18e8a450fa | |
| | 4098890c05 | |
| | 68bac0a5aa | |
| | a343d37b2a | |
| | e67edf9e17 | |
| | 37487da5d3 | |
| | 8370a58109 | |
| | c703f31da2 | |
| | 81fbb3dde3 | |
| | b2fb092ea3 | |
| | 599413c42f | |
| | 87825b7891 | |
| | c2e7a1602f | |
| | 9ba43ecc2e | |
| | bfdc9aae28 | |
| | a74a0267c2 | |
| | 72d79146e6 | |
| | 0c0b48ddcc | |
| | e71076bda3 | |
| | 0ea049e279 | |
| | a4cf8c1420 | |
| | aab135391a | |
| | 8abab57c06 | |
| | 3440047dc2 | |
| | d137bfccef | |
| | ed43de1326 | |
| | 1c8335289a | |
| | 6787a7ec03 | |
| | e5aada3a85 | |
| | a41b40c7a4 | |
| | 2a36713b06 | |
| | f2b62265b9 | |
| | 61e2ac8f57 | |
| | c401aca741 | |
| | 1f57e48602 | |
| | 6b0f8b6d37 | |
| | 56a321f8e3 | |
| | d991cd496e | |
| | 531cfc6a42 | |
| | ef1ea9a2f4 | |
| | b3c5f36e11 | |
| | d1d53e97fa | |
| | 4ba61aa5d8 | |
| | 4381b02242 | |
| | 846a5b3149 | |
| | 6a8fbe71a4 | |
| | 557e755f03 | |
| | a1b73867ea | |
| | 5530a7a8ff | |
| | d6d6bf1ac7 | |
| | 85cc3f523e | |
| | b977115d6e | |
| | 2471148c21 | |
| | 7a5887fb38 | |
| | 2738396c9e | |
| | dcc8ec7fcc | |
| | b08c2f4435 | |
| | 1bcbc16bab | |
| | e52efeb9bd | |
| | d1e8616f1e | |
| | a618f75bb6 | |
| | b110162763 | |
| | de29b0863c | |
| | 4365c08e8b | |
| | 880b4a0a2d | |
| | da67a36308 | |
.flake8 (new file, 2 lines)
@@ -0,0 +1,2 @@
# This file will be ignored - see http://flake8.pycqa.org/en/2.6.0/config.html#per-project
# Edit the [flake8] section in tox.ini instead
.travis.yml
@@ -3,11 +3,13 @@ language: python
cache:
  directories:
    - .tox
    - .pytest_cache
    - tests/testdata
python:
  - "2.7"
  - "3.5"
  # - "3.7"  # Python 3.7 is not available on travis CI yet
  - "3.6"
  - "3.7"
  - "3.8"
  - "3.9"
install:
  - pip install tox-travis
  - sh get_testdata.sh
README.md
@@ -7,6 +7,14 @@ Read proprietary file formats from electrochemical test stations

Use the `MPRfile` class from BioLogic.py (exported in the main package)

````
from galvani import BioLogic
import pandas as pd

mpr_file = BioLogic.MPRfile('test.mpr')
df = pd.DataFrame(mpr_file.data)
````

## Arbin .res files ##

Use the res2sqlite.py script to convert the .res file to a sqlite3 database
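The same conversion is also available from Python through the functional interface shown later in this diff. A minimal sketch (the file names are placeholders, and mdbtools' `mdb-export` must be installed):

```python
from galvani.res2sqlite import convert_arbin_to_sqlite

# Converts the Access-format .res file into a sqlite3 database file.
# Requires the `mdb-export` tool from mdbtools to be on the PATH.
convert_arbin_to_sqlite('input.res', 'output.s3db')
```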
galvani/BioLogic.py
@@ -3,25 +3,16 @@

__all__ = ['MPTfileCSV', 'MPTfile']

import sys
import re
import csv
from os import SEEK_SET
import time
from datetime import date, datetime, timedelta
from collections import OrderedDict
from collections import defaultdict, OrderedDict

import numpy as np


if sys.version_info.major <= 2:
    str3 = str
    from string import maketrans
else:
    str3 = lambda b: str(b, encoding='ascii')
    maketrans = bytes.maketrans


def fieldname_to_dtype(fieldname):
    """Converts a column header from the MPT file into a tuple of
    canonical name and appropriate numpy dtype"""

@@ -48,13 +39,13 @@ def fieldname_to_dtype(fieldname):
    raise ValueError("Invalid column header: %s" % fieldname)


def comma_converter(float_string):
    """Convert numbers to floats whether the decimal point is '.' or ','"""
    trans_table = maketrans(b',', b'.')
    return float(float_string.translate(trans_table))
def comma_converter(float_text):
    """Convert text to float whether the decimal point is '.' or ','"""
    trans_table = bytes.maketrans(b',', b'.')
    return float(float_text.translate(trans_table))
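A quick illustration of the converter's behaviour on byte strings with either decimal convention:

```python
comma_converter(b'3,25')  # -> 3.25 (comma decimal point)
comma_converter(b'3.25')  # -> 3.25 (dot decimal point)
```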

def MPTfile(file_or_path):
def MPTfile(file_or_path, encoding='ascii'):
    """Opens .mpt files as numpy record arrays

    Checks for the correct headings, skips any comments and returns a

@@ -70,19 +61,20 @@ def MPTfile(file_or_path):
    if magic != b'EC-Lab ASCII FILE\r\n':
        raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

    nb_headers_match = re.match(b'Nb header lines : (\d+)\s*$', next(mpt_file))
    nb_headers_match = re.match(rb'Nb header lines : (\d+)\s*$',
                                next(mpt_file))
    nb_headers = int(nb_headers_match.group(1))
    if nb_headers < 3:
        raise ValueError("Too few header lines: %d" % nb_headers)

    ## The 'magic number' line, the 'Nb headers' line and the column headers
    ## make three lines. Every additional line is a comment line.
    # The 'magic number' line, the 'Nb headers' line and the column headers
    # make three lines. Every additional line is a comment line.
    comments = [next(mpt_file) for i in range(nb_headers - 3)]

    fieldnames = str3(next(mpt_file)).strip().split('\t')
    fieldnames = next(mpt_file).decode(encoding).strip().split('\t')
    record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))

    ## Must be able to parse files where commas are used for decimal points
    # Must be able to parse files where commas are used for decimal points
    converter_dict = dict(((i, comma_converter)
                           for i in range(len(fieldnames))))
    mpt_array = np.loadtxt(mpt_file, dtype=record_type,

@@ -107,13 +99,13 @@ def MPTfileCSV(file_or_path):
    if magic.rstrip() != 'EC-Lab ASCII FILE':
        raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

    nb_headers_match = re.match('Nb header lines : (\d+)\s*$', next(mpt_file))
    nb_headers_match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file))
    nb_headers = int(nb_headers_match.group(1))
    if nb_headers < 3:
        raise ValueError("Too few header lines: %d" % nb_headers)

    ## The 'magic number' line, the 'Nb headers' line and the column headers
    ## make three lines. Every additional line is a comment line.
    # The 'magic number' line, the 'Nb headers' line and the column headers
    # make three lines. Every additional line is a comment line.
    comments = [next(mpt_file) for i in range(nb_headers - 3)]

    mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')

@@ -143,109 +135,157 @@ VMPmodule_hdr = np.dtype([('shortname', 'S10'),
                          ('version', '<u4'),
                          ('date', 'S8')])

# Maps from colID to a tuple defining a numpy dtype
VMPdata_colID_dtype_map = {
    4: ('time/s', '<f8'),
    5: ('control/V/mA', '<f4'),
    6: ('Ewe/V', '<f4'),
    7: ('dQ/mA.h', '<f8'),
    8: ('I/mA', '<f4'),  # 8 is either I or <I> ??
    9: ('Ece/V', '<f4'),
    11: ('I/mA', '<f8'),
    13: ('(Q-Qo)/mA.h', '<f8'),
    16: ('Analog IN 1/V', '<f4'),
    19: ('control/V', '<f4'),
    20: ('control/mA', '<f4'),
    23: ('dQ/mA.h', '<f8'),  # Same as 7?
    24: ('cycle number', '<f8'),
    32: ('freq/Hz', '<f4'),
    33: ('|Ewe|/V', '<f4'),
    34: ('|I|/A', '<f4'),
    35: ('Phase(Z)/deg', '<f4'),
    36: ('|Z|/Ohm', '<f4'),
    37: ('Re(Z)/Ohm', '<f4'),
    38: ('-Im(Z)/Ohm', '<f4'),
    39: ('I Range', '<u2'),
    70: ('P/W', '<f4'),
    74: ('Energy/W.h', '<f8'),
    75: ('Analog OUT/V', '<f4'),
    76: ('<I>/mA', '<f4'),
    77: ('<Ewe>/V', '<f4'),
    78: ('Cs-2/µF-2', '<f4'),
    96: ('|Ece|/V', '<f4'),
    98: ('Phase(Zce)/deg', '<f4'),
    99: ('|Zce|/Ohm', '<f4'),
    100: ('Re(Zce)/Ohm', '<f4'),
    101: ('-Im(Zce)/Ohm', '<f4'),
    123: ('Energy charge/W.h', '<f8'),
    124: ('Energy discharge/W.h', '<f8'),
    125: ('Capacitance charge/µF', '<f8'),
    126: ('Capacitance discharge/µF', '<f8'),
    131: ('Ns', '<u2'),
    163: ('|Estack|/V', '<f4'),
    168: ('Rcmp/Ohm', '<f4'),
    169: ('Cs/µF', '<f4'),
    172: ('Cp/µF', '<f4'),
    173: ('Cp-2/µF-2', '<f4'),
    241: ('|E1|/V', '<f4'),
    242: ('|E2|/V', '<f4'),
    271: ('Phase(Z1) / deg', '<f4'),
    272: ('Phase(Z2) / deg', '<f4'),
    301: ('|Z1|/Ohm', '<f4'),
    302: ('|Z2|/Ohm', '<f4'),
    331: ('Re(Z1)/Ohm', '<f4'),
    332: ('Re(Z2)/Ohm', '<f4'),
    361: ('-Im(Z1)/Ohm', '<f4'),
    362: ('-Im(Z2)/Ohm', '<f4'),
    391: ('<E1>/V', '<f4'),
    392: ('<E2>/V', '<f4'),
    422: ('Phase(Zstack)/deg', '<f4'),
    423: ('|Zstack|/Ohm', '<f4'),
    424: ('Re(Zstack)/Ohm', '<f4'),
    425: ('-Im(Zstack)/Ohm', '<f4'),
    426: ('<Estack>/V', '<f4'),
    430: ('Phase(Zwe-ce)/deg', '<f4'),
    431: ('|Zwe-ce|/Ohm', '<f4'),
    432: ('Re(Zwe-ce)/Ohm', '<f4'),
    433: ('-Im(Zwe-ce)/Ohm', '<f4'),
    434: ('(Q-Qo)/C', '<f4'),
    435: ('dQ/C', '<f4'),
    441: ('<Ecv>/V', '<f4'),
    462: ('Temperature/°C', '<f4'),
    467: ('Q charge/discharge/mA.h', '<f8'),
    468: ('half cycle', '<u4'),
    469: ('z cycle', '<u4'),
    471: ('<Ece>/V', '<f4'),
    473: ('THD Ewe/%', '<f4'),
    474: ('THD I/%', '<f4'),
    476: ('NSD Ewe/%', '<f4'),
    477: ('NSD I/%', '<f4'),
    479: ('NSR Ewe/%', '<f4'),
    480: ('NSR I/%', '<f4'),
    486: ('|Ewe h2|/V', '<f4'),
    487: ('|Ewe h3|/V', '<f4'),
    488: ('|Ewe h4|/V', '<f4'),
    489: ('|Ewe h5|/V', '<f4'),
    490: ('|Ewe h6|/V', '<f4'),
    491: ('|Ewe h7|/V', '<f4'),
    492: ('|I h2|/A', '<f4'),
    493: ('|I h3|/A', '<f4'),
    494: ('|I h4|/A', '<f4'),
    495: ('|I h5|/A', '<f4'),
    496: ('|I h6|/A', '<f4'),
    497: ('|I h7|/A', '<f4'),
}

# These column IDs define flags which are all stored packed in a single byte
# The values in the map are (name, bitmask, dtype)
VMPdata_colID_flag_map = {
    1: ('mode', 0x03, np.uint8),
    2: ('ox/red', 0x04, np.bool_),
    3: ('error', 0x08, np.bool_),
    21: ('control changes', 0x10, np.bool_),
    31: ('Ns changes', 0x20, np.bool_),
    65: ('counter inc.', 0x80, np.bool_),
}
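To make the packing concrete: each entry pairs a bitmask with a target dtype, and a flag column is recovered by AND-ing the packed byte with its mask, exactly as `MPRfile.get_flag()` does further down. A minimal sketch:

```python
import numpy as np

flags = np.array([0x04, 0x00, 0x0C], dtype=np.uint8)       # example packed 'flags' bytes
ox_red = np.array(flags & np.uint8(0x04), dtype=np.bool_)  # colID 2: 'ox/red'
# ox_red -> array([ True, False,  True])
```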

def VMPdata_dtype_from_colIDs(colIDs):
    dtype_dict = OrderedDict()
    """Get a numpy record type from a list of column ID numbers.

    The binary layout of the data in the MPR file is described by the sequence
    of column ID numbers in the file header. This function converts that
    sequence into a numpy dtype which can then be used to load data from the
    file with np.frombuffer().

    Some column IDs refer to small values which are packed into a single byte.
    The second return value is a dict describing the bit masks with which to
    extract these columns from the flags byte.

    """
    type_list = []
    field_name_counts = defaultdict(int)
    flags_dict = OrderedDict()
    flags2_dict = OrderedDict()
    for colID in colIDs:
        if colID in (1, 2, 3, 21, 31, 65):
            dtype_dict['flags'] = 'u1'
            if colID == 1:
                flags_dict['mode'] = (np.uint8(0x03), np.uint8)
            elif colID == 2:
                flags_dict['ox/red'] = (np.uint8(0x04), np.bool_)
            elif colID == 3:
                flags_dict['error'] = (np.uint8(0x08), np.bool_)
            elif colID == 21:
                flags_dict['control changes'] = (np.uint8(0x10), np.bool_)
            elif colID == 31:
                flags_dict['Ns changes'] = (np.uint8(0x20), np.bool_)
            elif colID == 65:
                flags_dict['counter inc.'] = (np.uint8(0x80), np.bool_)
        if colID in VMPdata_colID_flag_map:
            # Some column IDs represent boolean flags or small integers
            # These are all packed into a single 'flags' byte whose position
            # in the overall record is determined by the position of the first
            # column ID of flag type. If there are several flags present,
            # there is still only one 'flags' int
            if 'flags' not in field_name_counts:
                type_list.append(('flags', 'u1'))
                field_name_counts['flags'] = 1
            flag_name, flag_mask, flag_type = VMPdata_colID_flag_map[colID]
            # TODO what happens if a flag colID has already been seen
            # i.e. if flag_name is already present in flags_dict?
            # Does it create a second 'flags' byte in the record?
            flags_dict[flag_name] = (np.uint8(flag_mask), flag_type)
        elif colID in VMPdata_colID_dtype_map:
            field_name, field_type = VMPdata_colID_dtype_map[colID]
            field_name_counts[field_name] += 1
            count = field_name_counts[field_name]
            if count > 1:
                unique_field_name = '%s %d' % (field_name, count)
            else:
                raise NotImplementedError("flag %d not implemented" % colID)
        elif colID == 4:
            dtype_dict['time/s'] = '<f8'
        elif colID == 5:
            dtype_dict['control/V/mA'] = '<f4'
        # 6 is Ewe, 77 is <Ewe>, I don't see the difference
        elif colID in (6, 77):
            dtype_dict['Ewe/V'] = '<f4'
        # Can't see any difference between 7 and 23
        elif colID in (7, 23):
            dtype_dict['dQ/mA.h'] = '<f8'
        # 76 is <I>, 8 is either I or <I> ??
        elif colID in (8, 76):
            dtype_dict['I/mA'] = '<f4'
        elif colID == 9:
            dtype_dict['Ece/V'] = '<f4'
        elif colID == 11:
            dtype_dict['I/mA'] = '<f8'
        elif colID == 13:
            dtype_dict['(Q-Qo)/mA.h'] = '<f8'
        elif colID == 19:
            dtype_dict['control/V'] = '<f4'
        elif colID == 20:
            dtype_dict['control/mA'] = '<f4'
        elif colID == 24:
            dtype_dict['cycle number'] = '<f8'
        elif colID == 32:
            dtype_dict['freq/Hz'] = '<f4'
        elif colID == 33:
            dtype_dict['|Ewe|/V'] = '<f4'
        elif colID == 34:
            dtype_dict['|I|/A'] = '<f4'
        elif colID == 35:
            dtype_dict['Phase(Z)/deg'] = '<f4'
        elif colID == 36:
            dtype_dict['|Z|/Ohm'] = '<f4'
        elif colID == 37:
            dtype_dict['Re(Z)/Ohm'] = '<f4'
        elif colID == 38:
            dtype_dict['-Im(Z)/Ohm'] = '<f4'
        elif colID == 39:
            dtype_dict['I Range'] = '<u2'
        elif colID == 70:
            dtype_dict['P/W'] = '<f4'
        elif colID == 123:
            dtype_dict['Energy charge/W.h'] = '<f8'
        elif colID == 124:
            dtype_dict['Energy discharge/W.h'] = '<f8'
        elif colID == 125:
            dtype_dict['Capacitance charge/µF'] = '<f8'
        elif colID == 126:
            dtype_dict['Capacitance discharge/µF'] = '<f8'
        elif colID == 131:
            dtype_dict['Ns'] = '<u2'
        elif colID == 169:
            dtype_dict['Cs/µF'] = '<f4'
        elif colID == 172:
            dtype_dict['Cp/µF'] = '<f4'
        elif colID == 434:
            dtype_dict['(Q-Qo)/C'] = '<f4'
        elif colID == 435:
            dtype_dict['dQ/C'] = '<f4'
        elif colID == 467:
            dtype_dict['Q charge/discharge/mA.h'] = '<f8'
        elif colID == 468:
            dtype_dict['half cycle'] = '<u4'
        elif colID == 473:
            dtype_dict['THD Ewe/%'] = '<f4'
        elif colID == 476:
            dtype_dict['NSD Ewe/%'] = '<f4'
        elif colID == 479:
            dtype_dict['NSR Ewe/%'] = '<f4'
        elif colID == 474:
            dtype_dict['THD I/%'] = '<f4'
        elif colID == 477:
            dtype_dict['NSD I/%'] = '<f4'
        elif colID == 480:
            dtype_dict['NSR I/%'] = '<f4'
            unique_field_name = field_name
            type_list.append((unique_field_name, field_type))
        else:
            print(dtype_dict)
            raise NotImplementedError("column type %d not implemented" % colID)
    return np.dtype(list(dtype_dict.items())), flags_dict, flags2_dict
            raise NotImplementedError("Column ID {cid} after column {prev} "
                                      "is unknown"
                                      .format(cid=colID,
                                              prev=type_list[-1][0]))
    return np.dtype(type_list), flags_dict
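From the new implementation's perspective, a short sketch of the round trip (the expected dtypes match the parametrized test cases near the end of this diff):

```python
import numpy as np
from galvani.BioLogic import VMPdata_dtype_from_colIDs

# Column IDs 4 and 6 map to 'time/s' and 'Ewe/V' in VMPdata_colID_dtype_map.
dtype, flags_dict = VMPdata_dtype_from_colIDs([4, 6])
assert dtype == np.dtype([('time/s', '<f8'), ('Ewe/V', '<f4')])
```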

def read_VMP_modules(fileobj, read_module_data=True):

@@ -259,13 +299,14 @@ def read_VMP_modules(fileobj, read_module_data=True):
        if len(module_magic) == 0:  # end of file
            break
        elif module_magic != b'MODULE':
            raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic)
            raise ValueError("Found %r, expecting start of new VMP MODULE"
                             % module_magic)

        hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
        if len(hdr_bytes) < VMPmodule_hdr.itemsize:
            raise IOError("Unexpected end of file while reading module header")

        hdr = np.fromstring(hdr_bytes, dtype=VMPmodule_hdr, count=1)
        hdr = np.frombuffer(hdr_bytes, dtype=VMPmodule_hdr, count=1)
        hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names))
        hdr_dict['offset'] = fileobj.tell()
        if read_module_data:

@@ -283,6 +324,9 @@ def read_VMP_modules(fileobj, read_module_data=True):
            fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)


MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
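As a sanity check on the new constant: the literal is 23 bytes, space-padded to 48 and followed by four NUL bytes, so the magic header is 52 bytes long:

```python
MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
assert len(MPR_MAGIC) == 52  # 23-byte literal, padded to 48, plus 4 NULs
```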

class MPRfile:
    """Bio-Logic .mpr file

@@ -300,74 +344,90 @@ class MPRfile:
    """

    def __init__(self, file_or_path):
        self.loop_index = None
        if isinstance(file_or_path, str):
            mpr_file = open(file_or_path, 'rb')
        else:
            mpr_file = file_or_path

        mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a                         \x00\x00\x00\x00'
        magic = mpr_file.read(len(mpr_magic))
        if magic != mpr_magic:
        magic = mpr_file.read(len(MPR_MAGIC))
        if magic != MPR_MAGIC:
            raise ValueError('Invalid magic for .mpr file: %s' % magic)

        modules = list(read_VMP_modules(mpr_file))
        self.modules = modules
        settings_mod, = (m for m in modules if m['shortname'] == b'VMP Set   ')
        data_module, = (m for m in modules if m['shortname'] == b'VMP data  ')
        maybe_loop_module = [m for m in modules if m['shortname'] == b'VMP loop  ']
        maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG   ']

        n_data_points = np.fromstring(data_module['data'][:4], dtype='<u4')
        n_columns = np.fromstring(data_module['data'][4:5], dtype='u1')
        n_columns = np.asscalar(n_columns)  # Compatibility with recent numpy
        n_data_points = np.frombuffer(data_module['data'][:4], dtype='<u4')
        n_columns = np.frombuffer(data_module['data'][4:5], dtype='u1').item()

        if data_module['version'] == 0:
            column_types = np.fromstring(data_module['data'][5:], dtype='u1',
            column_types = np.frombuffer(data_module['data'][5:], dtype='u1',
                                         count=n_columns)
            remaining_headers = data_module['data'][5 + n_columns:100]
            main_data = data_module['data'][100:]
        elif data_module['version'] == 2:
            column_types = np.fromstring(data_module['data'][5:], dtype='<u2',
        elif data_module['version'] in [2, 3]:
            column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
                                         count=n_columns)
            ## There is 405 bytes of data before the main array starts
            # There are 405 or 406 bytes of data before the main array starts
            if data_module['version'] == 3:
                num_bytes_before = 406  # version 3 added `\x01` to the start
            else:
                num_bytes_before = 405
            remaining_headers = data_module['data'][5 + 2 * n_columns:405]
            main_data = data_module['data'][405:]
            main_data = data_module['data'][num_bytes_before:]
        else:
            raise ValueError("Unrecognised version for data module: %d" %
                             data_module['version'])

        if sys.version_info.major <= 2:
            assert(all((b == '\x00' for b in remaining_headers)))
        else:
            assert(not any(remaining_headers))
        assert(not any(remaining_headers))

        self.dtype, self.flags_dict, self.flags2_dict = VMPdata_dtype_from_colIDs(column_types)
        self.data = np.fromstring(main_data, dtype=self.dtype)
        self.dtype, self.flags_dict = VMPdata_dtype_from_colIDs(column_types)
        self.data = np.frombuffer(main_data, dtype=self.dtype)
        assert(self.data.shape[0] == n_data_points)

        ## No idea what these 'column types' mean or even if they are actually
        ## column types at all
        # No idea what these 'column types' mean or even if they are actually
        # column types at all
        self.version = int(data_module['version'])
        self.cols = column_types
        self.npts = n_data_points

        tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y')
        try:
            tm = time.strptime(settings_mod['date'].decode('ascii'), '%m/%d/%y')
        except ValueError:
            tm = time.strptime(settings_mod['date'].decode('ascii'), '%m-%d-%y')
        self.startdate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)

        if maybe_loop_module:
            loop_module, = maybe_loop_module
            if loop_module['version'] == 0:
                self.loop_index = np.fromstring(loop_module['data'][4:],
                                                dtype='<u4')
                self.loop_index = np.trim_zeros(self.loop_index, 'b')
            else:
                raise ValueError("Unrecognised version for data module: %d" %
                                 data_module['version'])

        if maybe_log_module:
            log_module, = maybe_log_module
            tm = time.strptime(str3(log_module['date']), '%m/%d/%y')
            try:
                tm = time.strptime(log_module['date'].decode('ascii'), '%m/%d/%y')
            except ValueError:
                tm = time.strptime(log_module['date'].decode('ascii'), '%m-%d-%y')
            self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)

            ## There is a timestamp at either 465 or 469 bytes
            ## I can't find any reason why it is one or the other in any
            ## given file
            ole_timestamp1 = np.fromstring(log_module['data'][465:],
            # There is a timestamp at either 465 or 469 bytes
            # I can't find any reason why it is one or the other in any
            # given file
            ole_timestamp1 = np.frombuffer(log_module['data'][465:],
                                           dtype='<f8', count=1)
            ole_timestamp2 = np.fromstring(log_module['data'][469:],
            ole_timestamp2 = np.frombuffer(log_module['data'][469:],
                                           dtype='<f8', count=1)
            ole_timestamp3 = np.fromstring(log_module['data'][473:],
            ole_timestamp3 = np.frombuffer(log_module['data'][473:],
                                           dtype='<f8', count=1)
            ole_timestamp4 = np.fromstring(log_module['data'][585:],
            ole_timestamp4 = np.frombuffer(log_module['data'][585:],
                                           dtype='<f8', count=1)

            if ole_timestamp1 > 40000 and ole_timestamp1 < 50000:

@@ -378,7 +438,7 @@ class MPRfile:
                ole_timestamp = ole_timestamp3
            elif ole_timestamp4 > 40000 and ole_timestamp4 < 50000:
                ole_timestamp = ole_timestamp4

            else:
                raise ValueError("Could not find timestamp in the LOG module")

@@ -386,17 +446,14 @@ class MPRfile:
            ole_timedelta = timedelta(days=ole_timestamp[0])
            self.timestamp = ole_base + ole_timedelta
            if self.startdate != self.timestamp.date():
                raise ValueError("""Date mismatch:
Start date: %s
End date: %s
Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp))
                raise ValueError("Date mismatch:\n"
                                 + "  Start date: %s\n" % self.startdate
                                 + "  End date: %s\n" % self.enddate
                                 + "  Timestamp: %s\n" % self.timestamp)

    def get_flag(self, flagname):
        if flagname in self.flags_dict:
            mask, dtype = self.flags_dict[flagname]
            return np.array(self.data['flags'] & mask, dtype=dtype)
        elif flagname in self.flags2_dict:
            mask, dtype = self.flags2_dict[flagname]
            return np.array(self.data['flags2'] & mask, dtype=dtype)
        else:
            raise AttributeError("Flag '%s' not present" % flagname)
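Reading a flag column from a loaded file then looks like this (a brief sketch; 'test.mpr' is a placeholder path):

```python
from galvani import MPRfile

mpr = MPRfile('test.mpr')        # placeholder filename
ox_red = mpr.get_flag('ox/red')  # boolean array unpacked from the 'flags' byte
```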
galvani/__init__.py
@@ -1 +1,3 @@
from .BioLogic import MPTfile, MPRfile
from .BioLogic import MPRfile, MPTfile

__all__ = ['MPRfile', 'MPTfile']
galvani/res2sqlite.py
@@ -5,26 +5,51 @@ import sqlite3
import re
import csv
import argparse
from copy import copy


## The following scripts are adapted from the result of running
## $ mdb-schema <result.res> oracle
# The following scripts are adapted from the result of running
# $ mdb-schema <result.res> oracle

mdb_tables = ["Version_Table", "Global_Table", "Resume_Table",
              "Channel_Normal_Table", "Channel_Statistic_Table",
              "Auxiliary_Table", "Event_Table",
              "Smart_Battery_Info_Table", "Smart_Battery_Data_Table"]
mdb_tables = [
    'Version_Table',
    'Global_Table',
    'Resume_Table',
    'Channel_Normal_Table',
    'Channel_Statistic_Table',
    'Auxiliary_Table',
    'Event_Table',
    'Smart_Battery_Info_Table',
    'Smart_Battery_Data_Table',
]
mdb_5_23_tables = [
    'MCell_Aci_Data_Table',
    'Aux_Global_Data_Table',
    'Smart_Battery_Clock_Stretch_Table',
]
mdb_5_26_tables = [
    'Can_BMS_Info_Table',
    'Can_BMS_Data_Table',
]

mdb_tables_text = ["Version_Table", "Global_Table", "Event_Table",
                   "Smart_Battery_Info_Table"]
mdb_tables_numeric = ["Resume_Table", "Channel_Normal_Table",
                      "Channel_Statistic_Table", "Auxiliary_Table",
                      "Smart_Battery_Data_Table", 'MCell_Aci_Data_Table',
                      'Aux_Global_Data_Table',
                      'Smart_Battery_Clock_Stretch_Table']

mdb_5_23_tables = ['MCell_Aci_Data_Table', 'Aux_Global_Data_Table',
                   'Smart_Battery_Clock_Stretch_Table']
mdb_tables_text = {
    'Version_Table',
    'Global_Table',
    'Event_Table',
    'Smart_Battery_Info_Table',
    'Can_BMS_Info_Table',
}
mdb_tables_numeric = {
    'Resume_Table',
    'Channel_Normal_Table',
    'Channel_Statistic_Table',
    'Auxiliary_Table',
    'Smart_Battery_Data_Table',
    'MCell_Aci_Data_Table',
    'Aux_Global_Data_Table',
    'Smart_Battery_Clock_Stretch_Table',
    'Can_BMS_Data_Table',
}

mdb_create_scripts = {
    "Version_Table": """

@@ -56,8 +81,17 @@ CREATE TABLE Global_Table
    Log_Aux_Data_Flag INTEGER,
    Log_Event_Data_Flag INTEGER,
    Log_Smart_Battery_Data_Flag INTEGER,
    -- The following items are in 5.26 but not in 5.23
    Log_Can_BMS_Data_Flag INTEGER DEFAULT NULL,
    Software_Version TEXT DEFAULT NULL,
    Serial_Number TEXT DEFAULT NULL,
    Schedule_Version TEXT DEFAULT NULL,
    MASS REAL DEFAULT NULL,
    Specific_Capacity REAL DEFAULT NULL,
    Capacity REAL DEFAULT NULL,
    -- Item_ID exists in all versions
    Item_ID TEXT,
    -- Version 1.14 ends here, version 5.23 continues
    -- These items are in 5.26 and 5.23 but not in 1.14
    Mapped_Aux_Conc_CNumber INTEGER DEFAULT NULL,
    Mapped_Aux_DI_CNumber INTEGER DEFAULT NULL,
    Mapped_Aux_DO_CNumber INTEGER DEFAULT NULL

@@ -65,7 +99,7 @@ CREATE TABLE Global_Table
    "Resume_Table": """
CREATE TABLE Resume_Table
(
    Test_ID INTEGER REFERENCES Global_Table(Test_ID),
    Test_ID INTEGER PRIMARY KEY REFERENCES Global_Table(Test_ID),
    Step_Index INTEGER,
    Cycle_Index INTEGER,
    Channel_Status INTEGER,

@@ -115,7 +149,8 @@ CREATE TABLE Channel_Normal_Table
    "dV/dt" REAL,
    Internal_Resistance REAL,
    AC_Impedance REAL,
    ACI_Phase_Angle REAL
    ACI_Phase_Angle REAL,
    PRIMARY KEY (Test_ID, Data_Point)
); """,
    "Channel_Statistic_Table": """
CREATE TABLE Channel_Statistic_Table

@@ -126,7 +161,9 @@ CREATE TABLE Channel_Statistic_Table
    -- Version 1.14 ends here, version 5.23 continues
    Charge_Time REAL DEFAULT NULL,
    Discharge_Time REAL DEFAULT NULL,
    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
    PRIMARY KEY (Test_ID, Data_Point),
    FOREIGN KEY (Test_ID, Data_Point)
        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
); """,
    "Auxiliary_Table": """
CREATE TABLE Auxiliary_Table

@@ -137,7 +174,9 @@ CREATE TABLE Auxiliary_Table
    Data_Type INTEGER,
    X REAL,
    "dX/dt" REAL,
    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
    PRIMARY KEY (Test_ID, Data_Point, Auxiliary_Index),
    FOREIGN KEY (Test_ID, Data_Point)
        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
); """,
    "Event_Table": """
CREATE TABLE Event_Table

@@ -151,7 +190,7 @@ CREATE TABLE Event_Table
    "Smart_Battery_Info_Table": """
CREATE TABLE Smart_Battery_Info_Table
(
    Test_ID INTEGER REFERENCES Global_Table(Test_ID),
    Test_ID INTEGER PRIMARY KEY REFERENCES Global_Table(Test_ID),
    ManufacturerDate REAL,
    ManufacturerAccess TEXT,
    SpecificationInfo TEXT,

@@ -220,9 +259,14 @@ CREATE TABLE Smart_Battery_Data_Table
    ChargingCurrent REAL DEFAULT NULL,
    ChargingVoltage REAL DEFAULT NULL,
    ManufacturerData REAL DEFAULT NULL,
    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
    -- Version 5.23 ends here, version 5.26 continues
    BATMAN_Status INTEGER DEFAULT NULL,
    DTM_PDM_Status INTEGER DEFAULT NULL,
    PRIMARY KEY (Test_ID, Data_Point),
    FOREIGN KEY (Test_ID, Data_Point)
        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
); """,
    ## The following tables are not present in version 1.14
    # The following tables are not present in version 1.14, but are in 5.23
    'MCell_Aci_Data_Table': """
CREATE TABLE MCell_Aci_Data_Table
(

@@ -233,7 +277,9 @@ CREATE TABLE MCell_Aci_Data_Table
    Phase_Shift REAL,
    Voltage REAL,
    Current REAL,
    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
    PRIMARY KEY (Test_ID, Data_Point, Cell_Index),
    FOREIGN KEY (Test_ID, Data_Point)
        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
);""",
    'Aux_Global_Data_Table': """
CREATE TABLE Aux_Global_Data_Table

@@ -242,7 +288,8 @@ CREATE TABLE Aux_Global_Data_Table
    Auxiliary_Index INTEGER,
    Data_Type INTEGER,
    Nickname TEXT,
    Unit TEXT
    Unit TEXT,
    PRIMARY KEY (Channel_Index, Auxiliary_Index, Data_Type)
);""",
    'Smart_Battery_Clock_Stretch_Table': """
CREATE TABLE Smart_Battery_Clock_Stretch_Table

@@ -288,8 +335,32 @@ CREATE TABLE Smart_Battery_Clock_Stretch_Table
    VCELL3 INTEGER,
    VCELL2 INTEGER,
    VCELL1 INTEGER,
    FOREIGN KEY (Test_ID, Data_Point) REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
);"""}
    PRIMARY KEY (Test_ID, Data_Point),
    FOREIGN KEY (Test_ID, Data_Point)
        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
);""",
    # The following tables are not present in version 5.23, but are in 5.26
    'Can_BMS_Info_Table': """
CREATE TABLE "Can_BMS_Info_Table"
(
    Channel_Index INTEGER PRIMARY KEY,
    CAN_Cfg_File_Name TEXT,
    CAN_Configuration TEXT
);
""",
    'Can_BMS_Data_Table': """
CREATE TABLE "Can_BMS_Data_Table"
(
    Test_ID INTEGER,
    Data_Point INTEGER,
    CAN_MV_Index INTEGER,
    Signal_Value_X REAL,
    PRIMARY KEY (Test_ID, Data_Point, CAN_MV_Index),
    FOREIGN KEY (Test_ID, Data_Point)
        REFERENCES Channel_Normal_Table (Test_ID, Data_Point)
);
""",
}

mdb_create_indices = {
    "Channel_Normal_Table": """

@@ -306,18 +377,21 @@ CREATE TEMPORARY TABLE capacity_helper(
    Discharge_Capacity REAL NOT NULL,
    Charge_Energy REAL NOT NULL,
    Discharge_Energy REAL NOT NULL,
    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
    FOREIGN KEY (Test_ID, Cycle_Index)
        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
);

INSERT INTO capacity_helper
SELECT Test_ID, Cycle_Index, max(Charge_Capacity), max(Discharge_Capacity), max(Charge_Energy), max(Discharge_Energy)
FROM Channel_Normal_Table
INSERT INTO capacity_helper
SELECT Test_ID, Cycle_Index,
       max(Charge_Capacity), max(Discharge_Capacity),
       max(Charge_Energy), max(Discharge_Energy)
FROM Channel_Normal_Table
GROUP BY Test_ID, Cycle_Index;

-- ## Alternative way of selecting ##
-- select *
-- from Channel_Normal_Table as a join Channel_Normal_Table as b
-- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1
-- ## Alternative way of selecting ##
-- select *
-- from Channel_Normal_Table as a join Channel_Normal_Table as b
-- on (a.Test_ID = b.Test_ID and a.Data_Point = b.Data_Point + 1
-- and a.Charge_Capacity < b.Charge_Capacity);

DROP TABLE IF EXISTS Capacity_Sum_Table;

@@ -328,12 +402,15 @@ CREATE TABLE Capacity_Sum_Table(
    Discharge_Capacity_Sum REAL NOT NULL,
    Charge_Energy_Sum REAL NOT NULL,
    Discharge_Energy_Sum REAL NOT NULL,
    FOREIGN KEY (Test_ID, Cycle_Index) REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
    FOREIGN KEY (Test_ID, Cycle_Index)
        REFERENCES Channel_Normal_Table (Test_ID, Cycle_Index)
);

INSERT INTO Capacity_Sum_Table
SELECT a.Test_ID, a.Cycle_Index, total(b.Charge_Capacity), total(b.Discharge_Capacity), total(b.Charge_Energy), total(b.Discharge_Energy)
FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
INSERT INTO Capacity_Sum_Table
SELECT a.Test_ID, a.Cycle_Index,
       total(b.Charge_Capacity), total(b.Discharge_Capacity),
       total(b.Charge_Energy), total(b.Discharge_Energy)
FROM capacity_helper AS a LEFT JOIN capacity_helper AS b
ON (a.Test_ID = b.Test_ID AND a.Cycle_Index > b.Cycle_Index)
GROUP BY a.Test_ID, a.Cycle_Index;

@@ -342,54 +419,79 @@ DROP TABLE capacity_helper;
CREATE VIEW IF NOT EXISTS Capacity_View
AS SELECT Test_ID, Data_Point, Test_Time, Step_Time, DateTime,
          Step_Index, Cycle_Index, Current, Voltage, "dV/dt",
          Discharge_Capacity + Discharge_Capacity_Sum - Charge_Capacity - Charge_Capacity_Sum AS Net_Capacity,
          Discharge_Capacity + Discharge_Capacity_Sum + Charge_Capacity + Charge_Capacity_Sum AS Gross_Capacity,
          Discharge_Energy + Discharge_Energy_Sum - Charge_Energy - Charge_Energy_Sum AS Net_Energy,
          Discharge_Energy + Discharge_Energy_Sum + Charge_Energy + Charge_Energy_Sum AS Gross_Energy
          ( (Discharge_Capacity + Discharge_Capacity_Sum)
            - (Charge_Capacity + Charge_Capacity_Sum) ) AS Net_Capacity,
          ( (Discharge_Capacity + Discharge_Capacity_Sum)
            + (Charge_Capacity + Charge_Capacity_Sum) ) AS Gross_Capacity,
          ( (Discharge_Energy + Discharge_Energy_Sum)
            - (Charge_Energy + Charge_Energy_Sum) ) AS Net_Energy,
          ( (Discharge_Energy + Discharge_Energy_Sum)
            + (Charge_Energy + Charge_Energy_Sum) ) AS Gross_Energy
FROM Channel_Normal_Table NATURAL JOIN Capacity_Sum_Table;
"""
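Once that script has run, the view can be queried like any other table; a small sketch using the standard-library sqlite3 module ('output.s3db' is a placeholder path):

```python
import sqlite3

conn = sqlite3.connect('output.s3db')  # placeholder path
rows = conn.execute(
    'SELECT Test_ID, Cycle_Index, Net_Capacity, Net_Energy '
    'FROM Capacity_View LIMIT 5;'
).fetchall()
```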

def mdb_get_data_text(s3db, filename, table):
    print("Reading %s..." % table)
    insert_pattern = re.compile(
        r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
        re.IGNORECASE
    )
    try:
        mdb_sql = sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
                           bufsize=-1, stdin=None, stdout=sp.PIPE,
                           universal_newlines=True)
        mdb_output = mdb_sql.stdout.read()
        while len(mdb_output) > 0:
            insert_match = re.match(r'INSERT INTO "\w+" \([^)]+?\) VALUES \(("[^"]*"|[^")])+?\);\n',
                                    mdb_output, re.IGNORECASE)
            s3db.execute(insert_match.group())
            mdb_output = mdb_output[insert_match.end():]
        s3db.commit()
    except:
        # Initialize values to avoid NameError in except clause
        mdb_output = ''
        insert_match = None
        with sp.Popen(['mdb-export', '-I', 'postgres', filename, table],
                      bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE,
                      universal_newlines=True) as mdb_sql:

            mdb_output = mdb_sql.stdout.read()
            while len(mdb_output) > 0:
                insert_match = insert_pattern.match(mdb_output)
                s3db.execute(insert_match.group())
                mdb_output = mdb_output[insert_match.end():]
                mdb_output += mdb_sql.stdout.read()
            s3db.commit()

    except OSError as e:
        if e.errno == 2:
            raise RuntimeError('Could not locate the `mdb-export` executable. '
                               'Check that mdbtools is properly installed.')
        else:
            raise
    except BaseException:
        print("Error while importing %s" % table)
        print("Remaining mdb-export output:", mdb_output)
        if mdb_output:
            print("Remaining mdb-export output:", mdb_output)
        if insert_match:
            print("insert_re match:", insert_match)
        raise
    finally:
        mdb_sql.terminate()


def mdb_get_data_numeric(s3db, filename, table):
    print("Reading %s..." % table)
    try:
        mdb_sql = sp.Popen(['mdb-export', filename, table],
                           bufsize=-1, stdin=None, stdout=sp.PIPE,
                           universal_newlines=True)
        mdb_csv = csv.reader(mdb_sql.stdout)
        mdb_headers = next(mdb_csv)
        quoted_headers = ['"%s"' % h for h in mdb_headers]
        joined_headers = ', '.join(quoted_headers)
        joined_placemarks = ', '.join(['?' for h in mdb_headers])
        insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(table,
                joined_headers, joined_placemarks)
        s3db.executemany(insert_stmt, mdb_csv)
        s3db.commit()
    finally:
        mdb_sql.terminate()
        with sp.Popen(['mdb-export', filename, table],
                      bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE,
                      universal_newlines=True) as mdb_sql:
            mdb_csv = csv.reader(mdb_sql.stdout)
            mdb_headers = next(mdb_csv)
            quoted_headers = ['"%s"' % h for h in mdb_headers]
            joined_headers = ', '.join(quoted_headers)
            joined_placemarks = ', '.join(['?' for h in mdb_headers])
            insert_stmt = 'INSERT INTO "{0}" ({1}) VALUES ({2});'.format(
                table,
                joined_headers,
                joined_placemarks,
            )
            s3db.executemany(insert_stmt, mdb_csv)
            s3db.commit()
    except OSError as e:
        if e.errno == 2:
            raise RuntimeError('Could not locate the `mdb-export` executable. '
                               'Check that mdbtools is properly installed.')
        else:
            raise
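For illustration, with `mdb_headers == ['Test_ID', 'Data_Point']` the parameterised statement assembled above would read:

```python
insert_stmt = 'INSERT INTO "Channel_Normal_Table" ("Test_ID", "Data_Point") VALUES (?, ?);'
```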

def mdb_get_data(s3db, filename, table):

@@ -401,40 +503,80 @@ def mdb_get_data(s3db, filename, table):
        raise ValueError("'%s' is in neither mdb_tables_text nor mdb_tables_numeric" % table)


def mdb_get_version(filename):
    """Get the version number from an Arbin .res file.

    Reads the Version_Table and parses the version from Version_Schema_Field.
    """
    print("Reading version number...")
    try:
        with sp.Popen(['mdb-export', filename, 'Version_Table'],
                      bufsize=-1, stdin=sp.DEVNULL, stdout=sp.PIPE,
                      universal_newlines=True) as mdb_sql:
            mdb_csv = csv.reader(mdb_sql.stdout)
            mdb_headers = next(mdb_csv)
            mdb_values = next(mdb_csv)
            try:
                next(mdb_csv)
            except StopIteration:
                pass
            else:
                raise ValueError('Version_Table of %s lists multiple versions' % filename)
    except OSError as e:
        if e.errno == 2:
            raise RuntimeError('Could not locate the `mdb-export` executable. '
                               'Check that mdbtools is properly installed.')
        else:
            raise
    if 'Version_Schema_Field' not in mdb_headers:
        raise ValueError('Version_Table of %s does not contain a Version_Schema_Field column'
                         % filename)
    version_fields = dict(zip(mdb_headers, mdb_values))
    version_text = version_fields['Version_Schema_Field']
    version_match = re.fullmatch('Results File ([.0-9]+)', version_text)
    if not version_match:
        raise ValueError('File version "%s" did not match expected format' % version_text)
    version_string = version_match.group(1)
    version_tuple = tuple(map(int, version_string.split('.')))
    return version_tuple
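So a Version_Table entry of 'Results File 5.23' parses to the tuple `(5, 23)`, which is what lets convert_arbin_to_sqlite() below pick table sets with plain tuple comparisons:

```python
version = (5, 23)              # as returned by mdb_get_version
assert version >= (5, 23)      # include the 5.23-only tables
assert not version >= (5, 26)  # but not the 5.26-only ones
```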

def convert_arbin_to_sqlite(input_file, output_file):
    """Read data from an Arbin .res data file and write to a sqlite file.

    Any data currently in the sqlite file will be erased!
    """
    arbin_version = mdb_get_version(input_file)

    s3db = sqlite3.connect(output_file)

    for table in reversed(mdb_tables + mdb_5_23_tables):

    tables_to_convert = copy(mdb_tables)
    if arbin_version >= (5, 23):
        tables_to_convert.extend(mdb_5_23_tables)
    if arbin_version >= (5, 26):
        tables_to_convert.extend(mdb_5_26_tables)

    for table in reversed(tables_to_convert):
        s3db.execute('DROP TABLE IF EXISTS "%s";' % table)

    for table in mdb_tables:

    for table in tables_to_convert:
        s3db.executescript(mdb_create_scripts[table])
        mdb_get_data(s3db, input_file, table)
        if table in mdb_create_indices:
            print("Creating indices for %s..." % table)
            s3db.executescript(mdb_create_indices[table])

    if (s3db.execute("SELECT Version_Schema_Field FROM Version_Table;").fetchone()[0] == "Results File 5.23"):
        for table in mdb_5_23_tables:
            s3db.executescript(mdb_create_scripts[table])
            mdb_get_data(input_file, table)
            if table in mdb_create_indices:
                s3db.executescript(mdb_create_indices[table])

    print("Creating helper table for capacity and energy totals...")
    s3db.executescript(helper_table_script)

    print("Vacuuming database...")
    s3db.executescript("VACUUM; ANALYZE;")


def main(argv=None):
    parser = argparse.ArgumentParser(description="Convert Arbin .res files to sqlite3 databases using mdb-export")
    parser = argparse.ArgumentParser(
        description="Convert Arbin .res files to sqlite3 databases using mdb-export",
    )
    parser.add_argument('input_file', type=str)  # need file name to pass to sp.Popen
    parser.add_argument('output_file', type=str)  # need file name to pass to sqlite3.connect
get_testdata.sh
@@ -7,20 +7,22 @@ mkdir -p tests/testdata
cd tests/testdata

/usr/bin/wget --continue -i - <<END_FILELIST
http://files.figshare.com/1778905/arbin1.res
http://files.figshare.com/1778937/bio_logic2.mpt
http://files.figshare.com/1778938/bio_logic5.mpt
http://files.figshare.com/1778939/bio_logic1.mpr
http://files.figshare.com/1778940/bio_logic6.mpr
http://files.figshare.com/1778941/bio_logic4.mpt
http://files.figshare.com/1778942/bio_logic5.mpr
http://files.figshare.com/1778943/bio_logic2.mpr
http://files.figshare.com/1778944/bio_logic6.mpt
http://files.figshare.com/1778945/bio_logic1.mpt
http://files.figshare.com/1778946/bio_logic3.mpr
http://files.figshare.com/1780444/bio_logic4.mpr
http://files.figshare.com/1780529/121_CA_455nm_6V_30min_C01.mpr
http://files.figshare.com/1780530/121_CA_455nm_6V_30min_C01.mpt
http://files.figshare.com/1780526/CV_C01.mpr
http://files.figshare.com/1780527/CV_C01.mpt
https://files.figshare.com/1778905/arbin1.res
https://files.figshare.com/1778937/bio_logic2.mpt
https://files.figshare.com/1778938/bio_logic5.mpt
https://files.figshare.com/1778939/bio_logic1.mpr
https://files.figshare.com/1778940/bio_logic6.mpr
https://files.figshare.com/1778941/bio_logic4.mpt
https://files.figshare.com/1778942/bio_logic5.mpr
https://files.figshare.com/1778943/bio_logic2.mpr
https://files.figshare.com/1778944/bio_logic6.mpt
https://files.figshare.com/1778945/bio_logic1.mpt
https://files.figshare.com/1778946/bio_logic3.mpr
https://files.figshare.com/1780444/bio_logic4.mpr
https://files.figshare.com/1780529/121_CA_455nm_6V_30min_C01.mpr
https://files.figshare.com/1780530/121_CA_455nm_6V_30min_C01.mpt
https://files.figshare.com/1780526/CV_C01.mpr
https://files.figshare.com/1780527/CV_C01.mpt
https://files.figshare.com/14752538/C019P-0ppb-A_C01.mpr
https://files.figshare.com/25331510/UM34_Test005E.res
END_FILELIST
setup.py (14 lines changed)
@@ -9,11 +9,11 @@ with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:

setup(
    name='galvani',
    version='0.0.1',
    version='0.2.0',
    description='Open and process battery charger log data files',
    long_description=readme,
    long_description_content_type="text/markdown",
    url='https://github.com/chatcannon/galvani',
    url='https://github.com/echemdata/galvani',
    author='Chris Kerr',
    author_email='chris.kerr@mykolab.ch',
    license='GPLv3+',

@@ -23,10 +23,16 @@ setup(
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Scientific/Engineering :: Chemistry',
    ],
    packages=['galvani'],
    entry_points={'console_scripts': [
    entry_points={
        'console_scripts': [
            'res2sqlite = galvani.res2sqlite:main',
    ]},
        ],
    },
    python_requires='>=3.6',
    install_requires=['numpy'],
    tests_require=['pytest'],
)
tests/conftest.py (new file, 11 lines)
@@ -0,0 +1,11 @@
"""Helpers for pytest tests."""

import os

import pytest


@pytest.fixture(scope='session')
def testdata_dir():
    """Path to the testdata directory."""
    return os.path.join(os.path.dirname(__file__), 'testdata')
tests/test_Arbin.py (new file, 56 lines)
@@ -0,0 +1,56 @@
"""Tests for loading Arbin .res files."""

import os
import sqlite3
import subprocess

import pytest

from galvani import res2sqlite


have_mdbtools = (subprocess.call(['which', 'mdb-export'],
                                 stdout=subprocess.DEVNULL) == 0)


def test_res2sqlite_help():
    """Test running `res2sqlite --help`.

    This should work even when mdbtools is not installed.
    """
    help_output = subprocess.check_output(['res2sqlite', '--help'])
    assert b'Convert Arbin .res files to sqlite3 databases' in help_output


@pytest.mark.skipif(have_mdbtools, reason='This tests the failure when mdbtools is not installed')
def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir):
    """Checks that the conversion fails with an appropriate error message."""
    res_file = os.path.join(testdata_dir, 'arbin1.res')
    sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db')
    with pytest.raises(RuntimeError, match="Could not locate the `mdb-export` executable."):
        res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file)


@pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools')
@pytest.mark.parametrize('basename', ['arbin1', 'UM34_Test005E'])
def test_convert_Arbin_to_sqlite_function(testdata_dir, tmpdir, basename):
    """Convert an Arbin file to SQLite using the functional interface."""
    res_file = os.path.join(testdata_dir, basename + '.res')
    sqlite_file = os.path.join(str(tmpdir), basename + '.s3db')
    res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file)
    assert os.path.isfile(sqlite_file)
    with sqlite3.connect(sqlite_file) as conn:
        csr = conn.execute('SELECT * FROM Channel_Normal_Table;')
        csr.fetchone()


@pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools')
def test_convert_cmdline(testdata_dir, tmpdir):
    """Convert an Arbin file to SQLite via the command-line script."""
    res_file = os.path.join(testdata_dir, 'arbin1.res')
    sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db')
    subprocess.check_call(['res2sqlite', res_file, sqlite_file])
    assert os.path.isfile(sqlite_file)
    with sqlite3.connect(sqlite_file) as conn:
        csr = conn.execute('SELECT * FROM Channel_Normal_Table;')
        csr.fetchone()
tests/test_BioLogic.py
@@ -2,98 +2,108 @@

import os.path
import re
from datetime import date, datetime
from datetime import datetime

import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import ok_, eq_, raises
import pytest

from galvani import MPTfile, MPRfile
from galvani.BioLogic import MPTfileCSV, str3  # not exported

testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata')
from galvani import BioLogic, MPTfile, MPRfile
from galvani.BioLogic import MPTfileCSV  # not exported


def test_open_MPT():
def test_open_MPT(testdata_dir):
    mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt'))
    eq_(comments, [])
    eq_(mpt1.dtype.names, ("mode", "ox/red", "error", "control changes",
                           "Ns changes", "counter inc.", "time/s",
                           "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W",
                           "I/mA", "(Q-Qo)/mA.h", "x"))
    assert comments == []
    assert mpt1.dtype.names == (
        "mode", "ox/red", "error", "control changes", "Ns changes",
        "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W",
        "I/mA", "(Q-Qo)/mA.h", "x",
    )


@raises(ValueError)
def test_open_MPT_fails_for_bad_file():
    mpt1 = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
def test_open_MPT_fails_for_bad_file(testdata_dir):
    with pytest.raises(ValueError, match='Bad first line'):
        MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))


def test_open_MPT_csv():
def test_open_MPT_csv(testdata_dir):
    mpt1, comments = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpt'))
    eq_(comments, [])
    eq_(mpt1.fieldnames, ["mode", "ox/red", "error", "control changes",
                          "Ns changes", "counter inc.", "time/s",
                          "control/V/mA", "Ewe/V", "dq/mA.h", "P/W",
                          "<I>/mA", "(Q-Qo)/mA.h", "x"])
    assert comments == []
    assert mpt1.fieldnames == [
        "mode", "ox/red", "error", "control changes", "Ns changes",
        "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h", "P/W",
        "<I>/mA", "(Q-Qo)/mA.h", "x",
    ]


@raises(ValueError)
def test_open_MPT_csv_fails_for_bad_file():
    mpt1 = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr'))
def test_open_MPT_csv_fails_for_bad_file(testdata_dir):
    with pytest.raises((ValueError, UnicodeDecodeError)):
        MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr'))


def test_open_MPR1():
    mpr1 = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
    ## Check the dates as a basic test that it has been read properly
    eq_(mpr1.startdate, date(2011, 10, 29))
    eq_(mpr1.enddate, date(2011, 10, 31))
def test_colID_map_uniqueness():
    """Check some uniqueness properties of the VMPdata_colID_xyz maps."""
    field_colIDs = set(BioLogic.VMPdata_colID_dtype_map.keys())
    flag_colIDs = set(BioLogic.VMPdata_colID_flag_map.keys())
    field_names = [v[0] for v in BioLogic.VMPdata_colID_dtype_map.values()]
    flag_names = [v[0] for v in BioLogic.VMPdata_colID_flag_map.values()]
    assert not field_colIDs.intersection(flag_colIDs)
    # 'I/mA' and 'dQ/mA.h' are duplicated
    # assert len(set(field_names)) == len(field_names)
    assert len(set(flag_names)) == len(flag_names)
    assert not set(field_names).intersection(flag_names)


def test_open_MPR2():
    mpr2 = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr'))
    ## Check the dates as a basic test that it has been read properly
    eq_(mpr2.startdate, date(2012, 9, 27))
    eq_(mpr2.enddate, date(2012, 9, 27))
@pytest.mark.parametrize('colIDs, expected', [
    ([1, 2, 3], [('flags', 'u1')]),
    ([4, 6], [('time/s', '<f8'), ('Ewe/V', '<f4')]),
    ([1, 4, 21], [('flags', 'u1'), ('time/s', '<f8')]),
    ([4, 6, 4], [('time/s', '<f8'), ('Ewe/V', '<f4'), ('time/s 2', '<f8')]),
    ([4, 9999], NotImplementedError),
])
def test_colID_to_dtype(colIDs, expected):
    """Test converting column ID to numpy dtype."""
    if isinstance(expected, type) and issubclass(expected, Exception):
        with pytest.raises(expected):
            BioLogic.VMPdata_dtype_from_colIDs(colIDs)
        return
    expected_dtype = np.dtype(expected)
    dtype, flags_dict = BioLogic.VMPdata_dtype_from_colIDs(colIDs)
    assert dtype == expected_dtype


def test_open_MPR3():
    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic3.mpr'))
    ## Check the dates as a basic test that it has been read properly
    eq_(mpr.startdate, date(2013, 3, 27))
    eq_(mpr.enddate, date(2013, 3, 27))
@pytest.mark.parametrize('filename, startdate, enddate', [
    ('bio_logic1.mpr', '2011-10-29', '2011-10-31'),
    ('bio_logic2.mpr', '2012-09-27', '2012-09-27'),
    ('bio_logic3.mpr', '2013-03-27', '2013-03-27'),
    ('bio_logic4.mpr', '2011-11-01', '2011-11-02'),
    ('bio_logic5.mpr', '2013-01-28', '2013-01-28'),
    # bio_logic6.mpr has no end date because it does not have a VMP LOG module
    ('bio_logic6.mpr', '2012-09-11', None),
    # C019P-0ppb-A_C01.mpr stores the date in a different format
    ('C019P-0ppb-A_C01.mpr', '2019-03-14', '2019-03-14'),
])
def test_MPR_dates(testdata_dir, filename, startdate, enddate):
    """Check that the start and end dates in .mpr files are read correctly."""
    mpr = MPRfile(os.path.join(testdata_dir, filename))
    assert mpr.startdate.strftime('%Y-%m-%d') == startdate
    if enddate:
        assert mpr.enddate.strftime('%Y-%m-%d') == enddate
    else:
        assert not hasattr(mpr, 'enddate')


def test_open_MPR4():
    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr'))
    ## Check the dates as a basic test that it has been read properly
    eq_(mpr.startdate, date(2011, 11, 1))
    eq_(mpr.enddate, date(2011, 11, 2))


def test_open_MPR5():
    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr'))
    ## Check the dates as a basic test that it has been read properly
    eq_(mpr.startdate, date(2013, 1, 28))
    eq_(mpr.enddate, date(2013, 1, 28))


def test_open_MPR6():
    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr'))
    ## Check the dates as a basic test that it has been read properly
    eq_(mpr.startdate, date(2012, 9, 11))
    ## no end date because no VMP LOG module


@raises(ValueError)
def test_open_MPR_fails_for_bad_file():
    mpr1 = MPRfile(os.path.join(testdata_dir, 'arbin1.res'))
def test_open_MPR_fails_for_bad_file(testdata_dir):
    with pytest.raises(ValueError, match='Invalid magic for .mpr file'):
        MPRfile(os.path.join(testdata_dir, 'arbin1.res'))


def timestamp_from_comments(comments):
    for line in comments:
        time_match = re.match(b'Acquisition started on : ([0-9/]+ [0-9:]+)', line)
        if time_match:
            timestamp = datetime.strptime(str3(time_match.group(1)),
            timestamp = datetime.strptime(time_match.group(1).decode('ascii'),
                                          '%m/%d/%Y %H:%M:%S')
            return timestamp
    raise AttributeError("No timestamp in comments")

@@ -117,7 +127,7 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
    assert_array_equal(mpr.get_flag("control changes"), mpt["control changes"])
    if "Ns changes" in mpt.dtype.fields:
        assert_array_equal(mpr.get_flag("Ns changes"), mpt["Ns changes"])
    ## Nothing uses the 0x40 bit of the flags
    # Nothing uses the 0x40 bit of the flags
    assert_array_equal(mpr.get_flag("counter inc."), mpt["counter inc."])

    assert_array_almost_equal(mpr.data["time/s"],

@@ -134,38 +144,39 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
    assert_field_matches("dQ/mA.h", decimal=17)  # 64 bit float precision
    assert_field_matches("P/W", decimal=10)  # 32 bit float precision for 1.xxE-5
    assert_field_matches("I/mA", decimal=6)  # 32 bit float precision

    assert_field_exact("cycle number")
    assert_field_matches("(Q-Qo)/C", decimal=6)  # 32 bit float precision

    try:
        eq_(timestamp_from_comments(comments), mpr.timestamp)
        assert timestamp_from_comments(comments) == mpr.timestamp
    except AttributeError:
        pass


def test_MPR1_matches_MPT1():
    mpr1 = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
    mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt'))
    assert_MPR_matches_MPT(mpr1, mpt1, comments)
@pytest.mark.parametrize('basename', [
    'bio_logic1',
    'bio_logic2',
    # No bio_logic3.mpt file
    'bio_logic4',
    # bio_logic5 and bio_logic6 are special cases
    'CV_C01',
    '121_CA_455nm_6V_30min_C01',
])
def test_MPR_matches_MPT(testdata_dir, basename):
    """Check the MPR parser against the MPT parser.

    Load a binary .mpr file and a text .mpt file which should contain
    exactly the same data. Check that the loaded data actually match.
    """
    binpath = os.path.join(testdata_dir, basename + '.mpr')
    txtpath = os.path.join(testdata_dir, basename + '.mpt')
    mpr = MPRfile(binpath)
    mpt, comments = MPTfile(txtpath)
    assert_MPR_matches_MPT(mpr, mpt, comments)


def test_MPR2_matches_MPT2():
    mpr2 = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr'))
    mpt2, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic2.mpt'))
    assert_MPR_matches_MPT(mpr2, mpt2, comments)


## No bio_logic3.mpt file


def test_MPR4_matches_MPT4():
    mpr4 = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr'))
    mpt4, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic4.mpt'))
    assert_MPR_matches_MPT(mpr4, mpt4, comments)


def test_MPR5_matches_MPT5():
def test_MPR5_matches_MPT5(testdata_dir):
    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr'))
    mpt, comments = MPTfile((re.sub(b'\tXXX\t', b'\t0\t', line) for line in
                             open(os.path.join(testdata_dir, 'bio_logic5.mpt'),

@@ -173,23 +184,8 @@ def test_MPR5_matches_MPT5():
    assert_MPR_matches_MPT(mpr, mpt, comments)


def test_MPR6_matches_MPT6():
def test_MPR6_matches_MPT6(testdata_dir):
    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr'))
    mpt, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic6.mpt'))
    mpr.data = mpr.data[:958]  # .mpt file is incomplete
    assert_MPR_matches_MPT(mpr, mpt, comments)


## Tests for issue #1 -- new dtypes ##


def test_CV_C01():
    mpr = MPRfile(os.path.join(testdata_dir, 'CV_C01.mpr'))
    mpt, comments = MPTfile(os.path.join(testdata_dir, 'CV_C01.mpt'))
    assert_MPR_matches_MPT(mpr, mpt, comments)


def test_CA_455nm():
    mpr = MPRfile(os.path.join(testdata_dir, '121_CA_455nm_6V_30min_C01.mpr'))
    mpt, comments = MPTfile(os.path.join(testdata_dir, '121_CA_455nm_6V_30min_C01.mpt'))
    assert_MPR_matches_MPT(mpr, mpt, comments)