16 Commits
0.0.1 ... 0.0.2

Author SHA1 Message Date
85cc3f523e Release version 0.0.2 2019-03-30 15:52:44 +01:00
b977115d6e Also try parsing dates as '%m-%d-%y' - fixes #20 2019-03-30 15:39:40 +01:00
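For illustration, the pattern behind this fix is a simple strptime fallback; a minimal sketch with a made-up helper name rather than anything from the galvani source (the real change is in the MPRfile diff further down):

import time

def parse_mpr_date(raw):
    # Hypothetical helper, not part of galvani: try the usual MM/DD/YY
    # layout first, then fall back to the MM-DD-YY layout from issue #20.
    try:
        return time.strptime(raw, '%m/%d/%y')
    except ValueError:
        return time.strptime(raw, '%m-%d-%y')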
2471148c21 Add new test data file with different date format
This tests for issue #20

Thanks @JBWarrington for providing this file
2019-03-30 15:38:58 +01:00
7a5887fb38 Update existing get_testdata links to HTTPS version 2019-03-30 15:31:18 +01:00
2738396c9e Merge branch 'numpy-deprecations'
Fixes some deprecation warnings with recent versions of Numpy and Python

Closes #22
2019-03-16 13:51:39 +01:00
dcc8ec7fcc Fix 'invalid escape sequence' warnings 2019-03-16 13:50:50 +01:00
b08c2f4435 Use array.item() instead of np.asscalar() 2019-03-16 13:43:26 +01:00
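A one-line illustration of this replacement (the example array is made up, not taken from the galvani code):

import numpy as np

arr = np.array([42])
# np.asscalar(arr) is deprecated in recent NumPy releases;
# ndarray.item() returns the same plain Python scalar.
assert arr.item() == 42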
1bcbc16bab Use np.frombuffer instead of np.fromstring
Fixes #22
2019-03-16 13:41:49 +01:00
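A minimal sketch of the same substitution on made-up bytes (galvani applies it to the module headers and data blocks, as the diff below shows):

import numpy as np

buf = b'\x01\x00\x00\x00'
# np.fromstring() warns on binary input in recent NumPy; np.frombuffer()
# is the supported replacement and returns a read-only view of the bytes.
value = np.frombuffer(buf, dtype='<u4').item()
assert value == 1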
e52efeb9bd Merge branch 'pytest'
Changes from `nosetests` to `pytest` for running the tests - fixes #5
Pytest is considerably more flexible, and Nose is no longer maintained.
2019-03-16 13:35:53 +01:00
d1e8616f1e Use pytest.mark.parametrize to combine test cases 2019-03-16 13:35:13 +01:00
a618f75bb6 Change testing config to use pytest instead of nosetests
Closes #5
2019-03-16 13:10:08 +01:00
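A self-contained sketch of the parametrize pattern, using a toy test rather than the real cases (those are in the test file diff below):

import pytest

@pytest.mark.parametrize('text, expected', [
    ('1', 1),
    ('42', 42),
])
def test_int_parsing(text, expected):
    # One test function now covers every (text, expected) pair above.
    assert int(text) == expected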
b110162763 Replace nose.raises with pytest.raises 2019-03-16 13:03:54 +01:00
de29b0863c Replace nose.eq_ with assert x == y 2019-03-16 12:59:23 +01:00
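A minimal sketch of both replacements, with an illustrative test body rather than galvani's own:

import pytest

def test_pytest_style():
    # nose.tools.eq_(a, b) becomes a plain assertion,
    assert 2 + 2 == 4
    # and @nose.tools.raises(ValueError) becomes a context manager,
    # which also lets the test match against the error message.
    with pytest.raises(ValueError, match='invalid literal'):
        int('not a number')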
4365c08e8b Merge pull request #19 from bayesfactor/patch-2
new column types
2019-03-16 11:53:17 +01:00
Tim 880b4a0a2d new column types
Introduced new column types that show up in GEIS files
2019-03-11 10:23:26 -07:00
da67a36308 Merge branch 'pypi-release'
Closes #7
2019-03-10 10:44:49 +01:00
5 changed files with 111 additions and 126 deletions

View File

@@ -70,7 +70,8 @@ def MPTfile(file_or_path):
     if magic != b'EC-Lab ASCII FILE\r\n':
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
-    nb_headers_match = re.match(b'Nb header lines : (\d+)\s*$', next(mpt_file))
+    # TODO use rb'string' here once Python 2 is no longer supported
+    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)
@@ -107,7 +108,7 @@ def MPTfileCSV(file_or_path):
     if magic.rstrip() != 'EC-Lab ASCII FILE':
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)
-    nb_headers_match = re.match('Nb header lines : (\d+)\s*$', next(mpt_file))
+    nb_headers_match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)
@@ -184,6 +185,8 @@ def VMPdata_dtype_from_colIDs(colIDs):
             dtype_dict['I/mA'] = '<f8'
         elif colID == 13:
             dtype_dict['(Q-Qo)/mA.h'] = '<f8'
+        elif colID == 16:
+            dtype_dict['Analog IN 1/V'] = '<f4'
         elif colID == 19:
             dtype_dict['control/V'] = '<f4'
         elif colID == 20:
@@ -208,6 +211,10 @@ def VMPdata_dtype_from_colIDs(colIDs):
             dtype_dict['I Range'] = '<u2'
         elif colID == 70:
             dtype_dict['P/W'] = '<f4'
+        elif colID == 74:
+            dtype_dict['Energy/W.h'] = '<f8'
+        elif colID == 78:
+            dtype_dict['Cs-2/µf-2'] = '<f4'
         elif colID == 123:
             dtype_dict['Energy charge/W.h'] = '<f8'
         elif colID == 124:
@@ -222,6 +229,8 @@ def VMPdata_dtype_from_colIDs(colIDs):
             dtype_dict['Cs/µF'] = '<f4'
         elif colID == 172:
             dtype_dict['Cp/µF'] = '<f4'
+        elif colID == 173:
+            dtype_dict['Cp-2/µF-2'] = '<f4'
         elif colID == 434:
             dtype_dict['(Q-Qo)/C'] = '<f4'
         elif colID == 435:
@@ -230,6 +239,8 @@ def VMPdata_dtype_from_colIDs(colIDs):
             dtype_dict['Q charge/discharge/mA.h'] = '<f8'
         elif colID == 468:
             dtype_dict['half cycle'] = '<u4'
+        elif colID == 469:
+            dtype_dict['z cycle'] = '<u4'
         elif colID == 473:
             dtype_dict['THD Ewe/%'] = '<f4'
         elif colID == 476:
@@ -265,7 +276,7 @@ def read_VMP_modules(fileobj, read_module_data=True):
         if len(hdr_bytes) < VMPmodule_hdr.itemsize:
             raise IOError("Unexpected end of file while reading module header")
-        hdr = np.fromstring(hdr_bytes, dtype=VMPmodule_hdr, count=1)
+        hdr = np.frombuffer(hdr_bytes, dtype=VMPmodule_hdr, count=1)
         hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names))
         hdr_dict['offset'] = fileobj.tell()
         if read_module_data:
@@ -316,17 +327,16 @@ class MPRfile:
         data_module, = (m for m in modules if m['shortname'] == b'VMP data ')
         maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG ']
-        n_data_points = np.fromstring(data_module['data'][:4], dtype='<u4')
-        n_columns = np.fromstring(data_module['data'][4:5], dtype='u1')
-        n_columns = np.asscalar(n_columns) # Compatibility with recent numpy
+        n_data_points = np.frombuffer(data_module['data'][:4], dtype='<u4')
+        n_columns = np.frombuffer(data_module['data'][4:5], dtype='u1').item()
         if data_module['version'] == 0:
-            column_types = np.fromstring(data_module['data'][5:], dtype='u1',
+            column_types = np.frombuffer(data_module['data'][5:], dtype='u1',
                                          count=n_columns)
             remaining_headers = data_module['data'][5 + n_columns:100]
             main_data = data_module['data'][100:]
         elif data_module['version'] == 2:
-            column_types = np.fromstring(data_module['data'][5:], dtype='<u2',
+            column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
                                          count=n_columns)
             ## There is 405 bytes of data before the main array starts
             remaining_headers = data_module['data'][5 + 2 * n_columns:405]
@@ -341,7 +351,7 @@ class MPRfile:
         assert(not any(remaining_headers))
         self.dtype, self.flags_dict, self.flags2_dict = VMPdata_dtype_from_colIDs(column_types)
-        self.data = np.fromstring(main_data, dtype=self.dtype)
+        self.data = np.frombuffer(main_data, dtype=self.dtype)
         assert(self.data.shape[0] == n_data_points)
         ## No idea what these 'column types' mean or even if they are actually
@@ -350,24 +360,30 @@ class MPRfile:
         self.cols = column_types
         self.npts = n_data_points
-        tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y')
+        try:
+            tm = time.strptime(str3(settings_mod['date']), '%m/%d/%y')
+        except ValueError:
+            tm = time.strptime(str3(settings_mod['date']), '%m-%d-%y')
         self.startdate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
         if maybe_log_module:
             log_module, = maybe_log_module
-            tm = time.strptime(str3(log_module['date']), '%m/%d/%y')
+            try:
+                tm = time.strptime(str3(log_module['date']), '%m/%d/%y')
+            except ValueError:
+                tm = time.strptime(str3(log_module['date']), '%m-%d-%y')
             self.enddate = date(tm.tm_year, tm.tm_mon, tm.tm_mday)
             ## There is a timestamp at either 465 or 469 bytes
             ## I can't find any reason why it is one or the other in any
             ## given file
-            ole_timestamp1 = np.fromstring(log_module['data'][465:],
+            ole_timestamp1 = np.frombuffer(log_module['data'][465:],
                                            dtype='<f8', count=1)
-            ole_timestamp2 = np.fromstring(log_module['data'][469:],
+            ole_timestamp2 = np.frombuffer(log_module['data'][469:],
                                            dtype='<f8', count=1)
-            ole_timestamp3 = np.fromstring(log_module['data'][473:],
+            ole_timestamp3 = np.frombuffer(log_module['data'][473:],
                                            dtype='<f8', count=1)
-            ole_timestamp4 = np.fromstring(log_module['data'][585:],
+            ole_timestamp4 = np.frombuffer(log_module['data'][585:],
                                            dtype='<f8', count=1)
             if ole_timestamp1 > 40000 and ole_timestamp1 < 50000:

View File

@@ -7,20 +7,21 @@ mkdir -p tests/testdata
 cd tests/testdata
 /usr/bin/wget --continue -i - <<END_FILELIST
-http://files.figshare.com/1778905/arbin1.res
-http://files.figshare.com/1778937/bio_logic2.mpt
-http://files.figshare.com/1778938/bio_logic5.mpt
-http://files.figshare.com/1778939/bio_logic1.mpr
-http://files.figshare.com/1778940/bio_logic6.mpr
-http://files.figshare.com/1778941/bio_logic4.mpt
-http://files.figshare.com/1778942/bio_logic5.mpr
-http://files.figshare.com/1778943/bio_logic2.mpr
-http://files.figshare.com/1778944/bio_logic6.mpt
-http://files.figshare.com/1778945/bio_logic1.mpt
-http://files.figshare.com/1778946/bio_logic3.mpr
-http://files.figshare.com/1780444/bio_logic4.mpr
-http://files.figshare.com/1780529/121_CA_455nm_6V_30min_C01.mpr
-http://files.figshare.com/1780530/121_CA_455nm_6V_30min_C01.mpt
-http://files.figshare.com/1780526/CV_C01.mpr
-http://files.figshare.com/1780527/CV_C01.mpt
+https://files.figshare.com/1778905/arbin1.res
+https://files.figshare.com/1778937/bio_logic2.mpt
+https://files.figshare.com/1778938/bio_logic5.mpt
+https://files.figshare.com/1778939/bio_logic1.mpr
+https://files.figshare.com/1778940/bio_logic6.mpr
+https://files.figshare.com/1778941/bio_logic4.mpt
+https://files.figshare.com/1778942/bio_logic5.mpr
+https://files.figshare.com/1778943/bio_logic2.mpr
+https://files.figshare.com/1778944/bio_logic6.mpt
+https://files.figshare.com/1778945/bio_logic1.mpt
+https://files.figshare.com/1778946/bio_logic3.mpr
+https://files.figshare.com/1780444/bio_logic4.mpr
+https://files.figshare.com/1780529/121_CA_455nm_6V_30min_C01.mpr
+https://files.figshare.com/1780530/121_CA_455nm_6V_30min_C01.mpt
+https://files.figshare.com/1780526/CV_C01.mpr
+https://files.figshare.com/1780527/CV_C01.mpt
+https://files.figshare.com/14752538/C019P-0ppb-A_C01.mpr
 END_FILELIST

View File

@@ -9,7 +9,7 @@ with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
 setup(
     name='galvani',
-    version='0.0.1',
+    version='0.0.2',
     description='Open and process battery charger log data files',
     long_description=readme,
     long_description_content_type="text/markdown",
@@ -29,4 +29,5 @@ setup(
         'res2sqlite = galvani.res2sqlite:main',
     ]},
     install_requires=['numpy'],
+    tests_require=['pytest'],
 )

View File

@@ -2,11 +2,11 @@
 import os.path
 import re
-from datetime import date, datetime
+from datetime import datetime
 import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
-from nose.tools import ok_, eq_, raises
+import pytest
 from galvani import MPTfile, MPRfile
 from galvani.BioLogic import MPTfileCSV, str3 # not exported
@@ -16,77 +16,58 @@ testdata_dir = os.path.join(os.path.dirname(__file__), 'testdata')
 def test_open_MPT():
     mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt'))
-    eq_(comments, [])
-    eq_(mpt1.dtype.names, ("mode", "ox/red", "error", "control changes",
-                           "Ns changes", "counter inc.", "time/s",
-                           "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W",
-                           "I/mA", "(Q-Qo)/mA.h", "x"))
+    assert comments == []
+    assert mpt1.dtype.names == (
+        "mode", "ox/red", "error", "control changes", "Ns changes",
+        "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W",
+        "I/mA", "(Q-Qo)/mA.h", "x",
+    )
-@raises(ValueError)
 def test_open_MPT_fails_for_bad_file():
-    mpt1 = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
+    with pytest.raises(ValueError, match='Bad first line'):
+        MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
 def test_open_MPT_csv():
     mpt1, comments = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpt'))
-    eq_(comments, [])
-    eq_(mpt1.fieldnames, ["mode", "ox/red", "error", "control changes",
-                          "Ns changes", "counter inc.", "time/s",
-                          "control/V/mA", "Ewe/V", "dq/mA.h", "P/W",
-                          "<I>/mA", "(Q-Qo)/mA.h", "x"])
+    assert comments == []
+    assert mpt1.fieldnames == [
+        "mode", "ox/red", "error", "control changes", "Ns changes",
+        "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h", "P/W",
+        "<I>/mA", "(Q-Qo)/mA.h", "x",
+    ]
-@raises(ValueError)
 def test_open_MPT_csv_fails_for_bad_file():
-    mpt1 = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr'))
+    with pytest.raises((ValueError, UnicodeDecodeError)):
+        MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr'))
-def test_open_MPR1():
-    mpr1 = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
-    ## Check the dates as a basic test that it has been read properly
-    eq_(mpr1.startdate, date(2011, 10, 29))
-    eq_(mpr1.enddate, date(2011, 10, 31))
-def test_open_MPR2():
-    mpr2 = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr'))
-    ## Check the dates as a basic test that it has been read properly
-    eq_(mpr2.startdate, date(2012, 9, 27))
-    eq_(mpr2.enddate, date(2012, 9, 27))
-def test_open_MPR3():
-    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic3.mpr'))
-    ## Check the dates as a basic test that it has been read properly
-    eq_(mpr.startdate, date(2013, 3, 27))
-    eq_(mpr.enddate, date(2013, 3, 27))
-def test_open_MPR4():
-    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr'))
-    ## Check the dates as a basic test that it has been read properly
-    eq_(mpr.startdate, date(2011, 11, 1))
-    eq_(mpr.enddate, date(2011, 11, 2))
-def test_open_MPR5():
-    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr'))
-    ## Check the dates as a basic test that it has been read properly
-    eq_(mpr.startdate, date(2013, 1, 28))
-    eq_(mpr.enddate, date(2013, 1, 28))
-def test_open_MPR6():
-    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr'))
-    ## Check the dates as a basic test that it has been read properly
-    eq_(mpr.startdate, date(2012, 9, 11))
-    ## no end date because no VMP LOG module
-@raises(ValueError)
+@pytest.mark.parametrize('filename, startdate, enddate', [
+    ('bio_logic1.mpr', '2011-10-29', '2011-10-31'),
+    ('bio_logic2.mpr', '2012-09-27', '2012-09-27'),
+    ('bio_logic3.mpr', '2013-03-27', '2013-03-27'),
+    ('bio_logic4.mpr', '2011-11-01', '2011-11-02'),
+    ('bio_logic5.mpr', '2013-01-28', '2013-01-28'),
+    # bio_logic6.mpr has no end date because it does not have a VMP LOG module
+    ('bio_logic6.mpr', '2012-09-11', None),
+    # C019P-0ppb-A_C01.mpr stores the date in a different format
+    ('C019P-0ppb-A_C01.mpr', '2019-03-14', '2019-03-14'),
+])
+def test_MPR_dates(filename, startdate, enddate):
+    """Check that the start and end dates in .mpr files are read correctly."""
+    mpr = MPRfile(os.path.join(testdata_dir, filename))
+    assert mpr.startdate.strftime('%Y-%m-%d') == startdate
+    if enddate:
+        assert mpr.enddate.strftime('%Y-%m-%d') == enddate
+    else:
+        assert not hasattr(mpr, 'enddate')
 def test_open_MPR_fails_for_bad_file():
-    mpr1 = MPRfile(os.path.join(testdata_dir, 'arbin1.res'))
+    with pytest.raises(ValueError, match='Invalid magic for .mpr file'):
+        MPRfile(os.path.join(testdata_dir, 'arbin1.res'))
 def timestamp_from_comments(comments):
@@ -139,30 +120,31 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     assert_field_matches("(Q-Qo)/C", decimal=6) # 32 bit float precision
     try:
-        eq_(timestamp_from_comments(comments), mpr.timestamp)
+        assert timestamp_from_comments(comments) == mpr.timestamp
     except AttributeError:
         pass
-def test_MPR1_matches_MPT1():
-    mpr1 = MPRfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
-    mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt'))
-    assert_MPR_matches_MPT(mpr1, mpt1, comments)
-def test_MPR2_matches_MPT2():
-    mpr2 = MPRfile(os.path.join(testdata_dir, 'bio_logic2.mpr'))
-    mpt2, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic2.mpt'))
-    assert_MPR_matches_MPT(mpr2, mpt2, comments)
-## No bio_logic3.mpt file
-def test_MPR4_matches_MPT4():
-    mpr4 = MPRfile(os.path.join(testdata_dir, 'bio_logic4.mpr'))
-    mpt4, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic4.mpt'))
-    assert_MPR_matches_MPT(mpr4, mpt4, comments)
+@pytest.mark.parametrize('basename', [
+    'bio_logic1',
+    'bio_logic2',
+    # No bio_logic3.mpt file
+    'bio_logic4',
+    # bio_logic5 and bio_logic6 are special cases
+    'CV_C01',
+    '121_CA_455nm_6V_30min_C01',
+])
+def test_MPR_matches_MPT(basename):
+    """Check the MPR parser against the MPT parser.
+    Load a binary .mpr file and a text .mpt file which should contain
+    exactly the same data. Check that the loaded data actually match.
+    """
+    binpath = os.path.join(testdata_dir, basename + '.mpr')
+    txtpath = os.path.join(testdata_dir, basename + '.mpt')
+    mpr = MPRfile(binpath)
+    mpt, comments = MPTfile(txtpath)
+    assert_MPR_matches_MPT(mpr, mpt, comments)
 def test_MPR5_matches_MPT5():
@@ -178,18 +160,3 @@ def test_MPR6_matches_MPT6():
     mpt, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic6.mpt'))
     mpr.data = mpr.data[:958] # .mpt file is incomplete
     assert_MPR_matches_MPT(mpr, mpt, comments)
-## Tests for issue #1 -- new dtypes ##
-def test_CV_C01():
-    mpr = MPRfile(os.path.join(testdata_dir, 'CV_C01.mpr'))
-    mpt, comments = MPTfile(os.path.join(testdata_dir, 'CV_C01.mpt'))
-    assert_MPR_matches_MPT(mpr, mpt, comments)
-def test_CA_455nm():
-    mpr = MPRfile(os.path.join(testdata_dir, '121_CA_455nm_6V_30min_C01.mpr'))
-    mpt, comments = MPTfile(os.path.join(testdata_dir, '121_CA_455nm_6V_30min_C01.mpt'))
-    assert_MPR_matches_MPT(mpr, mpt, comments)

View File

@@ -1,5 +1,5 @@
 [tox]
 envlist = py27,py35,py37
 [testenv]
-deps=nose
-commands=nosetests
+deps=pytest
+commands=pytest