Mirror of https://github.com/echemdata/galvani.git
Commit: Deal with yet another format - with more flags data (I think)
 BioLogic.py | 45
--- a/BioLogic.py
+++ b/BioLogic.py
@@ -135,6 +135,13 @@ VMPdata_dtype = np.dtype([('flags', 'u1'),
                           ("Ewe/V", '<f4'),
                           ("dQ/mA.h", '<f8'),
                           ("P/W", '<f4')])
+VMPdata_dtype_extra = np.dtype([('flags', '<u2'),
+                                ('blank', 'a1'),
+                                ("time/s", '<f8'),
+                                ("control/V/mA", '<f4'),
+                                ("Ewe/V", '<f4'),
+                                ("dQ/mA.h", '<f8'),
+                                ("P/W", '<f4')])
 
 
 def read_VMP_modules(fileobj, read_module_data=True):
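For orientation, the sketch below (not part of the commit) shows how a structured dtype with this layout turns raw bytes into named columns, which is what MPRfile does with np.frombuffer further down. The record values are made up purely for illustration.

import numpy as np

# Layout mirroring the new VMPdata_dtype_extra: 16-bit flags, one padding
# byte, then time, control, Ewe, dQ and P columns.
VMPdata_dtype_extra = np.dtype([('flags', '<u2'),
                                ('blank', 'a1'),
                                ("time/s", '<f8'),
                                ("control/V/mA", '<f4'),
                                ("Ewe/V", '<f4'),
                                ("dQ/mA.h", '<f8'),
                                ("P/W", '<f4')])

# Build one synthetic record and decode it back the same way MPRfile does,
# i.e. with np.frombuffer over the raw bytes.
record = np.zeros(1, dtype=VMPdata_dtype_extra)
record['time/s'] = 12.5
record['Ewe/V'] = 3.7
raw = record.tobytes()

decoded = np.frombuffer(raw, dtype=VMPdata_dtype_extra)
print(VMPdata_dtype_extra.itemsize)                 # 31 bytes per data point
print(decoded['time/s'][0], decoded['Ewe/V'][0])    # 12.5 3.7...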
@@ -211,27 +218,47 @@ class MPRfile:
         n_columns = int(data_module['data'][4])
         if data_module['version'] == 0:
             ## There is 100 bytes of data before the main array starts
+            assert(n_columns == 11)
             column_types = np.fromstring(data_module['data'][5:], dtype='u1',
                                          count=n_columns)
+            np.testing.assert_array_equal(column_types,
+                                          [1, 2, 3, 21, 31, 65,
+                                           4, 5, 6, 7, 70])
             assert(data_module['length'] - 100 ==
                    VMPdata_dtype.itemsize * n_data_points)
-            self.data = np.frombuffer(data_module['data'], dtype=VMPdata_dtype,
+            self.data = np.frombuffer(data_module['data'],
+                                      dtype=VMPdata_dtype,
                                       offset=100)
         elif data_module['version'] == 2:
             ## There is 405 bytes of data before the main array starts
             column_types = np.fromstring(data_module['data'][5:], dtype='<u2',
                                          count=n_columns)
-            assert(data_module['length'] - 405 ==
-                   VMPdata_dtype.itemsize * n_data_points)
-            self.data = np.frombuffer(data_module['data'], dtype=VMPdata_dtype,
-                                      offset=405)
+            if n_columns == 11:
+                np.testing.assert_array_equal(column_types,
+                                              [1, 2, 3, 21, 31, 65,
+                                               4, 5, 6, 7, 70])
+                assert(data_module['length'] - 405 ==
+                       VMPdata_dtype.itemsize * n_data_points)
+                self.data = np.frombuffer(data_module['data'],
+                                          dtype=VMPdata_dtype,
+                                          offset=405)
+            elif n_columns == 12:
+                np.testing.assert_array_equal(column_types,
+                                              [1, 2, 3, 21, 31, 65,
+                                               131, 4, 5, 6, 7, 70])
+                assert(data_module['length'] - 405 ==
+                       VMPdata_dtype_extra.itemsize * n_data_points)
+                self.data = np.frombuffer(data_module['data'],
+                                          dtype=VMPdata_dtype_extra,
+                                          offset=405)
+                assert(np.all(self.data['blank'] == b'\x00'))
+            else:
+                raise ValueError("Cannot deal with n_columns = %d" % n_columns)
         else:
             raise ValueError("Unrecognised version for data module: %d" %
                              data_module['version'])
         ## No idea what these 'column types' mean or even if they are actually
         ## column types at all
-        np.testing.assert_array_equal(column_types,
-                                      [1, 2, 3, 21, 31, 65, 4, 5, 6, 7, 70])
 
         tm = time.strptime(str(settings_mod['date'], encoding='ascii'),
                            '%m/%d/%y')
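Condensed, the new logic picks a record layout and header offset from the module version and column count before handing the raw bytes to np.frombuffer. Below is a rough standalone sketch of that dispatch; the shortened stand-in dtypes and helper names are local to this example, not part of galvani.

import numpy as np

# Stand-ins for the two layouts in BioLogic.py (field lists shortened here;
# the real dtypes carry the full column sets shown in the diff).
VMPdata_dtype = np.dtype([('flags', 'u1'), ("time/s", '<f8')])
VMPdata_dtype_extra = np.dtype([('flags', '<u2'), ('blank', 'a1'),
                                ("time/s", '<f8')])

def choose_layout(version, n_columns):
    """Mirror the dispatch in the patched MPRfile constructor: version 0
    has 11 columns and a 100-byte header; version 2 has a 405-byte header
    with either the 11-column or the new 12-column (extra flags) layout."""
    if version == 0:
        return VMPdata_dtype, 100
    if version == 2:
        if n_columns == 11:
            return VMPdata_dtype, 405
        if n_columns == 12:
            return VMPdata_dtype_extra, 405
        raise ValueError("Cannot deal with n_columns = %d" % n_columns)
    raise ValueError("Unrecognised version for data module: %d" % version)

def read_points(module_bytes, version, n_columns, n_data_points):
    """Check the module length and decode the data array with np.frombuffer,
    the same way the patched code does."""
    dtype, offset = choose_layout(version, n_columns)
    assert len(module_bytes) - offset == dtype.itemsize * n_data_points
    return np.frombuffer(module_bytes, dtype=dtype, offset=offset)

The length assertion mirrors the one in the diff: the header offset plus itemsize times the number of points must account for the whole module.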
The second file in the commit is the test module (its path does not appear in this extract). The two existing date tests are renamed, and a new test covers the 12-column bio-logic3.mpr file:

@@ -41,18 +41,27 @@ def test_open_MPT_csv_fails_for_bad_file():
     mpt1 = MPTfileCSV(os.path.join(testdata_dir, 'bio-logic1.mpr'))
 
 
-def test_MPR1_read_dates():
+def test_open_MPR1():
     mpr1 = MPRfile(os.path.join(testdata_dir, 'bio-logic1.mpr'))
+    ## Check the dates as a basic test that it has been read properly
     eq_(mpr1.startdate, date(2011, 10, 29))
     eq_(mpr1.enddate, date(2011, 10, 31))
 
 
-def test_MPR2_read_dates():
+def test_open_MPR2():
     mpr2 = MPRfile(os.path.join(testdata_dir, 'bio-logic2.mpr'))
+    ## Check the dates as a basic test that it has been read properly
     eq_(mpr2.startdate, date(2012, 9, 27))
     eq_(mpr2.enddate, date(2012, 9, 27))
 
 
+def test_open_MPR3():
+    mpr2 = MPRfile(os.path.join(testdata_dir, 'bio-logic3.mpr'))
+    ## Check the dates as a basic test that it has been read properly
+    eq_(mpr2.startdate, date(2013, 3, 27))
+    eq_(mpr2.enddate, date(2013, 3, 27))
+
+
 @raises(ValueError)
 def test_open_MPR_fails_for_bad_file():
     mpr1 = MPRfile(os.path.join(testdata_dir, 'arbin1.res'))
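Finally, a hedged usage sketch of what test_open_MPR3 exercises. The import line and the file path are assumptions (neither appears in this extract); MPRfile, the startdate/enddate attributes, and the column names come from the code above.

# Import path and data-file location are assumptions for this sketch.
from galvani.BioLogic import MPRfile

mpr = MPRfile('tests/testdata/bio-logic3.mpr')   # hypothetical path to a 12-column file
print(mpr.startdate, mpr.enddate)     # start and end dates checked by the new test
print(mpr.data.dtype.names)           # ('flags', 'blank', 'time/s', ...)
print(mpr.data['Ewe/V'][:5])          # first few potential readings
print(mpr.data['flags'][:5])          # the wider 16-bit flags column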