Use np.frombuffer instead of np.fromstring

Fixes #22
This commit is contained in:
2019-03-16 13:41:49 +01:00
parent e52efeb9bd
commit 1bcbc16bab

View File

@@ -275,7 +275,7 @@ def read_VMP_modules(fileobj, read_module_data=True):
 if len(hdr_bytes) < VMPmodule_hdr.itemsize:
     raise IOError("Unexpected end of file while reading module header")
-hdr = np.fromstring(hdr_bytes, dtype=VMPmodule_hdr, count=1)
+hdr = np.frombuffer(hdr_bytes, dtype=VMPmodule_hdr, count=1)
 hdr_dict = dict(((n, hdr[n][0]) for n in VMPmodule_hdr.names))
 hdr_dict['offset'] = fileobj.tell()
 if read_module_data:
@@ -326,17 +326,17 @@ class MPRfile:
 data_module, = (m for m in modules if m['shortname'] == b'VMP data ')
 maybe_log_module = [m for m in modules if m['shortname'] == b'VMP LOG ']
-n_data_points = np.fromstring(data_module['data'][:4], dtype='<u4')
+n_data_points = np.frombuffer(data_module['data'][:4], dtype='<u4')
-n_columns = np.fromstring(data_module['data'][4:5], dtype='u1')
+n_columns = np.frombuffer(data_module['data'][4:5], dtype='u1')
 n_columns = np.asscalar(n_columns)  # Compatibility with recent numpy
 if data_module['version'] == 0:
-    column_types = np.fromstring(data_module['data'][5:], dtype='u1',
+    column_types = np.frombuffer(data_module['data'][5:], dtype='u1',
                                  count=n_columns)
     remaining_headers = data_module['data'][5 + n_columns:100]
     main_data = data_module['data'][100:]
 elif data_module['version'] == 2:
-    column_types = np.fromstring(data_module['data'][5:], dtype='<u2',
+    column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
                                  count=n_columns)
     ## There is 405 bytes of data before the main array starts
     remaining_headers = data_module['data'][5 + 2 * n_columns:405]
@@ -351,7 +351,7 @@ class MPRfile:
 assert(not any(remaining_headers))
 self.dtype, self.flags_dict, self.flags2_dict = VMPdata_dtype_from_colIDs(column_types)
-self.data = np.fromstring(main_data, dtype=self.dtype)
+self.data = np.frombuffer(main_data, dtype=self.dtype)
 assert(self.data.shape[0] == n_data_points)
 ## No idea what these 'column types' mean or even if they are actually
@@ -371,13 +371,13 @@ class MPRfile:
 ## There is a timestamp at either 465 or 469 bytes
 ## I can't find any reason why it is one or the other in any
 ## given file
-ole_timestamp1 = np.fromstring(log_module['data'][465:],
+ole_timestamp1 = np.frombuffer(log_module['data'][465:],
                                dtype='<f8', count=1)
-ole_timestamp2 = np.fromstring(log_module['data'][469:],
+ole_timestamp2 = np.frombuffer(log_module['data'][469:],
                                dtype='<f8', count=1)
-ole_timestamp3 = np.fromstring(log_module['data'][473:],
+ole_timestamp3 = np.frombuffer(log_module['data'][473:],
                                dtype='<f8', count=1)
-ole_timestamp4 = np.fromstring(log_module['data'][585:],
+ole_timestamp4 = np.frombuffer(log_module['data'][585:],
                                dtype='<f8', count=1)
 if ole_timestamp1 > 40000 and ole_timestamp1 < 50000: