From f2b62265b97d19ed6bfb3f788d24ffc095595453 Mon Sep 17 00:00:00 2001
From: Chris Kerr
Date: Fri, 3 May 2019 20:30:12 +0200
Subject: [PATCH] Fixed some flake8 warnings in BioLogic.py

---
 galvani/BioLogic.py | 47 +++++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/galvani/BioLogic.py b/galvani/BioLogic.py
index 78229d7..da3519c 100644
--- a/galvani/BioLogic.py
+++ b/galvani/BioLogic.py
@@ -71,19 +71,20 @@ def MPTfile(file_or_path):
         raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

     # TODO use rb'string' here once Python 2 is no longer supported
-    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$', next(mpt_file))
+    nb_headers_match = re.match(b'Nb header lines : (\\d+)\\s*$',
+                                next(mpt_file))
     nb_headers = int(nb_headers_match.group(1))
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)

-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]

     fieldnames = str3(next(mpt_file)).strip().split('\t')
     record_type = np.dtype(list(map(fieldname_to_dtype, fieldnames)))

-    ## Must be able to parse files where commas are used for decimal points
+    # Must be able to parse files where commas are used for decimal points
     converter_dict = dict(((i, comma_converter)
                            for i in range(len(fieldnames))))
     mpt_array = np.loadtxt(mpt_file, dtype=record_type,
@@ -113,8 +114,8 @@ def MPTfileCSV(file_or_path):
     if nb_headers < 3:
         raise ValueError("Too few header lines: %d" % nb_headers)

-    ## The 'magic number' line, the 'Nb headers' line and the column headers
-    ## make three lines. Every additional line is a comment line.
+    # The 'magic number' line, the 'Nb headers' line and the column headers
+    # make three lines. Every additional line is a comment line.
     comments = [next(mpt_file) for i in range(nb_headers - 3)]

     mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')
@@ -270,7 +271,8 @@ def read_VMP_modules(fileobj, read_module_data=True):
         if len(module_magic) == 0:  # end of file
             break
         elif module_magic != b'MODULE':
-            raise ValueError("Found %r, expecting start of new VMP MODULE" % module_magic)
+            raise ValueError("Found %r, expecting start of new VMP MODULE"
+                             % module_magic)

         hdr_bytes = fileobj.read(VMPmodule_hdr.itemsize)
         if len(hdr_bytes) < VMPmodule_hdr.itemsize:
@@ -294,6 +296,9 @@ def read_VMP_modules(fileobj, read_module_data=True):
             fileobj.seek(hdr_dict['offset'] + hdr_dict['length'], SEEK_SET)


+MPR_MAGIC = b'BIO-LOGIC MODULAR FILE\x1a'.ljust(48) + b'\x00\x00\x00\x00'
+
+
 class MPRfile:
     """Bio-Logic .mpr file

@@ -316,10 +321,8 @@ class MPRfile:
             mpr_file = open(file_or_path, 'rb')
         else:
             mpr_file = file_or_path
-
-        mpr_magic = b'BIO-LOGIC MODULAR FILE\x1a                         \x00\x00\x00\x00'
-        magic = mpr_file.read(len(mpr_magic))
-        if magic != mpr_magic:
+        magic = mpr_file.read(len(MPR_MAGIC))
+        if magic != MPR_MAGIC:
             raise ValueError('Invalid magic for .mpr file: %s' % magic)

         modules = list(read_VMP_modules(mpr_file))
@@ -340,7 +343,7 @@ class MPRfile:
         elif data_module['version'] == 2:
             column_types = np.frombuffer(data_module['data'][5:], dtype='<u2',
             elif ole_timestamp4 > 40000 and ole_timestamp4 < 50000:
                 ole_timestamp = ole_timestamp4
-
+
             else:
                 raise ValueError("Could not find timestamp in the LOG module")

@@ -414,10 +417,10 @@ class MPRfile:
             ole_timedelta = timedelta(days=ole_timestamp[0])
             self.timestamp = ole_base + ole_timedelta
             if self.startdate != self.timestamp.date():
-                raise ValueError("""Date mismatch:
-                Start date: %s
-                End date: %s
-                Timestamp: %s""" % (self.startdate, self.enddate, self.timestamp))
+                raise ValueError("Date mismatch:\n"
+                                 + " Start date: %s\n" % self.startdate
+                                 + " End date: %s\n" % self.enddate
+                                 + " Timestamp: %s\n" % self.timestamp)

     def get_flag(self, flagname):
         if flagname in self.flags_dict: