Reformatted test scripts with black 23.12.1

2024-01-20 21:57:31 +02:00
parent 239db97c69
commit 1cd5bd6239
3 changed files with 140 additions and 96 deletions
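Black 23.12.1 normalizes string literals to double quotes and splits calls that exceed its line-length limit across several lines, so the changes below are mechanical. A minimal sketch of reproducing this kind of reformatting through black's Python API (the exact command used for this commit is not recorded on this page; running `python -m black tests/` from the repository root is the usual route, and the sample source below is purely illustrative):

# Sketch only, not the command used for this commit; assumes black==23.12.1 is installed.
import black

src = "@pytest.fixture(scope='session')\ndef testdata_dir():\n    return 'testdata'\n"
print(black.format_str(src, mode=black.Mode()))
# -> single quotes become double quotes, as throughout this diff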

View File

@@ -9,7 +9,7 @@ import os
 import pytest
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def testdata_dir():
     """Path to the testdata directory."""
-    return os.path.join(os.path.dirname(__file__), 'testdata')
+    return os.path.join(os.path.dirname(__file__), "testdata")

View File

@@ -13,8 +13,7 @@ import pytest
 from galvani import res2sqlite
-have_mdbtools = (subprocess.call(['which', 'mdb-export'],
-                                 stdout=subprocess.DEVNULL) == 0)
+have_mdbtools = subprocess.call(["which", "mdb-export"], stdout=subprocess.DEVNULL) == 0
 def test_res2sqlite_help():
@@ -22,39 +21,47 @@ def test_res2sqlite_help():
     This should work even when mdbtools is not installed.
     """
-    help_output = subprocess.check_output(['res2sqlite', '--help'])
-    assert b'Convert Arbin .res files to sqlite3 databases' in help_output
+    help_output = subprocess.check_output(["res2sqlite", "--help"])
+    assert b"Convert Arbin .res files to sqlite3 databases" in help_output
-@pytest.mark.skipif(have_mdbtools, reason='This tests the failure when mdbtools is not installed')
+@pytest.mark.skipif(
+    have_mdbtools, reason="This tests the failure when mdbtools is not installed"
+)
 def test_convert_Arbin_no_mdbtools(testdata_dir, tmpdir):
     """Checks that the conversion fails with an appropriate error message."""
-    res_file = os.path.join(testdata_dir, 'arbin1.res')
-    sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db')
-    with pytest.raises(RuntimeError, match="Could not locate the `mdb-export` executable."):
+    res_file = os.path.join(testdata_dir, "arbin1.res")
+    sqlite_file = os.path.join(str(tmpdir), "arbin1.s3db")
+    with pytest.raises(
+        RuntimeError, match="Could not locate the `mdb-export` executable."
+    ):
         res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file)
-@pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools')
-@pytest.mark.parametrize('basename', ['arbin1', 'UM34_Test005E'])
+@pytest.mark.skipif(
+    not have_mdbtools, reason="Reading the Arbin file requires MDBTools"
+)
+@pytest.mark.parametrize("basename", ["arbin1", "UM34_Test005E"])
 def test_convert_Arbin_to_sqlite_function(testdata_dir, tmpdir, basename):
     """Convert an Arbin file to SQLite using the functional interface."""
-    res_file = os.path.join(testdata_dir, basename + '.res')
-    sqlite_file = os.path.join(str(tmpdir), basename + '.s3db')
+    res_file = os.path.join(testdata_dir, basename + ".res")
+    sqlite_file = os.path.join(str(tmpdir), basename + ".s3db")
     res2sqlite.convert_arbin_to_sqlite(res_file, sqlite_file)
     assert os.path.isfile(sqlite_file)
     with sqlite3.connect(sqlite_file) as conn:
-        csr = conn.execute('SELECT * FROM Channel_Normal_Table;')
+        csr = conn.execute("SELECT * FROM Channel_Normal_Table;")
         csr.fetchone()
-@pytest.mark.skipif(not have_mdbtools, reason='Reading the Arbin file requires MDBTools')
+@pytest.mark.skipif(
+    not have_mdbtools, reason="Reading the Arbin file requires MDBTools"
+)
 def test_convert_cmdline(testdata_dir, tmpdir):
     """Checks that the conversion fails with an appropriate error message."""
-    res_file = os.path.join(testdata_dir, 'arbin1.res')
-    sqlite_file = os.path.join(str(tmpdir), 'arbin1.s3db')
-    subprocess.check_call(['res2sqlite', res_file, sqlite_file])
+    res_file = os.path.join(testdata_dir, "arbin1.res")
+    sqlite_file = os.path.join(str(tmpdir), "arbin1.s3db")
+    subprocess.check_call(["res2sqlite", res_file, sqlite_file])
     assert os.path.isfile(sqlite_file)
     with sqlite3.connect(sqlite_file) as conn:
-        csr = conn.execute('SELECT * FROM Channel_Normal_Table;')
+        csr = conn.execute("SELECT * FROM Channel_Normal_Table;")
         csr.fetchone()
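As the tests above exercise, the converter can be driven from Python or from the command line. A minimal usage sketch, assuming galvani is installed, MDBTools provides `mdb-export` on the PATH, and the file names below are placeholders:

# Usage sketch based on the calls in the tests above; paths are placeholders
# and MDBTools (`mdb-export`) must be installed.
import sqlite3

from galvani import res2sqlite

res2sqlite.convert_arbin_to_sqlite("arbin1.res", "arbin1.s3db")
with sqlite3.connect("arbin1.s3db") as conn:
    print(conn.execute("SELECT * FROM Channel_Normal_Table;").fetchone())
# Equivalent CLI call, as in test_convert_cmdline: res2sqlite arbin1.res arbin1.s3db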

View File

@@ -17,33 +17,55 @@ from galvani.BioLogic import MPTfileCSV # not exported
 def test_open_MPT(testdata_dir):
-    mpt1, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpt'))
+    mpt1, comments = MPTfile(os.path.join(testdata_dir, "bio_logic1.mpt"))
     assert comments == []
     assert mpt1.dtype.names == (
-        "mode", "ox/red", "error", "control changes", "Ns changes",
-        "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dQ/mA.h", "P/W",
-        "I/mA", "(Q-Qo)/mA.h", "x",
+        "mode",
+        "ox/red",
+        "error",
+        "control changes",
+        "Ns changes",
+        "counter inc.",
+        "time/s",
+        "control/V/mA",
+        "Ewe/V",
+        "dQ/mA.h",
+        "P/W",
+        "I/mA",
+        "(Q-Qo)/mA.h",
+        "x",
     )
 def test_open_MPT_fails_for_bad_file(testdata_dir):
-    with pytest.raises(ValueError, match='Bad first line'):
-        MPTfile(os.path.join(testdata_dir, 'bio_logic1.mpr'))
+    with pytest.raises(ValueError, match="Bad first line"):
+        MPTfile(os.path.join(testdata_dir, "bio_logic1.mpr"))
 def test_open_MPT_csv(testdata_dir):
-    mpt1, comments = MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpt'))
+    mpt1, comments = MPTfileCSV(os.path.join(testdata_dir, "bio_logic1.mpt"))
     assert comments == []
     assert mpt1.fieldnames == [
-        "mode", "ox/red", "error", "control changes", "Ns changes",
-        "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h", "P/W",
-        "<I>/mA", "(Q-Qo)/mA.h", "x",
+        "mode",
+        "ox/red",
+        "error",
+        "control changes",
+        "Ns changes",
+        "counter inc.",
+        "time/s",
+        "control/V/mA",
+        "Ewe/V",
+        "dq/mA.h",
+        "P/W",
+        "<I>/mA",
+        "(Q-Qo)/mA.h",
+        "x",
     ]
 def test_open_MPT_csv_fails_for_bad_file(testdata_dir):
     with pytest.raises((ValueError, UnicodeDecodeError)):
-        MPTfileCSV(os.path.join(testdata_dir, 'bio_logic1.mpr'))
+        MPTfileCSV(os.path.join(testdata_dir, "bio_logic1.mpr"))
 def test_colID_map_uniqueness():
@@ -59,13 +81,16 @@ def test_colID_map_uniqueness():
     assert not set(field_names).intersection(flag_names)
-@pytest.mark.parametrize('colIDs, expected', [
-    ([1, 2, 3], [('flags', 'u1')]),
-    ([4, 6], [('time/s', '<f8'), ('Ewe/V', '<f4')]),
-    ([1, 4, 21], [('flags', 'u1'), ('time/s', '<f8')]),
-    ([4, 6, 4], [('time/s', '<f8'), ('Ewe/V', '<f4'), ('time/s 2', '<f8')]),
-    ([4, 9999], NotImplementedError),
-])
+@pytest.mark.parametrize(
+    "colIDs, expected",
+    [
+        ([1, 2, 3], [("flags", "u1")]),
+        ([4, 6], [("time/s", "<f8"), ("Ewe/V", "<f4")]),
+        ([1, 4, 21], [("flags", "u1"), ("time/s", "<f8")]),
+        ([4, 6, 4], [("time/s", "<f8"), ("Ewe/V", "<f4"), ("time/s 2", "<f8")]),
+        ([4, 9999], NotImplementedError),
+    ],
+)
 def test_colID_to_dtype(colIDs, expected):
     """Test converting column ID to numpy dtype."""
     if isinstance(expected, type) and issubclass(expected, Exception):
@@ -77,14 +102,17 @@ def test_colID_to_dtype(colIDs, expected):
         assert dtype == expected_dtype
-@pytest.mark.parametrize('data, expected', [
-    ('02/23/17', date(2017, 2, 23)),
-    ('10-03-05', date(2005, 10, 3)),
-    ('11.12.20', date(2020, 11, 12)),
-    (b'01/02/03', date(2003, 1, 2)),
-    ('13.08.07', ValueError),
-    ('03-04/05', ValueError),
-])
+@pytest.mark.parametrize(
+    "data, expected",
+    [
+        ("02/23/17", date(2017, 2, 23)),
+        ("10-03-05", date(2005, 10, 3)),
+        ("11.12.20", date(2020, 11, 12)),
+        (b"01/02/03", date(2003, 1, 2)),
+        ("13.08.07", ValueError),
+        ("03-04/05", ValueError),
+    ],
+)
 def test_parse_BioLogic_date(data, expected):
     """Test the parse_BioLogic_date function."""
     if isinstance(expected, type) and issubclass(expected, Exception):
@@ -95,51 +123,54 @@ def test_parse_BioLogic_date(data, expected):
         assert result == expected
-@pytest.mark.parametrize('filename, startdate, enddate', [
-    ('bio_logic1.mpr', '2011-10-29', '2011-10-31'),
-    ('bio_logic2.mpr', '2012-09-27', '2012-09-27'),
-    ('bio_logic3.mpr', '2013-03-27', '2013-03-27'),
-    ('bio_logic4.mpr', '2011-11-01', '2011-11-02'),
-    ('bio_logic5.mpr', '2013-01-28', '2013-01-28'),
-    # bio_logic6.mpr has no end date because it does not have a VMP LOG module
-    ('bio_logic6.mpr', '2012-09-11', None),
-    # C019P-0ppb-A_C01.mpr stores the date in a different format
-    ('C019P-0ppb-A_C01.mpr', '2019-03-14', '2019-03-14'),
-    ('Rapp_Error.mpr', '2010-12-02', '2010-12-02'),
-    ('Ewe_Error.mpr', '2021-11-18', '2021-11-19'),
-])
+@pytest.mark.parametrize(
+    "filename, startdate, enddate",
+    [
+        ("bio_logic1.mpr", "2011-10-29", "2011-10-31"),
+        ("bio_logic2.mpr", "2012-09-27", "2012-09-27"),
+        ("bio_logic3.mpr", "2013-03-27", "2013-03-27"),
+        ("bio_logic4.mpr", "2011-11-01", "2011-11-02"),
+        ("bio_logic5.mpr", "2013-01-28", "2013-01-28"),
+        # bio_logic6.mpr has no end date because it does not have a VMP LOG module
+        ("bio_logic6.mpr", "2012-09-11", None),
+        # C019P-0ppb-A_C01.mpr stores the date in a different format
+        ("C019P-0ppb-A_C01.mpr", "2019-03-14", "2019-03-14"),
+        ("Rapp_Error.mpr", "2010-12-02", "2010-12-02"),
+        ("Ewe_Error.mpr", "2021-11-18", "2021-11-19"),
+    ],
+)
 def test_MPR_dates(testdata_dir, filename, startdate, enddate):
     """Check that the start and end dates in .mpr files are read correctly."""
     mpr = MPRfile(os.path.join(testdata_dir, filename))
-    assert mpr.startdate.strftime('%Y-%m-%d') == startdate
+    assert mpr.startdate.strftime("%Y-%m-%d") == startdate
     if enddate:
-        assert mpr.enddate.strftime('%Y-%m-%d') == enddate
+        assert mpr.enddate.strftime("%Y-%m-%d") == enddate
     else:
-        assert not hasattr(mpr, 'enddate')
+        assert not hasattr(mpr, "enddate")
 def test_open_MPR_fails_for_bad_file(testdata_dir):
-    with pytest.raises(ValueError, match='Invalid magic for .mpr file'):
-        MPRfile(os.path.join(testdata_dir, 'arbin1.res'))
+    with pytest.raises(ValueError, match="Invalid magic for .mpr file"):
+        MPRfile(os.path.join(testdata_dir, "arbin1.res"))
 def timestamp_from_comments(comments):
     for line in comments:
-        time_match = re.match(b'Acquisition started on : ([0-9/]+ [0-9:]+)', line)
+        time_match = re.match(b"Acquisition started on : ([0-9/]+ [0-9:]+)", line)
         if time_match:
-            timestamp = datetime.strptime(time_match.group(1).decode('ascii'),
-                                          '%m/%d/%Y %H:%M:%S')
+            timestamp = datetime.strptime(
+                time_match.group(1).decode("ascii"), "%m/%d/%Y %H:%M:%S"
+            )
             return timestamp
     raise AttributeError("No timestamp in comments")
 def assert_MPR_matches_MPT(mpr, mpt, comments):
     def assert_field_matches(fieldname, decimal):
         if fieldname in mpr.dtype.fields:
-            assert_array_almost_equal(mpr.data[fieldname],
-                                      mpt[fieldname],
-                                      decimal=decimal)
+            assert_array_almost_equal(
+                mpr.data[fieldname], mpt[fieldname], decimal=decimal
+            )
     def assert_field_exact(fieldname):
         if fieldname in mpr.dtype.fields:
@@ -154,16 +185,16 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
     # Nothing uses the 0x40 bit of the flags
     assert_array_equal(mpr.get_flag("counter inc."), mpt["counter inc."])
-    assert_array_almost_equal(mpr.data["time/s"],
-                              mpt["time/s"],
-                              decimal=2)  # 2 digits in CSV
+    assert_array_almost_equal(
+        mpr.data["time/s"], mpt["time/s"], decimal=2
+    )  # 2 digits in CSV
     assert_field_matches("control/V/mA", decimal=6)
     assert_field_matches("control/V", decimal=6)
-    assert_array_almost_equal(mpr.data["Ewe/V"],
-                              mpt["Ewe/V"],
-                              decimal=6)  # 32 bit float precision
+    assert_array_almost_equal(
+        mpr.data["Ewe/V"], mpt["Ewe/V"], decimal=6
+    )  # 32 bit float precision
     assert_field_matches("dQ/mA.h", decimal=16)  # 64 bit float precision
     assert_field_matches("P/W", decimal=10)  # 32 bit float precision for 1.xxE-5
@@ -178,39 +209,45 @@ def assert_MPR_matches_MPT(mpr, mpt, comments):
         pass
-@pytest.mark.parametrize('basename', [
-    'bio_logic1',
-    'bio_logic2',
-    # No bio_logic3.mpt file
-    'bio_logic4',
-    # bio_logic5 and bio_logic6 are special cases
-    'CV_C01',
-    '121_CA_455nm_6V_30min_C01',
-    '020-formation_CB5',
-])
+@pytest.mark.parametrize(
+    "basename",
+    [
+        "bio_logic1",
+        "bio_logic2",
+        # No bio_logic3.mpt file
+        "bio_logic4",
+        # bio_logic5 and bio_logic6 are special cases
+        "CV_C01",
+        "121_CA_455nm_6V_30min_C01",
+        "020-formation_CB5",
+    ],
+)
 def test_MPR_matches_MPT(testdata_dir, basename):
     """Check the MPR parser against the MPT parser.
     Load a binary .mpr file and a text .mpt file which should contain
     exactly the same data. Check that the loaded data actually match.
     """
-    binpath = os.path.join(testdata_dir, basename + '.mpr')
-    txtpath = os.path.join(testdata_dir, basename + '.mpt')
+    binpath = os.path.join(testdata_dir, basename + ".mpr")
+    txtpath = os.path.join(testdata_dir, basename + ".mpt")
     mpr = MPRfile(binpath)
-    mpt, comments = MPTfile(txtpath, encoding='latin1')
+    mpt, comments = MPTfile(txtpath, encoding="latin1")
     assert_MPR_matches_MPT(mpr, mpt, comments)
 def test_MPR5_matches_MPT5(testdata_dir):
-    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic5.mpr'))
-    mpt, comments = MPTfile((re.sub(b'\tXXX\t', b'\t0\t', line) for line in
-                             open(os.path.join(testdata_dir, 'bio_logic5.mpt'),
-                                  mode='rb')))
+    mpr = MPRfile(os.path.join(testdata_dir, "bio_logic5.mpr"))
+    mpt, comments = MPTfile(
+        (
+            re.sub(b"\tXXX\t", b"\t0\t", line)
+            for line in open(os.path.join(testdata_dir, "bio_logic5.mpt"), mode="rb")
+        )
+    )
     assert_MPR_matches_MPT(mpr, mpt, comments)
 def test_MPR6_matches_MPT6(testdata_dir):
-    mpr = MPRfile(os.path.join(testdata_dir, 'bio_logic6.mpr'))
-    mpt, comments = MPTfile(os.path.join(testdata_dir, 'bio_logic6.mpt'))
+    mpr = MPRfile(os.path.join(testdata_dir, "bio_logic6.mpr"))
+    mpt, comments = MPTfile(os.path.join(testdata_dir, "bio_logic6.mpt"))
     mpr.data = mpr.data[:958]  # .mpt file is incomplete
     assert_MPR_matches_MPT(mpr, mpt, comments)
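The BioLogic tests above check the binary .mpr parser against the text .mpt parser. A minimal usage sketch of that pairing, assuming galvani is installed and the bio_logic1 test files are available locally (the paths below are placeholders):

# Usage sketch of the parsers exercised above; paths are placeholders.
from galvani import MPRfile, MPTfile

mpr = MPRfile("bio_logic1.mpr")            # binary BioLogic data file
mpt, comments = MPTfile("bio_logic1.mpt")  # text export of the same measurement
print(mpr.startdate)                       # parsed from the .mpr header
print(mpr.data["Ewe/V"][:5])               # structured numpy array of the data
print(mpt["Ewe/V"][:5])                    # should closely match the binary values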