16 Commits

Author SHA1 Message Date
Matthew Evans
c8e5bb12b8 Merge pull request #122 from echemdata/ml-evs/fix-ci
Pin and update release downloader action
2025-03-22 15:56:57 +00:00
Matthew Evans
1d913dd2f1 Pin and update release downloader action 2025-03-22 13:47:42 +00:00
Matthew Evans
3c1446ff07 Merge pull request #119 from d-cogswell/master
Fix deprecated numpy aliases which were removed in 2.0.0
2024-07-31 15:54:27 +01:00
Dan Cogswell
e18a21ffbc Reverses 79e3df0 which pins numpy version. 2024-07-31 10:18:47 -04:00
Dan Cogswell
260ad72a6e Fix deprecated numpy aliases which were removed in numpy version 2.0.0. 2024-07-30 10:55:48 -04:00
Matthew Evans
7d264999db Merge pull request #118 from echemdata/ml-evs/lfs
LFS workaround using archived releases in CI
2024-07-12 15:29:02 +01:00
Matthew Evans
1e53de56ef LFS note formatting and location in README 2024-07-12 14:33:53 +01:00
Matthew Evans
f44851ec37 Add flake8 skip 2024-07-12 14:31:15 +01:00
Matthew Evans
3b5dc48fc6 Add LFS warning note 2024-07-12 14:31:14 +01:00
Matthew Evans
56bebfe498 Replace failing lfs caching with downloading test files from release tarballs 2024-07-12 14:31:11 +01:00
Matthew Evans
d33c6f7561 Merge pull request #117 from echemdata/ml-evs/pin-numpy
Add upper numpy pin
2024-07-12 13:20:03 +01:00
Matthew Evans
79e3df0ed9 Add upper numpy pin 2024-07-12 12:45:02 +01:00
3c904db04e Merge pull request #105 from echemdata/ml-evs/arbin-in-memory
Optionally read Arbin into in-memory sqlite without temporary file
2024-03-03 10:32:30 +02:00
Matthew Evans
fbc90fc961 Update tests/test_Arbin.py
Co-authored-by: Chris Kerr <chris.kerr@mykolab.ch>
2024-03-02 18:13:40 +01:00
545a82ec35 Bump version to 0.4.1
I forgot to update the version before tagging 0.4.0 so I will have to
tag a 0.4.1 release instead.
2024-03-02 16:29:59 +02:00
Matthew Evans
a845731131 Optionally read Arbin into in-memory sqlite without temporary file 2024-02-12 10:55:52 +00:00
6 changed files with 76 additions and 16 deletions

.github/workflows/ci.yml

@@ -26,11 +26,28 @@ jobs:
         python-version: ['3.8', '3.9', '3.10', '3.11']
     steps:
-      - name: Cached LFS checkout
-        uses: nschloe/action-cached-lfs-checkout@v1.2.2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          enableCrossOsArchive: true
+          lfs: false
+
+      # Due to limited LFS bandwidth, it is preferable to download
+      # test files from the last release.
+      #
+      # This does mean that testing new LFS files in the CI is tricky;
+      # care should be taken to also test new files locally first.
+      # Tests missing these files in the CI should still fail.
+      - name: Download static files from last release for testing
+        uses: robinraju/release-downloader@v1.12
+        with:
+          latest: true
+          tarBall: true
+          out-file-path: /home/runner/work/last-release
+          extract: true
+
+      - name: Copy test files from static downloaded release
+        run: |
+          cp -r /home/runner/work/last-release/*/tests/testdata tests
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
@@ -50,5 +67,5 @@ jobs:
           tox -vv --notest
       - name: Run all tests
-        run: |
+        run: |-
           tox --skip-pkg-install
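The comment block in the new step explains the approach: rather than spending LFS bandwidth on every CI run, the workflow pulls the test data out of the latest release tarball. (The `run: |` to `run: |-` change in the second hunk only switches the YAML block scalar to strip its trailing newline.) A minimal Python sketch of the same retrieval for local use, assuming only the public GitHub releases API and the `tests/testdata` layout implied by the `cp` step; the `galvani-last-release` staging directory is a hypothetical name:

```python
# Fetch the latest release tarball and extract only tests/testdata,
# mirroring the release-downloader and cp steps in the workflow above.
import io
import json
import tarfile
import urllib.request

API = "https://api.github.com/repos/echemdata/galvani/releases/latest"

with urllib.request.urlopen(API) as resp:
    tarball_url = json.load(resp)["tarball_url"]

with urllib.request.urlopen(tarball_url) as resp:
    archive = tarfile.open(fileobj=io.BytesIO(resp.read()), mode="r:gz")

# Release tarballs unpack into a single top-level directory (hence the
# wildcard in the workflow's cp step), so filter members by path instead.
members = [m for m in archive.getmembers() if "/tests/testdata/" in m.name]
archive.extractall(path="galvani-last-release", members=members)
```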

README.md

@@ -47,14 +47,30 @@ The latest galvani releases can be installed from [PyPI](https://pypi.org/projec
 ```shell
 pip install galvani
 ```
 
-The latest development version can be installed with `pip` directly from GitHub:
+The latest development version can be installed with `pip` directly from GitHub (see note about git-lfs below):
 
 ```shell
-pip install git+https://github.com/echemdata/galvani
+GIT_LFS_SKIP_SMUDGE=1 pip install git+https://github.com/echemdata/galvani
 ```
 
 ## Development installation and contributing
 
+> [!WARNING]
+>
+> This project uses Git Large File Storage (LFS) to store its test files;
+> however, the LFS quota provided by GitHub is frequently exceeded.
+> This means that anyone cloning the repository with LFS installed will get
+> failures unless they set the `GIT_LFS_SKIP_SMUDGE=1` environment variable when
+> cloning.
+> The full test data from the last release can always be obtained by
+> downloading the GitHub release archives (tar or zip) at
+> https://github.com/echemdata/galvani/releases/latest
+>
+> If you wish to add test files, please ensure they are as small as possible,
+> and take care that your tests work locally without the need for the LFS files.
+> Ideally, you could commit them to your fork when making a PR, and they can
+> then be converted to LFS files as part of the review.
+
 If you wish to contribute to galvani, please clone the repository and install the testing dependencies:
 
 ```shell
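A practical companion to the warning above: after a `GIT_LFS_SKIP_SMUDGE=1` clone, the LFS-tracked test files are left as small pointer stubs rather than real data. LFS pointer files always begin with a fixed version line, so they are easy to detect. A short sketch, assuming the `tests/testdata` location used elsewhere in this changeset:

```python
# List files under tests/testdata that are still Git LFS pointer stubs,
# i.e. were not smudged into real data during checkout.
from pathlib import Path

LFS_MARKER = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: Path) -> bool:
    with path.open("rb") as fh:
        return fh.read(len(LFS_MARKER)) == LFS_MARKER

for f in sorted(Path("tests/testdata").rglob("*")):
    if f.is_file() and is_lfs_pointer(f):
        print(f"{f}: pointer stub; fetch the real file from the release archive")
```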

galvani/BioLogic.py

@@ -56,7 +56,7 @@ def fieldname_to_dtype(fieldname):
         "|Permittivity|",
         "Tan(Delta)",
     ):
-        return (fieldname, np.float_)
+        return (fieldname, np.float64)
     elif fieldname in (
         "Q charge/discharge/mA.h",
         "step time/s",
@@ -66,15 +66,15 @@ def fieldname_to_dtype(fieldname):
         "Efficiency/%",
         "Capacity/mA.h",
     ):
-        return (fieldname, np.float_)
+        return (fieldname, np.float64)
     elif fieldname in ("cycle number", "I Range", "Ns", "half cycle", "z cycle"):
         return (fieldname, np.int_)
     elif fieldname in ("dq/mA.h", "dQ/mA.h"):
-        return ("dQ/mA.h", np.float_)
+        return ("dQ/mA.h", np.float64)
     elif fieldname in ("I/mA", "<I>/mA"):
-        return ("I/mA", np.float_)
+        return ("I/mA", np.float64)
     elif fieldname in ("Ewe/V", "<Ewe>/V", "Ecell/V", "<Ewe/V>"):
-        return ("Ewe/V", np.float_)
+        return ("Ewe/V", np.float64)
     elif fieldname.endswith(
         (
             "/s",
@@ -103,7 +103,7 @@ def fieldname_to_dtype(fieldname):
             "/%",
         )
     ):
-        return (fieldname, np.float_)
+        return (fieldname, np.float64)
     else:
         raise ValueError("Invalid column header: %s" % fieldname)
@@ -276,7 +276,7 @@ VMPdata_colID_dtype_map = {
     11: ("<I>/mA", "<f8"),
     13: ("(Q-Qo)/mA.h", "<f8"),
     16: ("Analog IN 1/V", "<f4"),
-    17: ("Analog IN 2/V", "<f4"),  # Probably column 18 is Analog IN 3/V, if anyone hits this error in the future
+    17: ("Analog IN 2/V", "<f4"),  # Probably column 18 is Analog IN 3/V, if anyone hits this error in the future  # noqa: E501
     19: ("control/V", "<f4"),
     20: ("control/mA", "<f4"),
     23: ("dQ/mA.h", "<f8"),  # Same as 7?

galvani/res2sqlite.py

@@ -571,13 +571,25 @@ def mdb_get_version(filename):
     return version_tuple
 
 
-def convert_arbin_to_sqlite(input_file, output_file):
+def convert_arbin_to_sqlite(input_file, output_file=None):
     """Read data from an Arbin .res data file and write to a sqlite file.
 
-    Any data currently in the sqlite file will be erased!
+    Any data currently in an sqlite file at `output_file` will be erased!
+
+    Parameters:
+        input_file (str): The path to the Arbin .res file to read from.
+        output_file (str or None): The path to the sqlite file to write to; if None,
+            return a `sqlite3.Connection` into an in-memory database.
+
+    Returns:
+        None or sqlite3.Connection
 
     """
     arbin_version = mdb_get_version(input_file)
+
+    if output_file is None:
+        output_file = ":memory:"
+
     s3db = sqlite3.connect(output_file)
     tables_to_convert = copy(mdb_tables)
@@ -602,6 +614,11 @@ def convert_arbin_to_sqlite(input_file, output_file):
         print("Vacuuming database...")
         s3db.executescript("VACUUM; ANALYZE;")
 
+    if output_file == ":memory:":
+        return s3db
+
     s3db.close()
 
 
 def main(argv=None):
     parser = argparse.ArgumentParser(
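Based on the new signature and docstring, a usage sketch of both modes; `my_data.res` and `my_data.sqlite` are placeholder paths, and reading `.res` files still requires MDBTools, as the test suite notes:

```python
from galvani import res2sqlite

# In-memory mode: omit output_file and get a live sqlite3.Connection back.
conn = res2sqlite.convert_arbin_to_sqlite("my_data.res")
try:
    row = conn.execute("SELECT * FROM Channel_Normal_Table;").fetchone()
    print(row)
finally:
    conn.close()

# File mode (unchanged behaviour): the converted data is written to disk
# and the connection is closed before the function returns.
res2sqlite.convert_arbin_to_sqlite("my_data.res", "my_data.sqlite")
```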

setup.py

@@ -12,7 +12,7 @@ with open(os.path.join(os.path.dirname(__file__), "README.md")) as f:
 
 setup(
     name="galvani",
-    version="0.3.0",
+    version="0.4.1",
     description="Open and process battery charger log data files",
     long_description=readme,
     long_description_content_type="text/markdown",

tests/test_Arbin.py

@@ -53,6 +53,16 @@ def test_convert_Arbin_to_sqlite_function(testdata_dir, tmpdir, basename):
         csr.fetchone()
 
 
+@pytest.mark.parametrize("basename", ["arbin1", "UM34_Test005E"])
+def test_convert_Arbin_to_sqlite_function_in_memory(testdata_dir, tmpdir, basename):
+    """Convert an Arbin file to an in-memory SQLite database."""
+    res_file = os.path.join(testdata_dir, basename + ".res")
+    conn = None
+    with res2sqlite.convert_arbin_to_sqlite(res_file) as conn:
+        csr = conn.execute("SELECT * FROM Channel_Normal_Table;")
+        csr.fetchone()
+
+
 @pytest.mark.skipif(
     not have_mdbtools, reason="Reading the Arbin file requires MDBTools"
 )