Mirror of https://github.com/echemdata/galvani.git, synced 2025-12-14 09:15:34 +00:00

Compare commits: 0.4.0 ... c8e5bb12b8 (16 commits)
| Author | SHA1 | Date |
|---|---|---|
| | c8e5bb12b8 | |
| | 1d913dd2f1 | |
| | 3c1446ff07 | |
| | e18a21ffbc | |
| | 260ad72a6e | |
| | 7d264999db | |
| | 1e53de56ef | |
| | f44851ec37 | |
| | 3b5dc48fc6 | |
| | 56bebfe498 | |
| | d33c6f7561 | |
| | 79e3df0ed9 | |
| | 3c904db04e | |
| | fbc90fc961 | |
| | 545a82ec35 | |
| | a845731131 | |
.github/workflows/ci.yml (vendored): 25 lines changed
```diff
@@ -26,11 +26,28 @@ jobs:
         python-version: ['3.8', '3.9', '3.10', '3.11']
 
     steps:
-      - name: Cached LFS checkout
-        uses: nschloe/action-cached-lfs-checkout@v1.2.2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
-          enableCrossOsArchive: true
+          lfs: false
 
+      # Due to limited LFS bandwidth, it is preferable to download
+      # test files from the last release.
+      #
+      # This does mean that testing new LFS files in the CI is tricky;
+      # care should be taken to also test new files locally first
+      # Tests missing these files in the CI should still fail.
+      - name: Download static files from last release for testing
+        uses: robinraju/release-downloader@v1.12
+        with:
+          latest: true
+          tarBall: true
+          out-file-path: /home/runner/work/last-release
+          extract: true
+
+      - name: Copy test files from static downloaded release
+        run: |
+          cp -r /home/runner/work/last-release/*/tests/testdata tests
+
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v5
```
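As the workflow comments note, LFS bandwidth is limited, so CI now fetches the test data from the latest release archive rather than from LFS. To reproduce the same setup locally, something like the following sketch could work. It is an illustration only, not part of the repository; the GitHub `releases/latest` REST endpoint is standard, and the `tests/testdata` layout is taken from the workflow above:

```python
import glob
import io
import json
import shutil
import tarfile
import urllib.request

# Ask the GitHub REST API for the latest release; its JSON payload
# includes a source tarball URL.
api_url = "https://api.github.com/repos/echemdata/galvani/releases/latest"
with urllib.request.urlopen(api_url) as resp:
    tarball_url = json.load(resp)["tarball_url"]

# Download and unpack the release source archive.
with urllib.request.urlopen(tarball_url) as resp:
    with tarfile.open(fileobj=io.BytesIO(resp.read()), mode="r:gz") as tar:
        tar.extractall("last-release")

# Copy the bundled test data into the working tree, as the CI step does.
(extracted,) = glob.glob("last-release/*/tests/testdata")
shutil.copytree(extracted, "tests/testdata", dirs_exist_ok=True)
```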
```diff
@@ -50,5 +67,5 @@ jobs:
         tox -vv --notest
 
       - name: Run all tests
-        run: |
+        run: |-
           tox --skip-pkg-install
```
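The only change in this hunk is the YAML block scalar indicator on `run:`. Both forms are literal block scalars; `|` (clip) keeps a single trailing newline while `|-` (strip) drops it. A quick demonstration, using PyYAML purely for illustration (GitHub Actions does its own YAML parsing):

```python
import yaml  # PyYAML, assumed installed for this demonstration only

# "|" (literal, clip): the scalar keeps one trailing newline.
print(repr(yaml.safe_load("run: |\n  tox --skip-pkg-install\n")["run"]))
# -> 'tox --skip-pkg-install\n'

# "|-" (literal, strip): the trailing newline is removed.
print(repr(yaml.safe_load("run: |-\n  tox --skip-pkg-install\n")["run"]))
# -> 'tox --skip-pkg-install'
```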
README.md: 20 lines changed
````diff
@@ -47,14 +47,30 @@ The latest galvani releases can be installed from [PyPI](https://pypi.org/projec
 pip install galvani
 ```
 
-The latest development version can be installed with `pip` directly from GitHub:
+The latest development version can be installed with `pip` directly from GitHub (see note about git-lfs below):
 
 ```shell
-pip install git+https://github.com/echemdata/galvani
+GIT_LFS_SKIP_SMUDGE=1 pip install git+https://github.com/echemdata/galvani
 ```
 
 ## Development installation and contributing
 
+> [!WARNING]
+>
+> This project uses Git Large File Storage (LFS) to store its test files,
+> however the LFS quota provided by GitHub is frequently exceeded.
+> This means that anyone cloning the repository with LFS installed will get
+> failures unless they set the `GIT_LFS_SKIP_SMUDGE=1` environment variable when
+> cloning.
+> The full test data from the last release can always be obtained by
+> downloading the GitHub release archives (tar or zip), at
+> https://github.com/echemdata/galvani/releases/latest
+>
+> If you wish to add test files, please ensure they are as small as possible,
+> and take care that your tests work locally without the need for the LFS files.
+> Ideally, you could commit them to your fork when making a PR, and then they
+> can be converted to LFS files as part of the review.
+
 If you wish to contribute to galvani, please clone the repository and install the testing dependencies:
 
 ```shell
````
```diff
@@ -56,7 +56,7 @@ def fieldname_to_dtype(fieldname):
         "|Permittivity|",
         "Tan(Delta)",
     ):
-        return (fieldname, np.float_)
+        return (fieldname, np.float64)
     elif fieldname in (
         "Q charge/discharge/mA.h",
         "step time/s",
@@ -66,15 +66,15 @@ def fieldname_to_dtype(fieldname):
         "Efficiency/%",
         "Capacity/mA.h",
     ):
-        return (fieldname, np.float_)
+        return (fieldname, np.float64)
     elif fieldname in ("cycle number", "I Range", "Ns", "half cycle", "z cycle"):
         return (fieldname, np.int_)
     elif fieldname in ("dq/mA.h", "dQ/mA.h"):
-        return ("dQ/mA.h", np.float_)
+        return ("dQ/mA.h", np.float64)
     elif fieldname in ("I/mA", "<I>/mA"):
-        return ("I/mA", np.float_)
+        return ("I/mA", np.float64)
     elif fieldname in ("Ewe/V", "<Ewe>/V", "Ecell/V", "<Ewe/V>"):
-        return ("Ewe/V", np.float_)
+        return ("Ewe/V", np.float64)
     elif fieldname.endswith(
         (
             "/s",
@@ -103,7 +103,7 @@ def fieldname_to_dtype(fieldname):
             "/%",
         )
     ):
-        return (fieldname, np.float_)
+        return (fieldname, np.float64)
     else:
         raise ValueError("Invalid column header: %s" % fieldname)
 
```
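Context for the repeated `np.float_` to `np.float64` substitution: NumPy 2.0 removed the `np.float_` alias, and `np.float64` is the concrete double-precision scalar type it referred to, so the change is behaviour-preserving on NumPy 1.x as well. A small sanity check, assuming only that NumPy is installed:

```python
import numpy as np

# np.float_ was an alias for np.float64 on NumPy 1.x; NumPy 2.0 removed
# the alias, so the explicit name works across both major versions.
dtype = np.dtype(np.float64)
assert dtype.itemsize == 8      # 64-bit (double precision) float
assert dtype == np.dtype("f8")  # the 8-byte float behind the "<f8" map entries
print(np.array(["1.5", "2.5"], dtype=np.float64))  # -> [1.5 2.5]
```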
```diff
@@ -276,7 +276,7 @@ VMPdata_colID_dtype_map = {
     11: ("<I>/mA", "<f8"),
     13: ("(Q-Qo)/mA.h", "<f8"),
     16: ("Analog IN 1/V", "<f4"),
-    17: ("Analog IN 2/V", "<f4"),  # Probably column 18 is Analog IN 3/V, if anyone hits this error in the future
+    17: ("Analog IN 2/V", "<f4"),  # Probably column 18 is Analog IN 3/V, if anyone hits this error in the future  # noqa: E501
     19: ("control/V", "<f4"),
     20: ("control/mA", "<f4"),
     23: ("dQ/mA.h", "<f8"),  # Same as 7?
```
```diff
@@ -571,13 +571,25 @@ def mdb_get_version(filename):
     return version_tuple
 
 
-def convert_arbin_to_sqlite(input_file, output_file):
+def convert_arbin_to_sqlite(input_file, output_file=None):
     """Read data from an Arbin .res data file and write to a sqlite file.
 
-    Any data currently in the sqlite file will be erased!
+    Any data currently in an sqlite file at `output_file` will be erased!
+
+    Parameters:
+        input_file (str): The path to the Arbin .res file to read from.
+        output_file (str or None): The path to the sqlite file to write to; if None,
+            return a `sqlite3.Connection` into an in-memory database.
+
+    Returns:
+        None or sqlite3.Connection
 
     """
     arbin_version = mdb_get_version(input_file)
+
+    if output_file is None:
+        output_file = ":memory:"
+
     s3db = sqlite3.connect(output_file)
 
     tables_to_convert = copy(mdb_tables)
@@ -602,6 +614,11 @@ def convert_arbin_to_sqlite(input_file, output_file):
     print("Vacuuming database...")
     s3db.executescript("VACUUM; ANALYZE;")
 
+    if output_file == ":memory:":
+        return s3db
+
+    s3db.close()
+
 
 def main(argv=None):
     parser = argparse.ArgumentParser(
```
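With `output_file` now optional, the converter can hand back a live in-memory database instead of writing a file. A short usage sketch (file names are illustrative; converting real files still requires MDBTools, and the `res2sqlite` import matches the tests below):

```python
from galvani import res2sqlite

# File-backed conversion: writes (and overwrites!) out.sqlite, returns None.
res2sqlite.convert_arbin_to_sqlite("arbin1.res", "out.sqlite")

# In-memory conversion: omit output_file and get a sqlite3.Connection back.
conn = res2sqlite.convert_arbin_to_sqlite("arbin1.res")
try:
    row = conn.execute("SELECT * FROM Channel_Normal_Table;").fetchone()
    print(row)
finally:
    conn.close()  # the caller owns the in-memory connection
```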
setup.py: 2 lines changed
```diff
@@ -12,7 +12,7 @@ with open(os.path.join(os.path.dirname(__file__), "README.md")) as f:
 
 setup(
     name="galvani",
-    version="0.3.0",
+    version="0.4.1",
     description="Open and process battery charger log data files",
     long_description=readme,
     long_description_content_type="text/markdown",
```
```diff
@@ -53,6 +53,16 @@ def test_convert_Arbin_to_sqlite_function(testdata_dir, tmpdir, basename):
     csr.fetchone()
 
 
+@pytest.mark.parametrize("basename", ["arbin1", "UM34_Test005E"])
+def test_convert_Arbin_to_sqlite_function_in_memory(testdata_dir, tmpdir, basename):
+    """Convert an Arbin file to an in-memory SQLite database."""
+    res_file = os.path.join(testdata_dir, basename + ".res")
+    conn = None
+    with res2sqlite.convert_arbin_to_sqlite(res_file) as conn:
+        csr = conn.execute("SELECT * FROM Channel_Normal_Table;")
+        csr.fetchone()
+
+
 @pytest.mark.skipif(
     not have_mdbtools, reason="Reading the Arbin file requires MDBTools"
 )
```