From d35192bb1134c4a7e6474de02b21f94f24545aa0 Mon Sep 17 00:00:00 2001 From: Tom Pollard Date: Tue, 3 Feb 2026 11:57:10 -0500 Subject: [PATCH] Fix style violations flagged by formatter Remove extra blank lines, unnecessary parentheses in tuple unpacking, and reformat multi-line exception strings --- tests/__init__.py | 1 - wfdb/io/_coreio.py | 1 - wfdb/io/_url.py | 5 ++--- wfdb/io/annotation.py | 12 +++++++----- wfdb/io/convert/csv.py | 12 ++++-------- wfdb/io/download.py | 1 - wfdb/io/record.py | 1 - 7 files changed, 13 insertions(+), 20 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index b280897f..1c804129 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,6 +1,5 @@ import numpy as np - _np_error_state = {} diff --git a/wfdb/io/_coreio.py b/wfdb/io/_coreio.py index d5a1d1b6..2835213b 100644 --- a/wfdb/io/_coreio.py +++ b/wfdb/io/_coreio.py @@ -5,7 +5,6 @@ from wfdb.io import _url from wfdb.io.download import config - # Cloud protocols CLOUD_PROTOCOLS = ["az://", "azureml://", "s3://", "gs://"] diff --git a/wfdb/io/_url.py b/wfdb/io/_url.py index 5200c38c..a5b3c5d8 100644 --- a/wfdb/io/_url.py +++ b/wfdb/io/_url.py @@ -9,7 +9,6 @@ from wfdb.version import __version__ - # Value for 'buffering' indicating that the entire file should be # buffered at once. BUFFER_WHOLE_FILE = -2 @@ -486,7 +485,7 @@ def _read_range(self, start, end): if buffer_store: # Load data into buffer and then return a copy to the # caller. - (start, data) = xfer.content() + start, data = xfer.content() self._buffer = data self._buffer_start = start self._buffer_end = start + len(data) @@ -767,7 +766,7 @@ def openurl( (io.BufferedIOBase) or text file API (io.TextIOBase). """ - (scheme, netloc, path, _, _, _) = urllib.parse.urlparse(url) + scheme, netloc, path, _, _, _ = urllib.parse.urlparse(url) if scheme == "": raise NetFileError("no scheme specified for URL: %r" % (url,), url=url) diff --git a/wfdb/io/annotation.py b/wfdb/io/annotation.py index c2fe8d0d..1cbadf76 100644 --- a/wfdb/io/annotation.py +++ b/wfdb/io/annotation.py @@ -1469,7 +1469,9 @@ def convert_label_attribute( return label_map = self.create_label_map(inplace=False) - label_map.set_index(keys=pd.Index(label_map[source_field].values), inplace=True) + label_map.set_index( + keys=pd.Index(label_map[source_field].values), inplace=True + ) try: target_item = label_map.loc[ @@ -1959,7 +1961,7 @@ def rdann( filebytes = load_byte_pairs(record_name, extension, pn_dir) # Get WFDB annotation fields from the file bytes - (sample, label_store, subtype, chan, num, aux_note) = proc_ann_bytes( + sample, label_store, subtype, chan, num, aux_note = proc_ann_bytes( filebytes, sampto ) @@ -1970,17 +1972,17 @@ def rdann( ) # Try to extract information describing the annotation file - (fs, custom_labels) = interpret_defintion_annotations( + fs, custom_labels = interpret_defintion_annotations( potential_definition_inds, aux_note ) # Remove annotations that do not store actual sample and label information - (sample, label_store, subtype, chan, num, aux_note) = rm_empty_indices( + sample, label_store, subtype, chan, num, aux_note = rm_empty_indices( rm_inds, sample, label_store, subtype, chan, num, aux_note ) # Convert lists to numpy arrays dtype='int' - (label_store, subtype, chan, num) = lists_to_int_arrays( + label_store, subtype, chan, num = lists_to_int_arrays( label_store, subtype, chan, num ) diff --git a/wfdb/io/convert/csv.py b/wfdb/io/convert/csv.py index 4817a0e5..e4fa8b68 100644 --- a/wfdb/io/convert/csv.py +++ b/wfdb/io/convert/csv.py @@ 
-625,20 +625,16 @@ def csv2ann( df_CSV.columns = ["onset", "duration", "description"] df_out = format_ann_from_df(df_CSV) else: - raise Exception( - """The number of columns in the CSV was not - recognized.""" - ) + raise Exception("""The number of columns in the CSV was not + recognized.""") # Remove extension from input file name file_name = file_name.split(".")[0] if time_onset: if not fs: - raise Exception( - """`fs` must be provided if `time_onset` is True + raise Exception("""`fs` must be provided if `time_onset` is True since it is required to convert time onsets to - samples""" - ) + samples""") sample = (df_out["onset"].to_numpy() * fs).astype(np.int64) else: sample = df_out["onset"].to_numpy() diff --git a/wfdb/io/download.py b/wfdb/io/download.py index 338d8b97..9681b873 100644 --- a/wfdb/io/download.py +++ b/wfdb/io/download.py @@ -7,7 +7,6 @@ from wfdb.io import _url - # The PhysioNet index url PN_INDEX_URL = "https://physionet.org/files/" PN_CONTENT_URL = "https://physionet.org/content/" diff --git a/wfdb/io/record.py b/wfdb/io/record.py index 6ae5190e..1743b305 100644 --- a/wfdb/io/record.py +++ b/wfdb/io/record.py @@ -16,7 +16,6 @@ from wfdb.io import util from wfdb.io._coreio import CLOUD_PROTOCOLS - # -------------- WFDB Signal Calibration and Classification ---------- #
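
Note for reviewers (illustrative only, not part of the applied diff): the short snippet below sketches the two conventions this patch enforces, using a standard-library call and a hypothetical column check as stand-ins. Only urllib.parse.urlparse is assumed; the check_columns helper and its (2, 3) condition are invented for illustration and do not exist in wfdb.

import urllib.parse

# 1. Tuple unpacking: no parentheses around the assignment targets.
#    Before: (scheme, netloc, path, _, _, _) = urllib.parse.urlparse(url)
url = "https://physionet.org/files/"
scheme, netloc, path, _, _, _ = urllib.parse.urlparse(url)
print(scheme, netloc, path)

# 2. Multi-line exception strings: the triple-quoted string opens on the
#    same line as the call, rather than inside a parenthesized block.
#    Before:
#        raise Exception(
#            """The number of columns in the CSV was not
#               recognized."""
#        )
def check_columns(n_columns):
    # Hypothetical guard, loosely modeled on the csv2ann error path above.
    if n_columns not in (2, 3):
        raise Exception("""The number of columns in the CSV was not
            recognized.""")

check_columns(3)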