From dcba1b3e45f2eaaf3125ce769dfb51b5ca0746a4 Mon Sep 17 00:00:00 2001
From: Jakob Cornell
Date: Wed, 20 Apr 2022 10:31:05 -0500
Subject: [PATCH] Fix type error in verify worklist generation

---
 disk_jumble/src/disk_jumble/verify.py | 48 ++++++++++++---------------
 1 file changed, 22 insertions(+), 26 deletions(-)

diff --git a/disk_jumble/src/disk_jumble/verify.py b/disk_jumble/src/disk_jumble/verify.py
index 93d5ba2..8a36e44 100644
--- a/disk_jumble/src/disk_jumble/verify.py
+++ b/disk_jumble/src/disk_jumble/verify.py
@@ -64,7 +64,7 @@ def _run_sort_key(run: _Run):
 	return run.block_ranges[0].start
 
 
-def _get_target_ranges(conn, limit: Optional[int]) -> List[NumericRange]:
+def _get_target_ranges(conn, disk_id: int, limit: Optional[int]) -> List[range]:
 	ranges = []
 	block_count = 0
 	with conn.cursor() as cursor:
@@ -75,16 +75,17 @@ def _get_target_ranges(conn, limit: Optional[int]) -> List[NumericRange]:
 				where disk_id = %s and written_map is not null
 				order by range
 			""",
-			(args.disk_id,),
+			(disk_id,),
 		)
 		for (r,) in cursor:
 			if limit is not None and block_count + (r.upper - r.lower) > limit:
 				capped_size = limit - block_count
 				if capped_size:
-					ranges.append(NumericRange(r.lower, r.lower + capped_size))
+					ranges.append(range(r.lower, r.lower + capped_size))
 				break
 			else:
-				ranges.append(r)
+				ranges.append(range(r.lower, r.upper))
+				block_count += r.upper - r.lower
 	return ranges
 
 
@@ -278,33 +279,28 @@ def _get_v2_worklist(conn, disk_id: int, target_ranges: List[NumericRange]) -> L
 	]
 
 
-def _do_verify(conn, disk_id: int, target_ranges: Optional[List[range]], disk_file: io.BufferedIOBase, read_size: int, read_tries: int):
-	if target_ranges is None:
-		pg_target_ranges = [NumericRange()]
-	else:
-		pg_target_ranges = [NumericRange(r.start, r.stop) for r in target_ranges]
-
+def _do_verify(conn, disk_id: int, target_ranges: List[range], disk_file: io.BufferedIOBase, read_size: int, read_tries: int):
+	pg_target_ranges = [NumericRange(r.start, r.stop) for r in target_ranges]
 	worklist = list(heapq.merge(
 		_get_v1_worklist(conn, disk_id, pg_target_ranges),
 		_get_v2_worklist(conn, disk_id, pg_target_ranges),
 		key = _run_sort_key,
 	))
 
-	if target_ranges is not None:
-		requested_blocks = {
-			block
-			for r in target_ranges
-			for block in r
-		}
-		covered_blocks = {
-			block
-			for run in worklist
-			for block_range in run.block_ranges
-			for block in block_range
-		}
-		missing = requested_blocks - covered_blocks
-		if missing:
-			raise RuntimeError(f"unable to locate blocks: {len(missing)} in the range {min(missing)} to {max(missing)}")
+	requested_blocks = {
+		block
+		for r in target_ranges
+		for block in r
+	}
+	covered_blocks = {
+		block
+		for run in worklist
+		for block_range in run.block_ranges
+		for block in block_range
+	}
+	missing = requested_blocks - covered_blocks
+	if missing:
+		raise RuntimeError(f"unable to locate blocks: {len(missing)} in the range {min(missing)} to {max(missing)}")
 
 	passes = []
 	fails = []
@@ -416,6 +412,6 @@ if __name__ == "__main__":
 		path = f"/dev/mapper/diskjumble-{args.disk_id}"
 	with contextlib.closing(psycopg2.connect("")) as conn:
 		conn.autocommit = True
-		target_ranges = _get_target_ranges(conn, args.block_limit)
+		target_ranges = _get_target_ranges(conn, args.disk_id, args.block_limit)
 		with open(path, "rb", buffering = _READ_BUFFER_SIZE) as disk_file:
 			_do_verify(conn, args.disk_id, target_ranges, disk_file, _READ_BUFFER_SIZE, args.read_tries)
-- 
2.30.2
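
Review note (not part of the patch): the snippet below is a small standalone sketch of the coverage check that _do_verify now performs unconditionally on builtin range objects. The block ranges in it are invented purely for illustration; they do not come from the verify code or any real disk.

	# Standalone sketch of the coverage check, using builtin range objects
	# the same way the patched _do_verify does.  Sample data is made up.
	requested_ranges = [range(0, 10), range(20, 25)]   # blocks the caller asked to verify
	covered_ranges = [range(0, 10), range(20, 23)]     # blocks the merged worklist actually touches

	requested_blocks = {block for r in requested_ranges for block in r}
	covered_blocks = {block for r in covered_ranges for block in r}

	missing = requested_blocks - covered_blocks
	if missing:
		# Mirrors the RuntimeError message raised in _do_verify; here blocks 23 and 24 are missing.
		print(f"unable to locate blocks: {len(missing)} in the range {min(missing)} to {max(missing)}")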