return run.block_ranges[0].start
-def _get_target_ranges(conn, limit: Optional[int]) -> List[NumericRange]:
+def _get_target_ranges(conn, disk_id: int, limit: Optional[int]) -> List[range]:
ranges = []
block_count = 0
with conn.cursor() as cursor:
where disk_id = %s and written_map is not null
order by range
""",
- (args.disk_id,),
+ (disk_id,),
)
for (r,) in cursor:
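+ # if taking this whole range would exceed the block limit, take only the remainder and stop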
if limit is not None and block_count + (r.upper - r.lower) > limit:
capped_size = limit - block_count
if capped_size:
- ranges.append(NumericRange(r.lower, r.lower + capped_size))
+ ranges.append(range(r.lower, r.lower + capped_size))
break
else:
- ranges.append(r)
+ ranges.append(range(r.lower, r.upper))
+ block_count += r.upper - r.lower
return ranges
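For illustration only (not part of the patch): a worked example of the capping logic above. The rows stand in for the NumericRange values the cursor yields; the limit is a hypothetical block budget of 150 against written ranges covering blocks [0, 100) and [100, 250).

    from psycopg2.extras import NumericRange

    rows = [NumericRange(0, 100), NumericRange(100, 250)]  # written ranges, '[)' bounds
    limit = 150

    ranges = []
    block_count = 0
    for r in rows:
        if limit is not None and block_count + (r.upper - r.lower) > limit:
            # the second range would overshoot the budget, so only the remaining 50 blocks are taken
            capped_size = limit - block_count
            if capped_size:
                ranges.append(range(r.lower, r.lower + capped_size))
            break
        else:
            ranges.append(range(r.lower, r.upper))
            block_count += r.upper - r.lower

    assert ranges == [range(0, 100), range(100, 150)]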
]
-def _do_verify(conn, disk_id: int, target_ranges: Optional[List[range]], disk_file: io.BufferedIOBase, read_size: int, read_tries: int):
- if target_ranges is None:
- pg_target_ranges = [NumericRange()]
- else:
- pg_target_ranges = [NumericRange(r.start, r.stop) for r in target_ranges]
-
+def _do_verify(conn, disk_id: int, target_ranges: List[range], disk_file: io.BufferedIOBase, read_size: int, read_tries: int):
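+ # target_ranges is always explicit now; convert back to NumericRange for the worklist helpers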
+ pg_target_ranges = [NumericRange(r.start, r.stop) for r in target_ranges]
worklist = list(heapq.merge(
_get_v1_worklist(conn, disk_id, pg_target_ranges),
_get_v2_worklist(conn, disk_id, pg_target_ranges),
key = _run_sort_key,
))
- if target_ranges is not None:
- requested_blocks = {
- block
- for r in target_ranges
- for block in r
- }
- covered_blocks = {
- block
- for run in worklist
- for block_range in run.block_ranges
- for block in block_range
- }
- missing = requested_blocks - covered_blocks
- if missing:
- raise RuntimeError(f"unable to locate blocks: {len(missing)} in the range {min(missing)} to {max(missing)}")
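+ # target_ranges is always explicit, so check unconditionally that every requested block is covered by some run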
+ requested_blocks = {
+ block
+ for r in target_ranges
+ for block in r
+ }
+ covered_blocks = {
+ block
+ for run in worklist
+ for block_range in run.block_ranges
+ for block in block_range
+ }
+ missing = requested_blocks - covered_blocks
+ if missing:
+ raise RuntimeError(f"unable to locate {len(missing)} blocks in the range {min(missing)} to {max(missing)}")
passes = []
fails = []
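A quick illustration (not part of the patch) of what the now-unconditional completeness check reports, with hypothetical numbers: blocks 0-9 requested but only 0-7 covered by the merged worklist.

    requested_blocks = {block for r in [range(0, 10)] for block in r}
    covered_blocks = {block for block_range in [range(0, 8)] for block in block_range}
    missing = requested_blocks - covered_blocks
    assert missing == {8, 9}
    # the error message would then report 2 blocks in the range 8 to 9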
path = f"/dev/mapper/diskjumble-{args.disk_id}"
with contextlib.closing(psycopg2.connect("")) as conn:
conn.autocommit = True
- target_ranges = _get_target_ranges(conn, args.block_limit)
+ target_ranges = _get_target_ranges(conn, args.disk_id, args.block_limit)
with open(path, "rb", buffering = _READ_BUFFER_SIZE) as disk_file:
_do_verify(conn, args.disk_id, target_ranges, disk_file, _READ_BUFFER_SIZE, args.read_tries)
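As a side note on the type change this patch settles on: builtin range objects now flow between the Python-level functions, and they are converted to psycopg2 NumericRange only where the SQL-facing helpers need them (as in _do_verify above). A minimal sketch of that round trip, assuming the default '[)' bounds:

    from psycopg2.extras import NumericRange

    r = range(100, 250)                 # as returned by _get_target_ranges
    nr = NumericRange(r.start, r.stop)  # as built for pg_target_ranges in _do_verify
    assert (nr.lower, nr.upper) == (100, 250)
    assert nr.lower_inc and not nr.upper_inc  # half-open, matching Python ranges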