clean_passes = clean_up(passes)
clean_fails = clean_up(fails)
- conn.cursor().execute(
- """
- with
- new_passes as (
- select coalesce(range_agg(range), int8multirange()) as new_passes
- from unnest(%(pass_ranges)s::int8range[]) as range
- ),
- new_fails as (
- select coalesce(range_agg(range), int8multirange()) as new_fails
- from unnest(%(fail_ranges)s::int8range[]) as range
- )
- update diskjumble.disk_maps
- set
- verified_map = coalesce(verified_map, int8multirange()) + new_passes - new_fails,
- written_map = written_map - new_fails
- from new_passes, new_fails
- where disk_id = %(disk_id)s
- """,
- {
- "pass_ranges": [NumericRange(r.start, r.stop) for r in clean_passes],
- "fail_ranges": [NumericRange(r.start, r.stop) for r in clean_fails],
- "disk_id": disk_id,
- },
- )
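+	# psycopg2's connection context manager runs the update in a transaction:
+	# it commits on success and rolls back if an exception propagates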
+ with conn:
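+		# NumericRange (psycopg2.extras) adapts the start/stop pairs for the int8range[] parameters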
+ conn.cursor().execute(
+ """
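+			-- collapse the new pass/fail ranges into multiranges; coalesce covers the empty-array case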
+ with
+ new_passes as (
+ select coalesce(range_agg(range), int8multirange()) as new_passes
+ from unnest(%(pass_ranges)s::int8range[]) as range
+ ),
+ new_fails as (
+ select coalesce(range_agg(range), int8multirange()) as new_fails
+ from unnest(%(fail_ranges)s::int8range[]) as range
+ )
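+			-- add newly verified ranges to verified_map and drop failed ranges from both maps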
+ update diskjumble.disk_maps
+ set
+ verified_map = coalesce(verified_map, int8multirange()) + new_passes - new_fails,
+ written_map = written_map - new_fails
+ from new_passes, new_fails
+ where disk_id = %(disk_id)s
+ """,
+ {
+ "pass_ranges": [NumericRange(r.start, r.stop) for r in clean_passes],
+ "fail_ranges": [NumericRange(r.start, r.stop) for r in clean_fails],
+ "disk_id": disk_id,
+ },
+ )
if __name__ == "__main__":
path = f"/dev/mapper/diskjumble-{args.disk_id}"
with contextlib.closing(psycopg2.connect("")) as conn:
- conn.autocommit = True
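+		# no autocommit: the verify update now commits explicitly via the connection context manager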
target_ranges = _get_target_ranges(conn, args.disk_id, args.block_limit)
with open(path, "rb", buffering = _READ_BUFFER_SIZE) as disk_file:
_do_verify(conn, args.disk_id, target_ranges, disk_file, _READ_BUFFER_SIZE, args.read_tries)