_BUF_SIZE = 16 * 1024 ** 2 # in bytes
-def _random_file(size: int, rand_src: Random) -> tempfile.NamedTemporaryFile:
- f = tempfile.NamedTemporaryFile(buffering = _BUF_SIZE)
+def _random_file(size: int, rand_src: Random, on_disk: bool) -> IO[bytes]:
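+	# Returns a binary stream: a real temp file when on_disk is set, else an in-memory
+	# buffer (hence the IO[bytes] annotation; assumes "from typing import IO" is in scope).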
+ f = tempfile.NamedTemporaryFile(buffering = _BUF_SIZE) if on_disk else io.BytesIO()
try:
while f.tell() < size:
write_size = min(size - f.tell(), _BUF_SIZE)
torrent_len = 3 * piece_size
disk = self._write_disk(sector_size, torrent_len // sector_size)
- with _random_file(torrent_len, Random(0)) as torrent_file:
+ with _random_file(torrent_len, Random(0), on_disk = False) as torrent_file:
torrent = _Torrent(torrent_file, piece_size)
torrent_file.seek(0)
self._write_torrent(torrent)
(disk.id, NumericRange(0, disk.sector_count), torrent.info_hash)
)
- do_verify(self._conn, disk.id, torrent_file, read_size, 1)
+ do_verify(self._conn, disk.id, torrent_file, read_size, read_tries = 1)
cursor.execute("select * from diskjumble.verify_pass;")
self.assertEqual(cursor.rowcount, 1)
def test_basic_fresh_verify_large_read_size(self):
self._basic_fresh_verify_helper(128)
+ def test_resume_fragmentation_unaligned_end(self):
+ """
+		Test a run where a cached hasher state is used, a piece is fragmented on disk, and the end of the
+		torrent isn't sector-aligned.
+ """
+ read_size = 8
+ piece_size = 64
+
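+		# Disk geometry: other_disk's single 16-byte sector holds the start of the piece;
+		# disk's 32-byte sectors 0 and 2 hold the rest, leaving sector 2 only half-used.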
+ other_disk = self._write_disk(16, 1)
+ disk = self._write_disk(32, 3)
+ with _random_file(piece_size, Random(0), on_disk = False) as torrent_file:
+ torrent = _Torrent(torrent_file, piece_size)
+ torrent_file.seek(0)
+ self._write_torrent(torrent)
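+		# Map the piece across both disks: bytes [0, 16) on other_disk, [16, 48) on disk
+		# sector 0, and [48, 64) at the start of disk sector 2 (sector 1 is skipped, so
+		# the piece is fragmented and the torrent ends mid-sector).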
+ with self._conn.cursor() as cursor:
+ cursor.executemany(
+ "insert into diskjumble.slab values (default, %s, %s, %s, %s, null)",
+ [
+ (other_disk.id, NumericRange(0, 1), torrent.info_hash, 0),
+ (disk.id, NumericRange(0, 1), torrent.info_hash, other_disk.sector_size),
+ (disk.id, NumericRange(2, 3), torrent.info_hash, other_disk.sector_size + disk.sector_size),
+ ]
+ )
+
+ # Prepare the saved hasher state by running a verify
+ do_verify(self._conn, other_disk.id, torrent_file, read_size, read_tries = 1)
+ torrent_file.seek(0)
+
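+			# The first verify saw only part of the piece, so exactly one incomplete piece
+			# (with its saved hasher state) should remain.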
+ cursor.execute("select count(*) from diskjumble.verify_piece_incomplete;")
+ [(row_count,)] = cursor.fetchall()
+ self.assertEqual(row_count, 1)
+
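+			# Build the image for disk: sector 0 gets torrent bytes [16, 48), sector 1 is
+			# left as a zero-filled hole, and sector 2 starts with the trailing bytes [48, 64).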
+ disk_file = io.BytesIO()
+ torrent_file.seek(other_disk.sector_size)
+ disk_file.write(torrent_file.read(disk.sector_size))
+ disk_file.seek(disk_file.tell() + disk.sector_size)
+ disk_file.write(torrent_file.read())
+ disk_file.seek(0)
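+			# This verify picks up the saved hasher state and completes the piece.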
+ do_verify(self._conn, disk.id, disk_file, read_size, read_tries = 1)
+
+ # Check that there are no verify pieces in the database. Because of integrity constraints, this also
+ # guarantees there aren't any stray saved hasher states, failed verifies, or piece contents.
+ cursor.execute("select count(*) from diskjumble.verify_piece;")
+ [(row_count,)] = cursor.fetchall()
+ self.assertEqual(row_count, 0)
+
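+			# With the piece complete, all three slabs should be recorded as passed.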
+ cursor.execute("select disk_id, disk_sectors from diskjumble.verify_pass;")
+ self.assertEqual(
+ cursor.fetchall(),
+ [(other_disk.id, NumericRange(0, 1)), (disk.id, NumericRange(0, 1)), (disk.id, NumericRange(2, 3))]
+ )
+
+ # TODO ignore useless hasher state
+
+ # TODO read errors
+
def _write_torrent(self, torrent: "_Torrent") -> None:
with self._conn.cursor() as cursor:
cursor.execute("insert into bittorrent.torrent_info values (%s);", (torrent.info,))