state = "2"
event = "%s"
new_state = "1"
-''' % (event, errno, self.STREAM_BUFFER_SIZE / 512, event, event))
+''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
file.close()
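Context for the whole series of hunks: under Python 3, / is true division
and returns a float even when the operands divide evenly, while // keeps
the Python 2 integer result. A minimal sketch of why that matters for the
blkdebug config being built here (BUFFER_SIZE is a stand-in for
self.STREAM_BUFFER_SIZE, with an assumed value purely for illustration):

    # Stand-in for self.STREAM_BUFFER_SIZE (assumed value).
    BUFFER_SIZE = 512 * 1024

    print('sector = "%s"' % (BUFFER_SIZE / 512))   # Python 3: sector = "1024.0"
    print('sector = "%s"' % (BUFFER_SIZE // 512))  # both:     sector = "1024"

    # If the template interpolates the sector with %s, the float rendering
    # would leave a malformed value in the generated blkdebug rule.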
class TestEIO(TestErrors):
self.assert_no_active_block_jobs()
result = self.vm.qmp('block-commit', device='drive0', top=mid_img,
- base=backing_img, speed=(self.image_len / 4))
+ base=backing_img, speed=(self.image_len // 4))
self.assert_qmp(result, 'return', {})
result = self.vm.qmp('device_del', id='scsi0')
self.assert_qmp(result, 'return', {})
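The speed argument is a case where the float would not just print
differently but change the QMP wire format; the QAPI schema declares
block-commit's speed as an integer. A quick sketch of what each form
serializes to (the image_len value is assumed for illustration):

    import json

    image_len = 10 * 1024 * 1024   # stand-in for self.image_len (assumed)

    print(json.dumps({'speed': image_len / 4}))    # {"speed": 2621440.0}
    print(json.dumps({'speed': image_len // 4}))   # {"speed": 2621440}

    # Only the integer form matches the schema on both Python 2 and 3.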
self.assert_no_active_block_jobs()
result = self.vm.qmp('block-commit', device='drive0', top=mid_img,
- base=backing_img, speed=(self.image_len / 4))
+ base=backing_img, speed=(self.image_len // 4))
self.assert_qmp(result, 'return', {})
result = self.vm.qmp('query-block')
state = "2"
event = "%s"
new_state = "1"
-''' % (event, errno, self.MIRROR_GRANULARITY / 512, event, event))
+''' % (event, errno, self.MIRROR_GRANULARITY // 512, event, event))
file.close()
def setUp(self):
state = "2"
event = "%s"
new_state = "1"
-''' % (event, errno, self.MIRROR_GRANULARITY / 512, event, event))
+''' % (event, errno, self.MIRROR_GRANULARITY // 512, event, event))
file.close()
def setUp(self):
off = off + 1024 * 512
table = b''.join(struct.pack('>Q', (1 << 63) | off + 512 * j)
- for j in xrange(0, remaining / 512))
+ for j in xrange(0, remaining // 512))
fd.write(table)
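Here true division would not merely change a value, it would raise:
range()/xrange() rejects float bounds outright, and struct's integer
formats reject float arguments. A short sketch (the remaining value is
an illustrative stand-in):

    import struct

    remaining = 4096   # illustrative stand-in value

    # range(0, remaining / 512) under Python 3:
    #     TypeError: 'float' object cannot be interpreted as an integer
    offsets = range(0, remaining // 512)         # fine: 8 entries

    # struct.pack('>Q', remaining / 512) under Python 3:
    #     struct.error: required argument is not an integer
    packed = struct.pack('>Q', remaining // 512)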
# in. The throttled requests won't be executed until we
# advance the virtual clock.
rq_size = 512
- rd_nr = max(params['bps'] / rq_size / 2,
- params['bps_rd'] / rq_size,
- params['iops'] / 2,
+ rd_nr = max(params['bps'] // rq_size // 2,
+ params['bps_rd'] // rq_size,
+ params['iops'] // 2,
params['iops_rd'])
rd_nr *= seconds * 2
- rd_nr /= ndrives
- wr_nr = max(params['bps'] / rq_size / 2,
- params['bps_wr'] / rq_size,
- params['iops'] / 2,
+ rd_nr //= ndrives
+ wr_nr = max(params['bps'] // rq_size // 2,
+ params['bps_wr'] // rq_size,
+ params['iops'] // 2,
params['iops_wr'])
wr_nr *= seconds * 2
- wr_nr /= ndrives
+ wr_nr //= ndrives
# Send I/O requests to all drives
for i in range(rd_nr):
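A worked example of the request-count arithmetic with illustrative
settings (these params values are assumptions, not the test's defaults):

    params = {'bps': 1024 * 1024, 'bps_rd': 512 * 1024,
              'iops': 100, 'iops_rd': 0}
    rq_size = 512
    seconds = 1
    ndrives = 2

    rd_nr = max(params['bps'] // rq_size // 2,   # 1048576 // 512 // 2 = 1024
                params['bps_rd'] // rq_size,     # 524288 // 512       = 1024
                params['iops'] // 2,             # 100 // 2            = 50
                params['iops_rd'])               # 0
    rd_nr *= seconds * 2                         # 2048
    rd_nr //= ndrives                            # 1024 requests per drive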
self.configure_throttle(ndrives, settings)
# Wait for the bucket to empty so we can do bursts
- wait_ns = nsec_per_sec * burst_length * burst_rate / rate
+ wait_ns = nsec_per_sec * burst_length * burst_rate // rate
self.vm.qtest("clock_step %d" % wait_ns)
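Note where the // lands in this expression: flooring as the last step,
after all the multiplications, truncates at most one nanosecond, whereas
flooring an intermediate term first could lose almost everything. A
sketch with assumed illustrative rates:

    nsec_per_sec = 1000000000
    burst_length = 1      # assumed values, for illustration only
    burst_rate = 150
    rate = 400

    # Floor last, as the test does: full precision until the final step.
    wait_ns = nsec_per_sec * burst_length * burst_rate // rate   # 375000000

    # Floor an inner term first and the result collapses:
    coarse = nsec_per_sec * burst_length * (burst_rate // rate)  # 0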
# Test I/O at the max burst rate
interval_length = 10
nsec_per_sec = 1000000000
-op_latency = nsec_per_sec / 1000 # See qtest_latency_ns in accounting.c
+op_latency = nsec_per_sec // 1000 # See qtest_latency_ns in accounting.c
bad_sector = 8192
bad_offset = bad_sector * 512
blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')
image_size = 4 * oneTB
if qemu_img:
iotests.log("# Create image")
- qemu_img_create(config, image_size / oneMB)
+ qemu_img_create(config, image_size // oneMB)
else:
iotests.log("# Create image")
- create_image(config, image_size / oneMB)
+ create_image(config, image_size // oneMB)
lowOffsetMB = 100
- highOffsetMB = 3 * oneTB / oneMB
+ highOffsetMB = 3 * oneTB // oneMB
try:
if not qemu_img:
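These size conversions are exact (the sizes are powers of two), so //
changes only the result's type, never its value. Worked arithmetic,
assuming oneMB and oneTB are the usual binary powers:

    oneMB = 1024 ** 2   # assumed definitions
    oneTB = 1024 ** 4

    image_size = 4 * oneTB
    print(image_size / oneMB)    # 4194304.0 -- exact, but a float in Python 3
    print(image_size // oneMB)   # 4194304   -- same value, still an int

    highOffsetMB = 3 * oneTB // oneMB   # 3145728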
'write -P 1 0 %i' % self.image_len);
# Start some background requests
- for offset in range(1 * self.image_len / 8, 3 * self.image_len / 8, 1024 * 1024):
+ for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
- for offset in range(2 * self.image_len / 8, 3 * self.image_len / 8, 1024 * 1024):
+ for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
# Start the block job
self.assert_qmp(result, 'return', {})
# Start some more requests
- for offset in range(3 * self.image_len / 8, 5 * self.image_len / 8, 1024 * 1024):
+ for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
- for offset in range(4 * self.image_len / 8, 5 * self.image_len / 8, 1024 * 1024):
+ for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
# Wait for the READY event
# the source) should be settled using the active mechanism.
# The mirror code itself asserts that the source BDS's dirty
# bitmap will stay clean between READY and COMPLETED.
- for offset in range(5 * self.image_len / 8, 7 * self.image_len / 8, 1024 * 1024):
+ for offset in range(5 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
- for offset in range(6 * self.image_len / 8, 7 * self.image_len / 8, 1024 * 1024):
+ for offset in range(6 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
if sync_source_and_target:
entry_bits = 3
entry_size = 1 << entry_bits
l1_mask = 0x00fffffffffffe00
- div_roundup = lambda n, d: (n + d - 1) / d
+ div_roundup = lambda n, d: (n + d - 1) // d
def split_by_n(data, n):
for x in xrange(0, len(data), n):
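(n + d - 1) // d is the standard integer ceiling-division idiom, and it
only works with floor division; with / it would return floats under
Python 3. A quick check:

    div_roundup = lambda n, d: (n + d - 1) // d

    print(div_roundup(1024, 512))   # 2 (exact division)
    print(div_roundup(1025, 512))   # 3 (rounds up)
    print(div_roundup(0, 512))      # 0

    # math.ceil(n / d) gives the same numbers, but via a float round-trip;
    # the idiom stays in integer arithmetic throughout.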
file = open(name, 'wb')
i = 0
while i < size:
- sector = struct.pack('>l504xl', i / 512, i / 512)
+ sector = struct.pack('>l504xl', i // 512, i // 512)
file.write(sector)
i = i + 512
file.close()
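The pattern writer tags every 512-byte sector with its own index, packed
big-endian into the first and last four bytes ('>l', 504 pad bytes, '>l').
A hedged companion sketch that reads the pattern back (verify_pattern is
my own name for illustration, not a helper from the tests):

    import struct

    def verify_pattern(name, size):
        # Check that each sector carries its own index at both ends.
        with open(name, 'rb') as f:
            for i in range(0, size, 512):
                head, tail = struct.unpack('>l504xl', f.read(512))
                assert head == tail == i // 512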
def load_l1_table(self):
self.l1_table = self.read_table(self.header['l1_table_offset'])
- self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
+ self.table_nelems = self.header['table_size'] * self.header['cluster_size'] // table_elem_size
def write_table(self, offset, table):
s = ''.join(pack_table_elem(x) for x in table)
n = int(args[0])
for i in xrange(n):
- l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
+ l1_index = pos // qed.header['cluster_size'] // len(qed.l1_table)
if qed.l1_table[l1_index] == 0:
err('no l2 table allocated')
l2_offset = qed.l1_table[l1_index]
l2_table = qed.read_table(l2_offset)
- l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
+ l2_index = (pos // qed.header['cluster_size']) % len(qed.l1_table)
l2_table[l2_index] = 1 # zero the data cluster
qed.write_table(l2_offset, l2_table)
pos += qed.header['cluster_size']
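A worked example of the two-level QED lookup above, with an assumed
geometry (64 KiB clusters and 4096-entry tables; the real values come
from the QED header):

    cluster_size = 64 * 1024            # assumed geometry
    table_len = 4096

    pos = 5 * 1024 * 1024 * 1024        # byte offset 5 GiB into the image

    cluster_idx = pos // cluster_size    # 81920
    l1_index = cluster_idx // table_len  # 20 -> which L2 table to consult
    l2_index = cluster_idx % table_len   # 0  -> which entry inside it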