qemu.git/commitdiff
iotests: Use // for Python integer division
author     Max Reitz <[email protected]>
           Mon, 22 Oct 2018 13:53:02 +0000 (14:53 +0100)
committer  Eduardo Habkost <[email protected]>
           Wed, 31 Oct 2018 00:11:52 +0000 (21:11 -0300)
In Python 3, / always performs floating-point division.  We usually do
not want this, and since Python 2.7 understands // as well, change all
integer divisions to use //.
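
[Editor's note: a minimal interactive sketch of the difference, not part
of the commit; the operands are arbitrary example values.]

    >>> 4096 / 512     # Python 3: true division, always yields a float
    8.0
    >>> 4096 // 512    # floor division, yields an int in both Python 2.7 and 3
    8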

Signed-off-by: Max Reitz <[email protected]>
Reviewed-by: Eduardo Habkost <[email protected]>
Reviewed-by: Cleber Rosa <[email protected]>
Message-Id: <20181022135307[email protected]>
Signed-off-by: Eduardo Habkost <[email protected]>
tests/qemu-iotests/030
tests/qemu-iotests/040
tests/qemu-iotests/041
tests/qemu-iotests/044
tests/qemu-iotests/093
tests/qemu-iotests/136
tests/qemu-iotests/149
tests/qemu-iotests/151
tests/qemu-iotests/163
tests/qemu-iotests/iotests.py
tests/qemu-iotests/qed.py

diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index 1dbc2ddc494b1dc30041d60e1c4932a1f261ff7d..276e06b5ba3ed300402bf1291f0247537f1b1aa4 100755 (executable)
@@ -521,7 +521,7 @@ new_state = "2"
 state = "2"
 event = "%s"
 new_state = "1"
-''' % (event, errno, self.STREAM_BUFFER_SIZE / 512, event, event))
+''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event))
         file.close()
 
 class TestEIO(TestErrors):
diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040
index 1cb1ceeb33492680c913e52da3c0585a9b22a020..b81133a474392a2521f1ea58381436b65ae7c232 100755 (executable)
@@ -195,7 +195,7 @@ class TestSingleDrive(ImageCommitTestCase):
 
         self.assert_no_active_block_jobs()
         result = self.vm.qmp('block-commit', device='drive0', top=mid_img,
-                             base=backing_img, speed=(self.image_len / 4))
+                             base=backing_img, speed=(self.image_len // 4))
         self.assert_qmp(result, 'return', {})
         result = self.vm.qmp('device_del', id='scsi0')
         self.assert_qmp(result, 'return', {})
@@ -225,7 +225,7 @@ class TestSingleDrive(ImageCommitTestCase):
 
         self.assert_no_active_block_jobs()
         result = self.vm.qmp('block-commit', device='drive0', top=mid_img,
-                             base=backing_img, speed=(self.image_len / 4))
+                             base=backing_img, speed=(self.image_len // 4))
         self.assert_qmp(result, 'return', {})
 
         result = self.vm.qmp('query-block')
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index 9336ab6ff50d145ec5df372867dbf082f62dd5a0..3615011d98041d7f8cc1a34e2576b1af31fd5ba8 100755 (executable)
@@ -404,7 +404,7 @@ new_state = "2"
 state = "2"
 event = "%s"
 new_state = "1"
-''' % (event, errno, self.MIRROR_GRANULARITY / 512, event, event))
+''' % (event, errno, self.MIRROR_GRANULARITY // 512, event, event))
         file.close()
 
     def setUp(self):
@@ -569,7 +569,7 @@ new_state = "2"
 state = "2"
 event = "%s"
 new_state = "1"
-''' % (event, errno, self.MIRROR_GRANULARITY / 512, event, event))
+''' % (event, errno, self.MIRROR_GRANULARITY // 512, event, event))
         file.close()
 
     def setUp(self):
diff --git a/tests/qemu-iotests/044 b/tests/qemu-iotests/044
index 69e736f68704eb205f1994c6f7ae34b484035c7f..7ef5e46fe9f0fd187799a0f300ce1f3ab0cb94d1 100755 (executable)
@@ -86,7 +86,7 @@ class TestRefcountTableGrowth(iotests.QMPTestCase):
                 off = off + 1024 * 512
 
             table = b''.join(struct.pack('>Q', (1 << 63) | off + 512 * j)
-                for j in xrange(0, remaining / 512))
+                for j in xrange(0, remaining // 512))
             fd.write(table)
 
 
diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index 9d1971a56cb61922f3908b6b483ca9072f855e1d..d88fbc182ec986846b0fd731215cd0f76118a04b 100755 (executable)
@@ -69,18 +69,18 @@ class ThrottleTestCase(iotests.QMPTestCase):
         # in. The throttled requests won't be executed until we
         # advance the virtual clock.
         rq_size = 512
-        rd_nr = max(params['bps'] / rq_size / 2,
-                    params['bps_rd'] / rq_size,
-                    params['iops'] / 2,
+        rd_nr = max(params['bps'] // rq_size // 2,
+                    params['bps_rd'] // rq_size,
+                    params['iops'] // 2,
                     params['iops_rd'])
         rd_nr *= seconds * 2
-        rd_nr /= ndrives
-        wr_nr = max(params['bps'] / rq_size / 2,
-                    params['bps_wr'] / rq_size,
-                    params['iops'] / 2,
+        rd_nr //= ndrives
+        wr_nr = max(params['bps'] // rq_size // 2,
+                    params['bps_wr'] // rq_size,
+                    params['iops'] // 2,
                     params['iops_wr'])
         wr_nr *= seconds * 2
-        wr_nr /= ndrives
+        wr_nr //= ndrives
 
         # Send I/O requests to all drives
         for i in range(rd_nr):
@@ -196,7 +196,7 @@ class ThrottleTestCase(iotests.QMPTestCase):
             self.configure_throttle(ndrives, settings)
 
             # Wait for the bucket to empty so we can do bursts
-            wait_ns = nsec_per_sec * burst_length * burst_rate / rate
+            wait_ns = nsec_per_sec * burst_length * burst_rate // rate
             self.vm.qtest("clock_step %d" % wait_ns)
 
             # Test I/O at the max burst rate
diff --git a/tests/qemu-iotests/136 b/tests/qemu-iotests/136
index a154d8ef9d66bc0e6a9cc0179cbad9e90200e11b..af7ffa45408b53a7b6bf0583963dfda1bc692977 100755 (executable)
@@ -24,7 +24,7 @@ import os
 
 interval_length = 10
 nsec_per_sec = 1000000000
-op_latency = nsec_per_sec / 1000 # See qtest_latency_ns in accounting.c
+op_latency = nsec_per_sec // 1000 # See qtest_latency_ns in accounting.c
 bad_sector = 8192
 bad_offset = bad_sector * 512
 blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')
diff --git a/tests/qemu-iotests/149 b/tests/qemu-iotests/149
index 1225334cb83ba3f3240ca2e765f1d453a79e6dad..4f363f295f83103edbed153b9586c2560c5b3c56 100755 (executable)
@@ -314,13 +314,13 @@ def test_once(config, qemu_img=False):
     image_size = 4 * oneTB
     if qemu_img:
         iotests.log("# Create image")
-        qemu_img_create(config, image_size / oneMB)
+        qemu_img_create(config, image_size // oneMB)
     else:
         iotests.log("# Create image")
-        create_image(config, image_size / oneMB)
+        create_image(config, image_size // oneMB)
 
     lowOffsetMB = 100
-    highOffsetMB = 3 * oneTB / oneMB
+    highOffsetMB = 3 * oneTB // oneMB
 
     try:
         if not qemu_img:
diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151
index fe53b9f446fae26fd1b99043835898d7fb12f2a4..1bb74d67c429c163687812529ceb288770c7e68a 100755 (executable)
@@ -67,9 +67,9 @@ class TestActiveMirror(iotests.QMPTestCase):
                             'write -P 1 0 %i' % self.image_len);
 
         # Start some background requests
-        for offset in range(1 * self.image_len / 8, 3 * self.image_len / 8, 1024 * 1024):
+        for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
             self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
-        for offset in range(2 * self.image_len / 8, 3 * self.image_len / 8, 1024 * 1024):
+        for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
             self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
 
         # Start the block job
@@ -83,9 +83,9 @@ class TestActiveMirror(iotests.QMPTestCase):
         self.assert_qmp(result, 'return', {})
 
         # Start some more requests
-        for offset in range(3 * self.image_len / 8, 5 * self.image_len / 8, 1024 * 1024):
+        for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
             self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
-        for offset in range(4 * self.image_len / 8, 5 * self.image_len / 8, 1024 * 1024):
+        for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
             self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
 
         # Wait for the READY event
@@ -95,9 +95,9 @@ class TestActiveMirror(iotests.QMPTestCase):
         # the source) should be settled using the active mechanism.
         # The mirror code itself asserts that the source BDS's dirty
         # bitmap will stay clean between READY and COMPLETED.
-        for offset in range(5 * self.image_len / 8, 7 * self.image_len / 8, 1024 * 1024):
+        for offset in range(5 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
             self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
-        for offset in range(6 * self.image_len / 8, 7 * self.image_len / 8, 1024 * 1024):
+        for offset in range(6 * self.image_len // 8, 7 * self.image_len // 8, 1024 * 1024):
             self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
 
         if sync_source_and_target:
diff --git a/tests/qemu-iotests/163 b/tests/qemu-iotests/163
index 403842354e5cd9845de9c525dd11ded80e7b0ca7..5fd424761b733a4c33a9f9044c30f131c9d2ec38 100755 (executable)
@@ -38,7 +38,7 @@ class ShrinkBaseClass(iotests.QMPTestCase):
         entry_bits = 3
         entry_size = 1 << entry_bits
         l1_mask = 0x00fffffffffffe00
-        div_roundup = lambda n, d: (n + d - 1) / d
+        div_roundup = lambda n, d: (n + d - 1) // d
 
         def split_by_n(data, n):
             for x in xrange(0, len(data), n):
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 7290c0b15930df7b3ce0ac7b3b487d0aac2d060a..7ca94e927898b8fa198daa3a4508eb13827a7585 100644 (file)
@@ -199,7 +199,7 @@ def create_image(name, size):
     file = open(name, 'wb')
     i = 0
     while i < size:
-        sector = struct.pack('>l504xl', i / 512, i / 512)
+        sector = struct.pack('>l504xl', i // 512, i // 512)
         file.write(sector)
         i = i + 512
     file.close()
diff --git a/tests/qemu-iotests/qed.py b/tests/qemu-iotests/qed.py
index ea469b9c48146bf5c6cb9eb7a06818ff520ad00a..8adaaf46c4ace924e6584cfaef19a13b3b593f22 100755 (executable)
@@ -80,7 +80,7 @@ class QED(object):
 
     def load_l1_table(self):
         self.l1_table = self.read_table(self.header['l1_table_offset'])
-        self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
+        self.table_nelems = self.header['table_size'] * self.header['cluster_size'] // table_elem_size
 
     def write_table(self, offset, table):
         s = ''.join(pack_table_elem(x) for x in table)
@@ -167,14 +167,14 @@ def cmd_zero_cluster(qed, pos, *args):
         n = int(args[0])
 
     for i in xrange(n):
-        l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
+        l1_index = pos // qed.header['cluster_size'] // len(qed.l1_table)
         if qed.l1_table[l1_index] == 0:
             err('no l2 table allocated')
 
         l2_offset = qed.l1_table[l1_index]
         l2_table = qed.read_table(l2_offset)
 
-        l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
+        l2_index = (pos // qed.header['cluster_size']) % len(qed.l1_table)
         l2_table[l2_index] = 1 # zero the data cluster
         qed.write_table(l2_offset, l2_table)
         pos += qed.header['cluster_size']