#!/usr/bin/env python
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests

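# Helper: write a sequence of (pattern, offset, length) tuples into an image
# with qemu-io, giving the image a recognizable content pattern.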
def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass

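# Helpers for building QMP 'transaction' action dictionaries.  QMP member
# names use dashes where Python identifiers use underscores, so
# transaction_action() translates '_' to '-' in the keyword argument names.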
def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.iteritems())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)

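# Bookkeeping for one dirty bitmap on one drive: tracks the chain of
# (incremental target, reference backup) image pairs created for it so the
# test can compare and clean them up later.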
class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)

class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))

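    # Register a new drive for the test: record its image path, its full
    # ("anchor") backup path and its format so later helpers can refer to
    # the drive through this dict.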
    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.iteritems():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)

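    # Issue a drive-backup command and block until the job completes,
    # returning True on success and False if the job reported the given
    # error string.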
    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def ignore_job_status_change_events(self):
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False

    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()


    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)

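    # Create a named dirty bitmap on a drive via block-dirty-bitmap-add and
    # track it for later verification and cleanup.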
    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target

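    # Run one sync=incremental drive-backup into a freshly prepared target.
    # On failure the half-created target is discarded; on success a full
    # reference backup is taken for later comparison.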
    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')

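    # Shared body for the "simple" incremental tests: one anchor backup,
    # one bitmap, then three incremental backups with writes in between.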
    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)



class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)

    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        target with a larger cluster size of 128KiB work without a backing
        file.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # Overwrite [32736, 32799], which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        result = self.vm.qmp('query-block')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/name', 'bitmap0')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/count', 458752)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/granularity', 65536)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/status', 'active')

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()

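    # Shared body for the transaction-failure tests: drive1 is attached
    # through a blkdebug node configured so that the first flush moves it
    # into a state where the next read fails with EIO, forcing drive1's half
    # of the grouped transaction to fail.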
    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Prepare new targets for both drives.
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to
        each drive. Use blkdebug to interfere with the backup on just one
        drive and attempt to create a coherent incremental backup across both
        drives.

        Verify a failure in one but not both, then delete the failed stubs
        and re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the
        entire transaction job group.
        '''
        self.do_transaction_failure_test(race=True)

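    # Error cases: sync=incremental without a bitmap, or with a bitmap name
    # that does not exist on the drive, should fail cleanly with a
    # GenericError rather than starting a job.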
    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')

    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

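    # setUp is overridden so that drive0 is not attached at launch; the test
    # itself attaches the image through a blkdebug node via blockdev-add.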
    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, the VM would resume
        # and begin issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'])