/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../extent_io.h"
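
/*
 * Flags for process_page_range(): PROCESS_TEST_LOCKED counts pages in the
 * range that are not locked, PROCESS_UNLOCK unlocks any locked pages, and
 * PROCESS_RELEASE drops the reference that was keeping each test page
 * pinned in the page cache.
 */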
#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	int count = 0;
	int i;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
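
	/*
	 * Walk the range in batches of up to ARRAY_SIZE(pages) pages,
	 * checking, unlocking and/or releasing each page according to the
	 * PROCESS_* flags passed in by the caller.
	 */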
	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
			if (flags & PROCESS_RELEASE)
				page_cache_release(pages[i]);
		printk(KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret);
static int test_find_delalloc(void)
{
	struct inode *inode;
	struct extent_io_tree tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	u64 total_dirty = SZ_256M;
	u64 max_bytes = SZ_128M;
	u64 start, end, test_start;
	u64 found;
	test_msg("Running find delalloc tests\n");

	inode = btrfs_new_test_inode();
		test_msg("Failed to allocate test inode\n");

	extent_io_tree_init(&tmp, &inode->i_data);
	/*
	 * First go through and create and mark all of our pages dirty; we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
			test_msg("Failed to allocate test page\n");
		page_cache_get(page);
	/* Test this scenario: delalloc and the search both start at offset 0. */
	set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
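
	/*
	 * Only the first page (0..4095) is marked delalloc, and the search is
	 * anchored on the page locked at index 0, so the range reported back
	 * should be exactly [0, 4095], as the checks below verify.
	 */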
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
		test_msg("Should have found at least one delalloc\n");
	if (start != 0 || end != 4095) {
		test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
			 start, end);
	unlock_extent(&tmp, start, end);
	unlock_page(locked_page);
	page_cache_release(locked_page);
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_CACHE_SHIFT);
		test_msg("Couldn't find the locked page\n");
	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
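
	/*
	 * Delalloc now covers [4096, max_bytes - 1] and the search is anchored
	 * on the page locked at test_start, so the reported range should start
	 * at test_start and run to the end of the delalloc extent
	 * (max_bytes - 1), as checked below.
	 */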
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
		test_msg("Couldn't find delalloc in our range\n");
	if (start != test_start || end != max_bytes - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu, end %Lu\n",
			 test_start, max_bytes - 1, start, end);
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("There were unlocked pages in the range\n");
	unlock_extent(&tmp, start, end);
	/* locked_page was unlocked above */
	page_cache_release(locked_page);
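
	/*
	 * Next, anchor the search on a locked page that sits past the end of
	 * the delalloc range; the search should come back empty-handed.
	 */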
	test_start = max_bytes + 4096;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_CACHE_SHIFT);
		test_msg("Couldn't find the locked page\n");
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
		test_msg("Found range when we shouldn't have\n");
	if (end != (u64)-1) {
		test_msg("Did not return the proper end offset\n");
	/*
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
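
	/*
	 * The locked page at test_start now falls inside a delalloc extent
	 * that runs to the end of the dirty range, so the search should
	 * report [test_start, total_dirty - 1], as checked below.
	 */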
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
		test_msg("Didn't find our range\n");
	if (start != test_start || end != total_dirty - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, total_dirty - 1, start, end);
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
	unlock_extent(&tmp, start, end);
	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
		test_msg("Couldn't find our page\n");
	ClearPageDirty(page);
	page_cache_release(page);
	/* We unlocked it in the previous test */
	lock_page(locked_page);

	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search.
	 * If this changes at any point in the future we will need to fix
	 * this test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
		test_msg("Didn't find our range\n");
	if (start != test_start || end != test_start + PAGE_CACHE_SIZE - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, test_start + PAGE_CACHE_SIZE - 1, start,
			 end);
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");

	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
	page_cache_release(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i;
	unsigned long x;

	memset(bitmap, 0, len);
	memset_extent_buffer(eb, 0, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Bitmap was not zeroed\n");
	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Setting all bits failed\n");
	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Clearing all bits failed\n");
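
	/*
	 * The byte offset PAGE_CACHE_SIZE - sizeof(long) / 2 sits just short
	 * of a page boundary, so setting or clearing sizeof(long) bytes worth
	 * of bits from there has to straddle two pages of the extent buffer.
	 */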
	bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
		   sizeof(long) * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
				 sizeof(long) * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Setting straddling pages failed\n");
	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	bitmap_clear(bitmap,
		     (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
		     sizeof(long) * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
				   sizeof(long) * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Clearing straddling pages failed\n");
	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	for (i = 0; i < len / sizeof(long); i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
		bitmap[i] = x;
	}
	write_extent_buffer(eb, bitmap, 0, len);
	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
			test_msg("Testing bit pattern failed\n");

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
			test_msg("Testing bit pattern with offset failed\n");
static int test_eb_bitmaps(void)
{
	unsigned long len = PAGE_CACHE_SIZE * 4;
	unsigned long *bitmap;
	struct extent_buffer *eb;
	int ret;

	test_msg("Running extent buffer bitmap tests\n");
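
	/*
	 * The extent buffer spans several pages (4 * PAGE_CACHE_SIZE) so that
	 * the bitmap helpers are exercised across page boundaries, first with
	 * a page-aligned buffer and then, below, with an unaligned one.
	 */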
	bitmap = kmalloc(len, GFP_KERNEL);
		test_msg("Couldn't allocate test bitmap\n");

	eb = __alloc_dummy_extent_buffer(NULL, 0, len);
		test_msg("Couldn't allocate test extent buffer\n");

	ret = __test_eb_bitmaps(bitmap, eb, len);

	/* Do it over again with an extent buffer which isn't page-aligned. */
	free_extent_buffer(eb);
	eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
		test_msg("Couldn't allocate test extent buffer\n");

	ret = __test_eb_bitmaps(bitmap, eb, len);

	free_extent_buffer(eb);
int btrfs_test_extent_io(void)
{
	int ret;

	test_msg("Running extent I/O tests\n");

	ret = test_find_delalloc();
	ret = test_eb_bitmaps();

	test_msg("Extent I/O tests finished\n");