/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

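/*
 * put a chain of bios back at the front of a device's pending list,
 * keeping whatever tail was already recorded
 */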
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

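/*
 * record a scanned device in the global list of known filesystems,
 * creating a new btrfs_fs_devices entry for an unknown fsid and
 * refreshing the stored name and latest transid for a known device
 */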
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

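/*
 * duplicate an fs_devices structure and its device list so the original
 * can stay registered on fs_uuids while the filesystem is sprouted
 */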
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	mutex_lock(&orig->device_list_mutex);
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

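/*
 * drop one open reference on fs_devices; when the last reference goes
 * away, release every block device and mark the devices closed
 */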
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

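/*
 * open every scanned device in the list, verify its super block and
 * remember which device carries the most recent generation
 */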
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

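/*
 * read the super block of a single device and register it with
 * device_list_add() so later mounts can find all of its peers
 */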
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @trans:	transaction handler
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the max
 *		free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = 1024 * 1024;

	if (root->fs_info->alloc_start + num_bytes <= search_end)
		search_start = max(root->fs_info->alloc_start, search_start);

	max_hole_start = search_start;
	max_hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	hole_size = search_end - search_start;
	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}

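/*
 * find the offset just past the last chunk item with the given
 * objectid, which is where the next chunk should be placed
 */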
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

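/*
 * remove a device from a mounted filesystem: shrink it to zero bytes,
 * delete its dev item, unlink it from the device lists and wipe the
 * btrfs magic in its super block so it is no longer detected
 */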
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	device->in_fs_metadata = 0;

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_init(&device->dev_list);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		blkdev_put(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

1384 | /* |
1385 | * does all the dirty work required for changing file system's UUID. | |
1386 | */ | |
1387 | static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, | |
1388 | struct btrfs_root *root) | |
1389 | { | |
1390 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | |
1391 | struct btrfs_fs_devices *old_devices; | |
e4404d6e | 1392 | struct btrfs_fs_devices *seed_devices; |
2b82032c YZ |
1393 | struct btrfs_super_block *disk_super = &root->fs_info->super_copy; |
1394 | struct btrfs_device *device; | |
1395 | u64 super_flags; | |
1396 | ||
1397 | BUG_ON(!mutex_is_locked(&uuid_mutex)); | |
e4404d6e | 1398 | if (!fs_devices->seeding) |
2b82032c YZ |
1399 | return -EINVAL; |
1400 | ||
e4404d6e YZ |
1401 | seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS); |
1402 | if (!seed_devices) | |
2b82032c YZ |
1403 | return -ENOMEM; |
1404 | ||
e4404d6e YZ |
1405 | old_devices = clone_fs_devices(fs_devices); |
1406 | if (IS_ERR(old_devices)) { | |
1407 | kfree(seed_devices); | |
1408 | return PTR_ERR(old_devices); | |
2b82032c | 1409 | } |
e4404d6e | 1410 | |
2b82032c YZ |
1411 | list_add(&old_devices->list, &fs_uuids); |
1412 | ||
e4404d6e YZ |
1413 | memcpy(seed_devices, fs_devices, sizeof(*seed_devices)); |
1414 | seed_devices->opened = 1; | |
1415 | INIT_LIST_HEAD(&seed_devices->devices); | |
1416 | INIT_LIST_HEAD(&seed_devices->alloc_list); | |
e5e9a520 | 1417 | mutex_init(&seed_devices->device_list_mutex); |
e4404d6e YZ |
1418 | list_splice_init(&fs_devices->devices, &seed_devices->devices); |
1419 | list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); | |
1420 | list_for_each_entry(device, &seed_devices->devices, dev_list) { | |
1421 | device->fs_devices = seed_devices; | |
1422 | } | |
1423 | ||
2b82032c YZ |
1424 | fs_devices->seeding = 0; |
1425 | fs_devices->num_devices = 0; | |
1426 | fs_devices->open_devices = 0; | |
e4404d6e | 1427 | fs_devices->seed = seed_devices; |
2b82032c YZ |
1428 | |
1429 | generate_random_uuid(fs_devices->fsid); | |
1430 | memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); | |
1431 | memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE); | |
1432 | super_flags = btrfs_super_flags(disk_super) & | |
1433 | ~BTRFS_SUPER_FLAG_SEEDING; | |
1434 | btrfs_set_super_flags(disk_super, super_flags); | |
1435 | ||
1436 | return 0; | |
1437 | } | |
1438 | ||
1439 | /* | |
1440 | * strore the expected generation for seed devices in device items. | |
1441 | */ | |
1442 | static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, | |
1443 | struct btrfs_root *root) | |
1444 | { | |
1445 | struct btrfs_path *path; | |
1446 | struct extent_buffer *leaf; | |
1447 | struct btrfs_dev_item *dev_item; | |
1448 | struct btrfs_device *device; | |
1449 | struct btrfs_key key; | |
1450 | u8 fs_uuid[BTRFS_UUID_SIZE]; | |
1451 | u8 dev_uuid[BTRFS_UUID_SIZE]; | |
1452 | u64 devid; | |
1453 | int ret; | |
1454 | ||
1455 | path = btrfs_alloc_path(); | |
1456 | if (!path) | |
1457 | return -ENOMEM; | |
1458 | ||
1459 | root = root->fs_info->chunk_root; | |
1460 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; | |
1461 | key.offset = 0; | |
1462 | key.type = BTRFS_DEV_ITEM_KEY; | |
1463 | ||
1464 | while (1) { | |
1465 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); | |
1466 | if (ret < 0) | |
1467 | goto error; | |
1468 | ||
1469 | leaf = path->nodes[0]; | |
1470 | next_slot: | |
1471 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | |
1472 | ret = btrfs_next_leaf(root, path); | |
1473 | if (ret > 0) | |
1474 | break; | |
1475 | if (ret < 0) | |
1476 | goto error; | |
1477 | leaf = path->nodes[0]; | |
1478 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
1479 | btrfs_release_path(root, path); | |
1480 | continue; | |
1481 | } | |
1482 | ||
1483 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | |
1484 | if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || | |
1485 | key.type != BTRFS_DEV_ITEM_KEY) | |
1486 | break; | |
1487 | ||
1488 | dev_item = btrfs_item_ptr(leaf, path->slots[0], | |
1489 | struct btrfs_dev_item); | |
1490 | devid = btrfs_device_id(leaf, dev_item); | |
1491 | read_extent_buffer(leaf, dev_uuid, | |
1492 | (unsigned long)btrfs_device_uuid(dev_item), | |
1493 | BTRFS_UUID_SIZE); | |
1494 | read_extent_buffer(leaf, fs_uuid, | |
1495 | (unsigned long)btrfs_device_fsid(dev_item), | |
1496 | BTRFS_UUID_SIZE); | |
1497 | device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); | |
1498 | BUG_ON(!device); | |
1499 | ||
1500 | if (device->fs_devices->seeding) { | |
1501 | btrfs_set_device_generation(leaf, dev_item, | |
1502 | device->generation); | |
1503 | btrfs_mark_buffer_dirty(leaf); | |
1504 | } | |
1505 | ||
1506 | path->slots[0]++; | |
1507 | goto next_slot; | |
1508 | } | |
1509 | ret = 0; | |
1510 | error: | |
1511 | btrfs_free_path(path); | |
1512 | return ret; | |
1513 | } | |
1514 | ||
788f20eb CM |
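/*
 * add a new device to a mounted filesystem; when the filesystem is a
 * seed, this also sprouts a new writable fs on top of it
 */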
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	/*
	 * we have the volume lock, so we don't need the extra
	 * device list mutex while reading the list here.
	 */
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		kfree(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;

	/*
	 * we don't want write_supers to jump in here with our device
	 * half setup
	 */
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
1620 | root->fs_info->fs_devices->rw_devices++; | |
1621 | root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; | |
325cd4ba | 1622 | |
c289811c CM |
1623 | if (!blk_queue_nonrot(bdev_get_queue(bdev))) |
1624 | root->fs_info->fs_devices->rotating = 1; | |
1625 | ||
788f20eb CM |
1626 | total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); |
1627 | btrfs_set_super_total_bytes(&root->fs_info->super_copy, | |
1628 | total_bytes + device->total_bytes); | |
1629 | ||
1630 | total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy); | |
1631 | btrfs_set_super_num_devices(&root->fs_info->super_copy, | |
1632 | total_bytes + 1); | |
e5e9a520 | 1633 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); |
788f20eb | 1634 | |
2b82032c YZ |
1635 | if (seeding_dev) { |
1636 | ret = init_first_rw_device(trans, root, device); | |
1637 | BUG_ON(ret); | |
1638 | ret = btrfs_finish_sprout(trans, root); | |
1639 | BUG_ON(ret); | |
1640 | } else { | |
1641 | ret = btrfs_add_device(trans, root, device); | |
1642 | } | |
1643 | ||
913d952e CM |
1644 | /* |
1645 | * we've got more storage, clear any full flags on the space | |
1646 | * infos | |
1647 | */ | |
1648 | btrfs_clear_space_info_full(root->fs_info); | |
1649 | ||
7d9eb12c | 1650 | unlock_chunks(root); |
2b82032c | 1651 | btrfs_commit_transaction(trans, root); |
a2135011 | 1652 | |
2b82032c YZ |
1653 | if (seeding_dev) { |
1654 | mutex_unlock(&uuid_mutex); | |
1655 | up_write(&sb->s_umount); | |
788f20eb | 1656 | |
2b82032c YZ |
1657 | ret = btrfs_relocate_sys_chunks(root); |
1658 | BUG_ON(ret); | |
1659 | } | |
1660 | out: | |
1661 | mutex_unlock(&root->fs_info->volume_mutex); | |
1662 | return ret; | |
1663 | error: | |
e525fd89 | 1664 | blkdev_put(bdev, FMODE_EXCL); |
2b82032c YZ |
1665 | if (seeding_dev) { |
1666 | mutex_unlock(&uuid_mutex); | |
1667 | up_write(&sb->s_umount); | |
1668 | } | |
788f20eb CM |
1669 | goto out; |
1670 | } | |
1671 | ||
d397712b CM |
1672 | static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, |
1673 | struct btrfs_device *device) | |
0b86a832 CM |
1674 | { |
1675 | int ret; | |
1676 | struct btrfs_path *path; | |
1677 | struct btrfs_root *root; | |
1678 | struct btrfs_dev_item *dev_item; | |
1679 | struct extent_buffer *leaf; | |
1680 | struct btrfs_key key; | |
1681 | ||
1682 | root = device->dev_root->fs_info->chunk_root; | |
1683 | ||
1684 | path = btrfs_alloc_path(); | |
1685 | if (!path) | |
1686 | return -ENOMEM; | |
1687 | ||
1688 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; | |
1689 | key.type = BTRFS_DEV_ITEM_KEY; | |
1690 | key.offset = device->devid; | |
1691 | ||
1692 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); | |
1693 | if (ret < 0) | |
1694 | goto out; | |
1695 | ||
1696 | if (ret > 0) { | |
1697 | ret = -ENOENT; | |
1698 | goto out; | |
1699 | } | |
1700 | ||
1701 | leaf = path->nodes[0]; | |
1702 | dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); | |
1703 | ||
1704 | btrfs_set_device_id(leaf, dev_item, device->devid); | |
1705 | btrfs_set_device_type(leaf, dev_item, device->type); | |
1706 | btrfs_set_device_io_align(leaf, dev_item, device->io_align); | |
1707 | btrfs_set_device_io_width(leaf, dev_item, device->io_width); | |
1708 | btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); | |
d6397bae | 1709 | btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes); |
0b86a832 CM |
1710 | btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); |
1711 | btrfs_mark_buffer_dirty(leaf); | |
1712 | ||
1713 | out: | |
1714 | btrfs_free_path(path); | |
1715 | return ret; | |
1716 | } | |
1717 | ||
7d9eb12c | 1718 | static int __btrfs_grow_device(struct btrfs_trans_handle *trans, |
8f18cf13 CM |
1719 | struct btrfs_device *device, u64 new_size) |
1720 | { | |
1721 | struct btrfs_super_block *super_copy = | |
1722 | &device->dev_root->fs_info->super_copy; | |
1723 | u64 old_total = btrfs_super_total_bytes(super_copy); | |
1724 | u64 diff = new_size - device->total_bytes; | |
1725 | ||
2b82032c YZ |
1726 | if (!device->writeable) |
1727 | return -EACCES; | |
1728 | if (new_size <= device->total_bytes) | |
1729 | return -EINVAL; | |
1730 | ||
8f18cf13 | 1731 | btrfs_set_super_total_bytes(super_copy, old_total + diff); |
2b82032c YZ |
1732 | device->fs_devices->total_rw_bytes += diff; |
1733 | ||
1734 | device->total_bytes = new_size; | |
9779b72f | 1735 | device->disk_total_bytes = new_size; |
4184ea7f CM |
1736 | btrfs_clear_space_info_full(device->dev_root->fs_info); |
1737 | ||
8f18cf13 CM |
1738 | return btrfs_update_device(trans, device); |
1739 | } | |
1740 | ||
7d9eb12c CM |
1741 | int btrfs_grow_device(struct btrfs_trans_handle *trans, |
1742 | struct btrfs_device *device, u64 new_size) | |
1743 | { | |
1744 | int ret; | |
1745 | lock_chunks(device->dev_root); | |
1746 | ret = __btrfs_grow_device(trans, device, new_size); | |
1747 | unlock_chunks(device->dev_root); | |
1748 | return ret; | |
1749 | } | |
1750 | ||
8f18cf13 CM |
1751 | static int btrfs_free_chunk(struct btrfs_trans_handle *trans, |
1752 | struct btrfs_root *root, | |
1753 | u64 chunk_tree, u64 chunk_objectid, | |
1754 | u64 chunk_offset) | |
1755 | { | |
1756 | int ret; | |
1757 | struct btrfs_path *path; | |
1758 | struct btrfs_key key; | |
1759 | ||
1760 | root = root->fs_info->chunk_root; | |
1761 | path = btrfs_alloc_path(); | |
1762 | if (!path) | |
1763 | return -ENOMEM; | |
1764 | ||
1765 | key.objectid = chunk_objectid; | |
1766 | key.offset = chunk_offset; | |
1767 | key.type = BTRFS_CHUNK_ITEM_KEY; | |
1768 | ||
1769 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | |
1770 | BUG_ON(ret); | |
1771 | ||
1772 | ret = btrfs_del_item(trans, root, path); | |
1773 | BUG_ON(ret); | |
1774 | ||
1775 | btrfs_free_path(path); | |
1776 | return 0; | |
1777 | } | |
1778 | ||
b2950863 | 1779 | static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 |
8f18cf13 CM |
1780 | chunk_offset) |
1781 | { | |
1782 | struct btrfs_super_block *super_copy = &root->fs_info->super_copy; | |
1783 | struct btrfs_disk_key *disk_key; | |
1784 | struct btrfs_chunk *chunk; | |
1785 | u8 *ptr; | |
1786 | int ret = 0; | |
1787 | u32 num_stripes; | |
1788 | u32 array_size; | |
1789 | u32 len = 0; | |
1790 | u32 cur; | |
1791 | struct btrfs_key key; | |
1792 | ||
1793 | array_size = btrfs_super_sys_array_size(super_copy); | |
1794 | ||
1795 | ptr = super_copy->sys_chunk_array; | |
1796 | cur = 0; | |
1797 | ||
1798 | while (cur < array_size) { | |
1799 | disk_key = (struct btrfs_disk_key *)ptr; | |
1800 | btrfs_disk_key_to_cpu(&key, disk_key); | |
1801 | ||
1802 | len = sizeof(*disk_key); | |
1803 | ||
1804 | if (key.type == BTRFS_CHUNK_ITEM_KEY) { | |
1805 | chunk = (struct btrfs_chunk *)(ptr + len); | |
1806 | num_stripes = btrfs_stack_chunk_num_stripes(chunk); | |
1807 | len += btrfs_chunk_item_size(num_stripes); | |
1808 | } else { | |
1809 | ret = -EIO; | |
1810 | break; | |
1811 | } | |
1812 | if (key.objectid == chunk_objectid && | |
1813 | key.offset == chunk_offset) { | |
1814 | memmove(ptr, ptr + len, array_size - (cur + len)); | |
1815 | array_size -= len; | |
1816 | btrfs_set_super_sys_array_size(super_copy, array_size); | |
1817 | } else { | |
1818 | ptr += len; | |
1819 | cur += len; | |
1820 | } | |
1821 | } | |
1822 | return ret; | |
1823 | } | |
1824 | ||
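The loop above decodes the packed sys_chunk_array held in the superblock. A brief sketch of the layout it assumes, derived from the parsing code itself (a reading aid, not a change to the on-disk format):

/*
 * sys_chunk_array layout as parsed above:
 *
 *   [btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][btrfs_chunk + M stripes] ...
 *
 * Each entry is sizeof(struct btrfs_disk_key) plus
 * btrfs_chunk_item_size(num_stripes), so the cursor can only advance by
 * reading num_stripes out of the chunk itself; any key that is not a
 * BTRFS_CHUNK_ITEM_KEY makes the rest of the array undecodable, hence
 * the -EIO above.
 */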
b2950863 | 1825 | static int btrfs_relocate_chunk(struct btrfs_root *root, |
8f18cf13 CM |
1826 | u64 chunk_tree, u64 chunk_objectid, |
1827 | u64 chunk_offset) | |
1828 | { | |
1829 | struct extent_map_tree *em_tree; | |
1830 | struct btrfs_root *extent_root; | |
1831 | struct btrfs_trans_handle *trans; | |
1832 | struct extent_map *em; | |
1833 | struct map_lookup *map; | |
1834 | int ret; | |
1835 | int i; | |
1836 | ||
1837 | root = root->fs_info->chunk_root; | |
1838 | extent_root = root->fs_info->extent_root; | |
1839 | em_tree = &root->fs_info->mapping_tree.map_tree; | |
1840 | ||
ba1bf481 JB |
1841 | ret = btrfs_can_relocate(extent_root, chunk_offset); |
1842 | if (ret) | |
1843 | return -ENOSPC; | |
1844 | ||
8f18cf13 | 1845 | /* step one, relocate all the extents inside this chunk */ |
1a40e23b | 1846 | ret = btrfs_relocate_block_group(extent_root, chunk_offset); |
a22285a6 YZ |
1847 | if (ret) |
1848 | return ret; | |
8f18cf13 | 1849 | |
a22285a6 | 1850 | trans = btrfs_start_transaction(root, 0); |
98d5dc13 | 1851 | BUG_ON(IS_ERR(trans)); |
8f18cf13 | 1852 | |
7d9eb12c CM |
1853 | lock_chunks(root); |
1854 | ||
8f18cf13 CM |
1855 | /* |
1856 | * step two, delete the device extents and the | |
1857 | * chunk tree entries | |
1858 | */ | |
890871be | 1859 | read_lock(&em_tree->lock); |
8f18cf13 | 1860 | em = lookup_extent_mapping(em_tree, chunk_offset, 1); |
890871be | 1861 | read_unlock(&em_tree->lock); |
8f18cf13 | 1862 | |
a061fc8d CM |
1863 | BUG_ON(em->start > chunk_offset || |
1864 | em->start + em->len < chunk_offset); | |
8f18cf13 CM |
1865 | map = (struct map_lookup *)em->bdev; |
1866 | ||
1867 | for (i = 0; i < map->num_stripes; i++) { | |
1868 | ret = btrfs_free_dev_extent(trans, map->stripes[i].dev, | |
1869 | map->stripes[i].physical); | |
1870 | BUG_ON(ret); | |
a061fc8d | 1871 | |
dfe25020 CM |
1872 | if (map->stripes[i].dev) { |
1873 | ret = btrfs_update_device(trans, map->stripes[i].dev); | |
1874 | BUG_ON(ret); | |
1875 | } | |
8f18cf13 CM |
1876 | } |
1877 | ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid, | |
1878 | chunk_offset); | |
1879 | ||
1880 | BUG_ON(ret); | |
1881 | ||
1882 | if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { | |
1883 | ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset); | |
1884 | BUG_ON(ret); | |
8f18cf13 CM |
1885 | } |
1886 | ||
2b82032c YZ |
1887 | ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); |
1888 | BUG_ON(ret); | |
1889 | ||
890871be | 1890 | write_lock(&em_tree->lock); |
2b82032c | 1891 | remove_extent_mapping(em_tree, em); |
890871be | 1892 | write_unlock(&em_tree->lock); |
2b82032c YZ |
1893 | |
1894 | kfree(map); | |
1895 | em->bdev = NULL; | |
1896 | ||
1897 | /* once for the tree */ | |
1898 | free_extent_map(em); | |
1899 | /* once for us */ | |
1900 | free_extent_map(em); | |
1901 | ||
1902 | unlock_chunks(root); | |
1903 | btrfs_end_transaction(trans, root); | |
1904 | return 0; | |
1905 | } | |
1906 | ||
1907 | static int btrfs_relocate_sys_chunks(struct btrfs_root *root) | |
1908 | { | |
1909 | struct btrfs_root *chunk_root = root->fs_info->chunk_root; | |
1910 | struct btrfs_path *path; | |
1911 | struct extent_buffer *leaf; | |
1912 | struct btrfs_chunk *chunk; | |
1913 | struct btrfs_key key; | |
1914 | struct btrfs_key found_key; | |
1915 | u64 chunk_tree = chunk_root->root_key.objectid; | |
1916 | u64 chunk_type; | |
ba1bf481 JB |
1917 | bool retried = false; |
1918 | int failed = 0; | |
2b82032c YZ |
1919 | int ret; |
1920 | ||
1921 | path = btrfs_alloc_path(); | |
1922 | if (!path) | |
1923 | return -ENOMEM; | |
1924 | ||
ba1bf481 | 1925 | again: |
2b82032c YZ |
1926 | key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; |
1927 | key.offset = (u64)-1; | |
1928 | key.type = BTRFS_CHUNK_ITEM_KEY; | |
1929 | ||
1930 | while (1) { | |
1931 | ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); | |
1932 | if (ret < 0) | |
1933 | goto error; | |
1934 | BUG_ON(ret == 0); | |
1935 | ||
1936 | ret = btrfs_previous_item(chunk_root, path, key.objectid, | |
1937 | key.type); | |
1938 | if (ret < 0) | |
1939 | goto error; | |
1940 | if (ret > 0) | |
1941 | break; | |
1a40e23b | 1942 | |
2b82032c YZ |
1943 | leaf = path->nodes[0]; |
1944 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | |
1a40e23b | 1945 | |
2b82032c YZ |
1946 | chunk = btrfs_item_ptr(leaf, path->slots[0], |
1947 | struct btrfs_chunk); | |
1948 | chunk_type = btrfs_chunk_type(leaf, chunk); | |
1949 | btrfs_release_path(chunk_root, path); | |
8f18cf13 | 1950 | |
2b82032c YZ |
1951 | if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { |
1952 | ret = btrfs_relocate_chunk(chunk_root, chunk_tree, | |
1953 | found_key.objectid, | |
1954 | found_key.offset); | |
ba1bf481 JB |
1955 | if (ret == -ENOSPC) |
1956 | failed++; | |
1957 | else if (ret) | |
1958 | BUG(); | |
2b82032c | 1959 | } |
8f18cf13 | 1960 | |
2b82032c YZ |
1961 | if (found_key.offset == 0) |
1962 | break; | |
1963 | key.offset = found_key.offset - 1; | |
1964 | } | |
1965 | ret = 0; | |
ba1bf481 JB |
1966 | if (failed && !retried) { |
1967 | failed = 0; | |
1968 | retried = true; | |
1969 | goto again; | |
1970 | } else if (failed && retried) { | |
1971 | WARN_ON(1); | |
1972 | ret = -ENOSPC; | |
1973 | } | |
2b82032c YZ |
1974 | error: |
1975 | btrfs_free_path(path); | |
1976 | return ret; | |
8f18cf13 CM |
1977 | } |
1978 | ||
ec44a35c CM |
1979 | static u64 div_factor(u64 num, int factor) |
1980 | { | |
1981 | if (factor == 10) | |
1982 | return num; | |
1983 | num *= factor; | |
1984 | do_div(num, 10); | |
1985 | return num; | |
1986 | } | |
1987 | ||
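div_factor() simply scales a byte count by factor/10, so a factor of 1 means 10%. A minimal userspace sketch of the same arithmetic, useful when reading btrfs_balance() below; the name div_factor_sketch and the main() harness are illustrative additions, not part of this file:

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for div_factor(); the kernel's do_div() divides in place */
static uint64_t div_factor_sketch(uint64_t num, int factor)
{
	if (factor == 10)
		return num;
	return num * factor / 10;
}

int main(void)
{
	uint64_t dev_size = 100ULL << 30;	/* a hypothetical 100GB device */

	/* btrfs_balance() below asks for div_factor(old_size, 1), i.e. 10% */
	printf("10%% of the device: %llu bytes\n",
	       (unsigned long long)div_factor_sketch(dev_size, 1));
	return 0;
}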
ec44a35c CM |
1988 | int btrfs_balance(struct btrfs_root *dev_root) |
1989 | { | |
1990 | int ret; | |
ec44a35c CM |
1991 | struct list_head *devices = &dev_root->fs_info->fs_devices->devices; |
1992 | struct btrfs_device *device; | |
1993 | u64 old_size; | |
1994 | u64 size_to_free; | |
1995 | struct btrfs_path *path; | |
1996 | struct btrfs_key key; | |
ec44a35c CM |
1997 | struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root; |
1998 | struct btrfs_trans_handle *trans; | |
1999 | struct btrfs_key found_key; | |
2000 | ||
2b82032c YZ |
2001 | if (dev_root->fs_info->sb->s_flags & MS_RDONLY) |
2002 | return -EROFS; | |
ec44a35c | 2003 | |
6f88a440 BH |
2004 | if (!capable(CAP_SYS_ADMIN)) |
2005 | return -EPERM; | |
2006 | ||
7d9eb12c | 2007 | mutex_lock(&dev_root->fs_info->volume_mutex); |
ec44a35c CM |
2008 | dev_root = dev_root->fs_info->dev_root; |
2009 | ||
ec44a35c | 2010 | /* step one, make some room on all the devices */ |
c6e30871 | 2011 | list_for_each_entry(device, devices, dev_list) { |
ec44a35c CM |
2012 | old_size = device->total_bytes; |
2013 | size_to_free = div_factor(old_size, 1); | |
2014 | size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); | |
2b82032c YZ |
2015 | if (!device->writeable || |
2016 | device->total_bytes - device->bytes_used > size_to_free) | |
ec44a35c CM |
2017 | continue; |
2018 | ||
2019 | ret = btrfs_shrink_device(device, old_size - size_to_free); | |
ba1bf481 JB |
2020 | if (ret == -ENOSPC) |
2021 | break; | |
ec44a35c CM |
2022 | BUG_ON(ret); |
2023 | ||
a22285a6 | 2024 | trans = btrfs_start_transaction(dev_root, 0); |
98d5dc13 | 2025 | BUG_ON(IS_ERR(trans)); |
ec44a35c CM |
2026 | |
2027 | ret = btrfs_grow_device(trans, device, old_size); | |
2028 | BUG_ON(ret); | |
2029 | ||
2030 | btrfs_end_transaction(trans, dev_root); | |
2031 | } | |
2032 | ||
2033 | /* step two, relocate all the chunks */ | |
2034 | path = btrfs_alloc_path(); | |
2035 | BUG_ON(!path); | |
2036 | ||
2037 | key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; | |
2038 | key.offset = (u64)-1; | |
2039 | key.type = BTRFS_CHUNK_ITEM_KEY; | |
2040 | ||
d397712b | 2041 | while (1) { |
ec44a35c CM |
2042 | ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); |
2043 | if (ret < 0) | |
2044 | goto error; | |
2045 | ||
2046 | /* | |
2047 | * this shouldn't happen, it means the last relocate | |
2048 | * failed | |
2049 | */ | |
2050 | if (ret == 0) | |
2051 | break; | |
2052 | ||
2053 | ret = btrfs_previous_item(chunk_root, path, 0, | |
2054 | BTRFS_CHUNK_ITEM_KEY); | |
7d9eb12c | 2055 | if (ret) |
ec44a35c | 2056 | break; |
7d9eb12c | 2057 | |
ec44a35c CM |
2058 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, |
2059 | path->slots[0]); | |
2060 | if (found_key.objectid != key.objectid) | |
2061 | break; | |
7d9eb12c | 2062 | |
ec44a35c | 2063 | /* chunk zero is special */ |
ba1bf481 | 2064 | if (found_key.offset == 0) |
ec44a35c CM |
2065 | break; |
2066 | ||
7d9eb12c | 2067 | btrfs_release_path(chunk_root, path); |
ec44a35c CM |
2068 | ret = btrfs_relocate_chunk(chunk_root, |
2069 | chunk_root->root_key.objectid, | |
2070 | found_key.objectid, | |
2071 | found_key.offset); | |
ba1bf481 JB |
2072 | BUG_ON(ret && ret != -ENOSPC); |
2073 | key.offset = found_key.offset - 1; | |
ec44a35c CM |
2074 | } |
2075 | ret = 0; | |
2076 | error: | |
2077 | btrfs_free_path(path); | |
7d9eb12c | 2078 | mutex_unlock(&dev_root->fs_info->volume_mutex); |
ec44a35c CM |
2079 | return ret; |
2080 | } | |
2081 | ||
8f18cf13 CM |
2082 | /* |
2083 | * shrinking a device means finding all of the device extents past | |
2084 | * the new size, and then following the back refs to the chunks. | |
2085 | * The chunk relocation code actually frees the device extent | |
2086 | */ | |
2087 | int btrfs_shrink_device(struct btrfs_device *device, u64 new_size) | |
2088 | { | |
2089 | struct btrfs_trans_handle *trans; | |
2090 | struct btrfs_root *root = device->dev_root; | |
2091 | struct btrfs_dev_extent *dev_extent = NULL; | |
2092 | struct btrfs_path *path; | |
2093 | u64 length; | |
2094 | u64 chunk_tree; | |
2095 | u64 chunk_objectid; | |
2096 | u64 chunk_offset; | |
2097 | int ret; | |
2098 | int slot; | |
ba1bf481 JB |
2099 | int failed = 0; |
2100 | bool retried = false; | |
8f18cf13 CM |
2101 | struct extent_buffer *l; |
2102 | struct btrfs_key key; | |
2103 | struct btrfs_super_block *super_copy = &root->fs_info->super_copy; | |
2104 | u64 old_total = btrfs_super_total_bytes(super_copy); | |
ba1bf481 | 2105 | u64 old_size = device->total_bytes; |
8f18cf13 CM |
2106 | u64 diff = device->total_bytes - new_size; |
2107 | ||
2b82032c YZ |
2108 | if (new_size >= device->total_bytes) |
2109 | return -EINVAL; | |
8f18cf13 CM |
2110 | |
2111 | path = btrfs_alloc_path(); | |
2112 | if (!path) | |
2113 | return -ENOMEM; | |
2114 | ||
8f18cf13 CM |
2115 | path->reada = 2; |
2116 | ||
7d9eb12c CM |
2117 | lock_chunks(root); |
2118 | ||
8f18cf13 | 2119 | device->total_bytes = new_size; |
2b82032c YZ |
2120 | if (device->writeable) |
2121 | device->fs_devices->total_rw_bytes -= diff; | |
7d9eb12c | 2122 | unlock_chunks(root); |
8f18cf13 | 2123 | |
ba1bf481 | 2124 | again: |
8f18cf13 CM |
2125 | key.objectid = device->devid; |
2126 | key.offset = (u64)-1; | |
2127 | key.type = BTRFS_DEV_EXTENT_KEY; | |
2128 | ||
2129 | while (1) { | |
2130 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
2131 | if (ret < 0) | |
2132 | goto done; | |
2133 | ||
2134 | ret = btrfs_previous_item(root, path, 0, key.type); | |
2135 | if (ret < 0) | |
2136 | goto done; | |
2137 | if (ret) { | |
2138 | ret = 0; | |
ba1bf481 | 2139 | btrfs_release_path(root, path); |
bf1fb512 | 2140 | break; |
8f18cf13 CM |
2141 | } |
2142 | ||
2143 | l = path->nodes[0]; | |
2144 | slot = path->slots[0]; | |
2145 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); | |
2146 | ||
ba1bf481 JB |
2147 | if (key.objectid != device->devid) { |
2148 | btrfs_release_path(root, path); | |
bf1fb512 | 2149 | break; |
ba1bf481 | 2150 | } |
8f18cf13 CM |
2151 | |
2152 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); | |
2153 | length = btrfs_dev_extent_length(l, dev_extent); | |
2154 | ||
ba1bf481 JB |
2155 | if (key.offset + length <= new_size) { |
2156 | btrfs_release_path(root, path); | |
d6397bae | 2157 | break; |
ba1bf481 | 2158 | } |
8f18cf13 CM |
2159 | |
2160 | chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); | |
2161 | chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); | |
2162 | chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); | |
2163 | btrfs_release_path(root, path); | |
2164 | ||
2165 | ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, | |
2166 | chunk_offset); | |
ba1bf481 | 2167 | if (ret && ret != -ENOSPC) |
8f18cf13 | 2168 | goto done; |
ba1bf481 JB |
2169 | if (ret == -ENOSPC) |
2170 | failed++; | |
2171 | key.offset -= 1; | |
2172 | } | |
2173 | ||
2174 | if (failed && !retried) { | |
2175 | failed = 0; | |
2176 | retried = true; | |
2177 | goto again; | |
2178 | } else if (failed && retried) { | |
2179 | ret = -ENOSPC; | |
2180 | lock_chunks(root); | |
2181 | ||
2182 | device->total_bytes = old_size; | |
2183 | if (device->writeable) | |
2184 | device->fs_devices->total_rw_bytes += diff; | |
2185 | unlock_chunks(root); | |
2186 | goto done; | |
8f18cf13 CM |
2187 | } |
2188 | ||
d6397bae | 2189 | /* Shrinking succeeded, else we would be at "done". */ |
a22285a6 | 2190 | trans = btrfs_start_transaction(root, 0); |
98d5dc13 TI |
2191 | if (IS_ERR(trans)) { |
2192 | ret = PTR_ERR(trans); | |
2193 | goto done; | |
2194 | } | |
2195 | ||
d6397bae CB |
2196 | lock_chunks(root); |
2197 | ||
2198 | device->disk_total_bytes = new_size; | |
2199 | /* Now btrfs_update_device() will change the on-disk size. */ | |
2200 | ret = btrfs_update_device(trans, device); | |
2201 | if (ret) { | |
2202 | unlock_chunks(root); | |
2203 | btrfs_end_transaction(trans, root); | |
2204 | goto done; | |
2205 | } | |
2206 | WARN_ON(diff > old_total); | |
2207 | btrfs_set_super_total_bytes(super_copy, old_total - diff); | |
2208 | unlock_chunks(root); | |
2209 | btrfs_end_transaction(trans, root); | |
8f18cf13 CM |
2210 | done: |
2211 | btrfs_free_path(path); | |
2212 | return ret; | |
2213 | } | |
2214 | ||
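To keep the control flow above in view, here is a condensed outline of btrfs_shrink_device() as written above (a summary only; it adds no behavior):

/*
 * btrfs_shrink_device() outline:
 *
 * 1. Optimistically set device->total_bytes = new_size (and shrink
 *    total_rw_bytes) so the allocator stops handing out the tail.
 * 2. Walk DEV_EXTENT items backwards from the end of the device; every
 *    extent that crosses new_size has its chunk relocated.  -ENOSPC is
 *    counted and the whole walk is retried once.
 * 3. If relocation still fails, restore the old size and return -ENOSPC;
 *    otherwise commit the new disk_total_bytes and the smaller super
 *    total_bytes inside a transaction.
 */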
b2950863 | 2215 | static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, |
0b86a832 CM |
2216 | struct btrfs_root *root, |
2217 | struct btrfs_key *key, | |
2218 | struct btrfs_chunk *chunk, int item_size) | |
2219 | { | |
2220 | struct btrfs_super_block *super_copy = &root->fs_info->super_copy; | |
2221 | struct btrfs_disk_key disk_key; | |
2222 | u32 array_size; | |
2223 | u8 *ptr; | |
2224 | ||
2225 | array_size = btrfs_super_sys_array_size(super_copy); | |
2226 | if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) | |
2227 | return -EFBIG; | |
2228 | ||
2229 | ptr = super_copy->sys_chunk_array + array_size; | |
2230 | btrfs_cpu_key_to_disk(&disk_key, key); | |
2231 | memcpy(ptr, &disk_key, sizeof(disk_key)); | |
2232 | ptr += sizeof(disk_key); | |
2233 | memcpy(ptr, chunk, item_size); | |
2234 | item_size += sizeof(disk_key); | |
2235 | btrfs_set_super_sys_array_size(super_copy, array_size + item_size); | |
2236 | return 0; | |
2237 | } | |
2238 | ||
d397712b | 2239 | static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, |
a1b32a59 | 2240 | int num_stripes, int sub_stripes) |
9b3f68b9 CM |
2241 | { |
2242 | if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) | |
2243 | return calc_size; | |
2244 | else if (type & BTRFS_BLOCK_GROUP_RAID10) | |
2245 | return calc_size * (num_stripes / sub_stripes); | |
2246 | else | |
2247 | return calc_size * num_stripes; | |
2248 | } | |
2249 | ||
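A worked illustration of chunk_bytes_by_type() with invented numbers (1GB per stripe, four stripes where that applies):

/*
 * Illustrative numbers only, calc_size = 1GB per stripe:
 *
 *   RAID1 or DUP                        : chunk length = 1GB
 *   RAID10, 4 stripes, sub_stripes = 2  : chunk length = 1GB * (4 / 2) = 2GB
 *   RAID0, 4 stripes                    : chunk length = 1GB * 4 = 4GB
 */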
b2117a39 MX |
2250 | /* Used to sort the devices by max_avail (descending sort) */ | |
2251 | int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) | |
0b86a832 | 2252 | { |
b2117a39 MX |
2253 | if (((struct btrfs_device_info *)dev_info1)->max_avail > |
2254 | ((struct btrfs_device_info *)dev_info2)->max_avail) | |
2255 | return -1; | |
2256 | else if (((struct btrfs_device_info *)dev_info1)->max_avail < | |
2257 | ((struct btrfs_device_info *)dev_info2)->max_avail) | |
2258 | return 1; | |
2259 | else | |
2260 | return 0; | |
2261 | } | |
0b86a832 | 2262 | |
b2117a39 MX |
2263 | static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, |
2264 | int *num_stripes, int *min_stripes, | |
2265 | int *sub_stripes) | |
2266 | { | |
2267 | *num_stripes = 1; | |
2268 | *min_stripes = 1; | |
2269 | *sub_stripes = 0; | |
593060d7 | 2270 | |
a40a90a0 | 2271 | if (type & (BTRFS_BLOCK_GROUP_RAID0)) { |
b2117a39 MX |
2272 | *num_stripes = fs_devices->rw_devices; |
2273 | *min_stripes = 2; | |
a40a90a0 CM |
2274 | } |
2275 | if (type & (BTRFS_BLOCK_GROUP_DUP)) { | |
b2117a39 MX |
2276 | *num_stripes = 2; |
2277 | *min_stripes = 2; | |
a40a90a0 | 2278 | } |
8790d502 | 2279 | if (type & (BTRFS_BLOCK_GROUP_RAID1)) { |
f3eae7e8 | 2280 | if (fs_devices->rw_devices < 2) |
9b3f68b9 | 2281 | return -ENOSPC; |
b2117a39 MX |
2282 | *num_stripes = 2; |
2283 | *min_stripes = 2; | |
8790d502 | 2284 | } |
321aecc6 | 2285 | if (type & (BTRFS_BLOCK_GROUP_RAID10)) { |
b2117a39 MX |
2286 | *num_stripes = fs_devices->rw_devices; |
2287 | if (*num_stripes < 4) | |
321aecc6 | 2288 | return -ENOSPC; |
b2117a39 MX |
2289 | *num_stripes &= ~(u32)1; |
2290 | *sub_stripes = 2; | |
2291 | *min_stripes = 4; | |
321aecc6 | 2292 | } |
9b3f68b9 | 2293 | |
b2117a39 MX |
2294 | return 0; |
2295 | } | |
2296 | ||
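The stripe counts chosen by __btrfs_calc_nstripes() above, summarized straight from the code (R stands for fs_devices->rw_devices):

/*
 *   RAID0 : num_stripes = R, min_stripes = 2
 *   DUP   : num_stripes = 2, min_stripes = 2
 *   RAID1 : num_stripes = 2, min_stripes = 2, requires R >= 2 (else -ENOSPC)
 *   RAID10: num_stripes = R rounded down to even, sub_stripes = 2,
 *           min_stripes = 4, requires R >= 4 (else -ENOSPC)
 *   single: num_stripes = 1, min_stripes = 1
 */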
2297 | static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, | |
2298 | u64 proposed_size, u64 type, | |
2299 | int num_stripes, int small_stripe) | |
2300 | { | |
2301 | int min_stripe_size = 1 * 1024 * 1024; | |
2302 | u64 calc_size = proposed_size; | |
2303 | u64 max_chunk_size = calc_size; | |
2304 | int ncopies = 1; | |
2305 | ||
2306 | if (type & (BTRFS_BLOCK_GROUP_RAID1 | | |
2307 | BTRFS_BLOCK_GROUP_DUP | | |
2308 | BTRFS_BLOCK_GROUP_RAID10)) | |
2309 | ncopies = 2; | |
2310 | ||
9b3f68b9 CM |
2311 | if (type & BTRFS_BLOCK_GROUP_DATA) { |
2312 | max_chunk_size = 10 * calc_size; | |
a40a90a0 | 2313 | min_stripe_size = 64 * 1024 * 1024; |
9b3f68b9 | 2314 | } else if (type & BTRFS_BLOCK_GROUP_METADATA) { |
83d3c969 | 2315 | max_chunk_size = 256 * 1024 * 1024; |
a40a90a0 CM |
2316 | min_stripe_size = 32 * 1024 * 1024; |
2317 | } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { | |
2318 | calc_size = 8 * 1024 * 1024; | |
2319 | max_chunk_size = calc_size * 2; | |
2320 | min_stripe_size = 1 * 1024 * 1024; | |
9b3f68b9 CM |
2321 | } |
2322 | ||
2b82032c YZ |
2323 | /* we don't want a chunk larger than 10% of writeable space */ |
2324 | max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), | |
2325 | max_chunk_size); | |
9b3f68b9 | 2326 | |
1974a3b4 MX |
2327 | if (calc_size * num_stripes > max_chunk_size * ncopies) { |
2328 | calc_size = max_chunk_size * ncopies; | |
9b3f68b9 | 2329 | do_div(calc_size, num_stripes); |
b2117a39 MX |
2330 | do_div(calc_size, BTRFS_STRIPE_LEN); |
2331 | calc_size *= BTRFS_STRIPE_LEN; | |
9b3f68b9 | 2332 | } |
0cad8a11 | 2333 | |
9b3f68b9 | 2334 | /* we don't want tiny stripes */ |
b2117a39 | 2335 | if (!small_stripe) |
0cad8a11 | 2336 | calc_size = max_t(u64, min_stripe_size, calc_size); |
9b3f68b9 | 2337 | |
9f680ce0 | 2338 | /* |
b2117a39 | 2339 | * we're about to do_div by the BTRFS_STRIPE_LEN so let's make sure |
9f680ce0 CM |
2340 | * we end up with something bigger than a stripe |
2341 | */ | |
b2117a39 MX |
2342 | calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); |
2343 | ||
2344 | do_div(calc_size, BTRFS_STRIPE_LEN); | |
2345 | calc_size *= BTRFS_STRIPE_LEN; | |
2346 | ||
2347 | return calc_size; | |
2348 | } | |
2349 | ||
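A worked example of the clamping and rounding in __btrfs_calc_stripe_size(), with invented numbers and BTRFS_STRIPE_LEN taken to be 64K:

/*
 * RAID0 data chunk, proposed_size = 1GB, num_stripes = 4, ncopies = 1,
 * fs_devices->total_rw_bytes = 20GB:
 *
 *   max_chunk_size = min(10% of 20GB, 10 * 1GB) = 2GB
 *   1GB * 4 stripes = 4GB > 2GB * 1 copy, so
 *   calc_size = 2GB / 4 = 512MB, already a multiple of 64K
 *   512MB is above the 64MB data min_stripe_size, so the result is
 *   512MB per stripe, i.e. a 2GB RAID0 chunk.
 */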
2350 | static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, | |
2351 | int num_stripes) | |
2352 | { | |
2353 | struct map_lookup *new; | |
2354 | size_t len = map_lookup_size(num_stripes); | |
2355 | ||
2356 | BUG_ON(map->num_stripes < num_stripes); | |
2357 | ||
2358 | if (map->num_stripes == num_stripes) | |
2359 | return map; | |
2360 | ||
2361 | new = kmalloc(len, GFP_NOFS); | |
2362 | if (!new) { | |
2363 | /* just change map->num_stripes */ | |
2364 | map->num_stripes = num_stripes; | |
2365 | return map; | |
2366 | } | |
2367 | ||
2368 | memcpy(new, map, len); | |
2369 | new->num_stripes = num_stripes; | |
2370 | kfree(map); | |
2371 | return new; | |
2372 | } | |
2373 | ||
2374 | /* | |
2375 | * helper to allocate device space from btrfs_device_info, in which we store | |
2376 | * the max free space information of every device. It is used when we cannot | |
2377 | * allocate chunks of the default size. | |
2378 | * | |
2379 | * With this helper, we can allocate a new chunk as large as possible. | |
2380 | */ | |
2381 | static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, | |
2382 | struct btrfs_fs_devices *fs_devices, | |
2383 | struct btrfs_device_info *devices, | |
2384 | int nr_device, u64 type, | |
2385 | struct map_lookup **map_lookup, | |
2386 | int min_stripes, u64 *stripe_size) | |
2387 | { | |
2388 | int i, index, sort_again = 0; | |
2389 | int min_devices = min_stripes; | |
2390 | u64 max_avail, min_free; | |
2391 | struct map_lookup *map = *map_lookup; | |
2392 | int ret; | |
9f680ce0 | 2393 | |
b2117a39 MX |
2394 | if (nr_device < min_stripes) |
2395 | return -ENOSPC; | |
2396 | ||
2397 | btrfs_descending_sort_devices(devices, nr_device); | |
2398 | ||
2399 | max_avail = devices[0].max_avail; | |
2400 | if (!max_avail) | |
2401 | return -ENOSPC; | |
2402 | ||
2403 | for (i = 0; i < nr_device; i++) { | |
2404 | /* | |
2405 | * if dev_offset = 0, it means the free space of this device | |
2406 | * is less than what we need, and we didn't search for the max avail | |
2407 | * extent on this device yet, so do it now. | |
2408 | */ | |
2409 | if (!devices[i].dev_offset) { | |
2410 | ret = find_free_dev_extent(trans, devices[i].dev, | |
2411 | max_avail, | |
2412 | &devices[i].dev_offset, | |
2413 | &devices[i].max_avail); | |
2414 | if (ret != 0 && ret != -ENOSPC) | |
2415 | return ret; | |
2416 | sort_again = 1; | |
2417 | } | |
2418 | } | |
2419 | ||
2420 | /* we updated the max avail free extent of each device, so sort again */ | |
2421 | if (sort_again) | |
2422 | btrfs_descending_sort_devices(devices, nr_device); | |
2423 | ||
2424 | if (type & BTRFS_BLOCK_GROUP_DUP) | |
2425 | min_devices = 1; | |
2426 | ||
2427 | if (!devices[min_devices - 1].max_avail) | |
2428 | return -ENOSPC; | |
2429 | ||
2430 | max_avail = devices[min_devices - 1].max_avail; | |
2431 | if (type & BTRFS_BLOCK_GROUP_DUP) | |
2432 | do_div(max_avail, 2); | |
2433 | ||
2434 | max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, | |
2435 | min_stripes, 1); | |
2436 | if (type & BTRFS_BLOCK_GROUP_DUP) | |
2437 | min_free = max_avail * 2; | |
2438 | else | |
2439 | min_free = max_avail; | |
2440 | ||
2441 | if (min_free > devices[min_devices - 1].max_avail) | |
2442 | return -ENOSPC; | |
2443 | ||
2444 | map = __shrink_map_lookup_stripes(map, min_stripes); | |
2445 | *stripe_size = max_avail; | |
2446 | ||
2447 | index = 0; | |
2448 | for (i = 0; i < min_stripes; i++) { | |
2449 | map->stripes[i].dev = devices[index].dev; | |
2450 | map->stripes[i].physical = devices[index].dev_offset; | |
2451 | if (type & BTRFS_BLOCK_GROUP_DUP) { | |
2452 | i++; | |
2453 | map->stripes[i].dev = devices[index].dev; | |
2454 | map->stripes[i].physical = devices[index].dev_offset + | |
2455 | max_avail; | |
2456 | } | |
2457 | index++; | |
2458 | } | |
2459 | *map_lookup = map; | |
9f680ce0 | 2460 | |
b2117a39 MX |
2461 | return 0; |
2462 | } | |
2463 | ||
2464 | static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |
2465 | struct btrfs_root *extent_root, | |
2466 | struct map_lookup **map_ret, | |
2467 | u64 *num_bytes, u64 *stripe_size, | |
2468 | u64 start, u64 type) | |
2469 | { | |
2470 | struct btrfs_fs_info *info = extent_root->fs_info; | |
2471 | struct btrfs_device *device = NULL; | |
2472 | struct btrfs_fs_devices *fs_devices = info->fs_devices; | |
2473 | struct list_head *cur; | |
2474 | struct map_lookup *map; | |
2475 | struct extent_map_tree *em_tree; | |
2476 | struct extent_map *em; | |
2477 | struct btrfs_device_info *devices_info; | |
2478 | struct list_head private_devs; | |
2479 | u64 calc_size = 1024 * 1024 * 1024; | |
2480 | u64 min_free; | |
2481 | u64 avail; | |
2482 | u64 dev_offset; | |
2483 | int num_stripes; | |
2484 | int min_stripes; | |
2485 | int sub_stripes; | |
2486 | int min_devices; /* the min number of devices we need */ | |
2487 | int i; | |
2488 | int ret; | |
2489 | int index; | |
2490 | ||
2491 | if ((type & BTRFS_BLOCK_GROUP_RAID1) && | |
2492 | (type & BTRFS_BLOCK_GROUP_DUP)) { | |
2493 | WARN_ON(1); | |
2494 | type &= ~BTRFS_BLOCK_GROUP_DUP; | |
2495 | } | |
2496 | if (list_empty(&fs_devices->alloc_list)) | |
2497 | return -ENOSPC; | |
2498 | ||
2499 | ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, | |
2500 | &min_stripes, &sub_stripes); | |
2501 | if (ret) | |
2502 | return ret; | |
2503 | ||
2504 | devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, | |
2505 | GFP_NOFS); | |
2506 | if (!devices_info) | |
2507 | return -ENOMEM; | |
2508 | ||
2509 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); | |
2510 | if (!map) { | |
2511 | ret = -ENOMEM; | |
2512 | goto error; | |
2513 | } | |
2514 | map->num_stripes = num_stripes; | |
9b3f68b9 | 2515 | |
2b82032c | 2516 | cur = fs_devices->alloc_list.next; |
6324fbf3 | 2517 | index = 0; |
b2117a39 | 2518 | i = 0; |
611f0e00 | 2519 | |
b2117a39 MX |
2520 | calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, |
2521 | num_stripes, 0); | |
2522 | ||
2523 | if (type & BTRFS_BLOCK_GROUP_DUP) { | |
611f0e00 | 2524 | min_free = calc_size * 2; |
b2117a39 MX |
2525 | min_devices = 1; |
2526 | } else { | |
9b3f68b9 | 2527 | min_free = calc_size; |
b2117a39 MX |
2528 | min_devices = min_stripes; |
2529 | } | |
ad5bd91e | 2530 | |
2b82032c | 2531 | INIT_LIST_HEAD(&private_devs); |
d397712b | 2532 | while (index < num_stripes) { |
b3075717 | 2533 | device = list_entry(cur, struct btrfs_device, dev_alloc_list); |
2b82032c | 2534 | BUG_ON(!device->writeable); |
dfe25020 CM |
2535 | if (device->total_bytes > device->bytes_used) |
2536 | avail = device->total_bytes - device->bytes_used; | |
2537 | else | |
2538 | avail = 0; | |
6324fbf3 | 2539 | cur = cur->next; |
8f18cf13 | 2540 | |
dfe25020 | 2541 | if (device->in_fs_metadata && avail >= min_free) { |
b2117a39 MX |
2542 | ret = find_free_dev_extent(trans, device, min_free, |
2543 | &devices_info[i].dev_offset, | |
2544 | &devices_info[i].max_avail); | |
8f18cf13 CM |
2545 | if (ret == 0) { |
2546 | list_move_tail(&device->dev_alloc_list, | |
2547 | &private_devs); | |
2b82032c | 2548 | map->stripes[index].dev = device; |
b2117a39 MX |
2549 | map->stripes[index].physical = |
2550 | devices_info[i].dev_offset; | |
611f0e00 | 2551 | index++; |
2b82032c YZ |
2552 | if (type & BTRFS_BLOCK_GROUP_DUP) { |
2553 | map->stripes[index].dev = device; | |
2554 | map->stripes[index].physical = | |
b2117a39 MX |
2555 | devices_info[i].dev_offset + |
2556 | calc_size; | |
8f18cf13 | 2557 | index++; |
2b82032c | 2558 | } |
b2117a39 MX |
2559 | } else if (ret != -ENOSPC) |
2560 | goto error; | |
2561 | ||
2562 | devices_info[i].dev = device; | |
2563 | i++; | |
2564 | } else if (device->in_fs_metadata && | |
2565 | avail >= BTRFS_STRIPE_LEN) { | |
2566 | devices_info[i].dev = device; | |
2567 | devices_info[i].max_avail = avail; | |
2568 | i++; | |
2569 | } | |
2570 | ||
2b82032c | 2571 | if (cur == &fs_devices->alloc_list) |
6324fbf3 CM |
2572 | break; |
2573 | } | |
b2117a39 | 2574 | |
2b82032c | 2575 | list_splice(&private_devs, &fs_devices->alloc_list); |
6324fbf3 | 2576 | if (index < num_stripes) { |
a40a90a0 CM |
2577 | if (index >= min_stripes) { |
2578 | num_stripes = index; | |
2579 | if (type & (BTRFS_BLOCK_GROUP_RAID10)) { | |
2580 | num_stripes /= sub_stripes; | |
2581 | num_stripes *= sub_stripes; | |
2582 | } | |
b2117a39 MX |
2583 | |
2584 | map = __shrink_map_lookup_stripes(map, num_stripes); | |
2585 | } else if (i >= min_devices) { | |
2586 | ret = __btrfs_alloc_tiny_space(trans, fs_devices, | |
2587 | devices_info, i, type, | |
2588 | &map, min_stripes, | |
2589 | &calc_size); | |
2590 | if (ret) | |
2591 | goto error; | |
2592 | } else { | |
2593 | ret = -ENOSPC; | |
2594 | goto error; | |
6324fbf3 | 2595 | } |
6324fbf3 | 2596 | } |
2b82032c | 2597 | map->sector_size = extent_root->sectorsize; |
b2117a39 MX |
2598 | map->stripe_len = BTRFS_STRIPE_LEN; |
2599 | map->io_align = BTRFS_STRIPE_LEN; | |
2600 | map->io_width = BTRFS_STRIPE_LEN; | |
2b82032c | 2601 | map->type = type; |
2b82032c | 2602 | map->sub_stripes = sub_stripes; |
0b86a832 | 2603 | |
2b82032c YZ |
2604 | *map_ret = map; |
2605 | *stripe_size = calc_size; | |
2606 | *num_bytes = chunk_bytes_by_type(type, calc_size, | |
b2117a39 | 2607 | map->num_stripes, sub_stripes); |
0b86a832 | 2608 | |
2b82032c YZ |
2609 | em = alloc_extent_map(GFP_NOFS); |
2610 | if (!em) { | |
b2117a39 MX |
2611 | ret = -ENOMEM; |
2612 | goto error; | |
593060d7 | 2613 | } |
2b82032c YZ |
2614 | em->bdev = (struct block_device *)map; |
2615 | em->start = start; | |
2616 | em->len = *num_bytes; | |
2617 | em->block_start = 0; | |
2618 | em->block_len = em->len; | |
593060d7 | 2619 | |
2b82032c | 2620 | em_tree = &extent_root->fs_info->mapping_tree.map_tree; |
890871be | 2621 | write_lock(&em_tree->lock); |
2b82032c | 2622 | ret = add_extent_mapping(em_tree, em); |
890871be | 2623 | write_unlock(&em_tree->lock); |
2b82032c YZ |
2624 | BUG_ON(ret); |
2625 | free_extent_map(em); | |
0b86a832 | 2626 | |
2b82032c YZ |
2627 | ret = btrfs_make_block_group(trans, extent_root, 0, type, |
2628 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, | |
2629 | start, *num_bytes); | |
2630 | BUG_ON(ret); | |
611f0e00 | 2631 | |
2b82032c YZ |
2632 | index = 0; |
2633 | while (index < map->num_stripes) { | |
2634 | device = map->stripes[index].dev; | |
2635 | dev_offset = map->stripes[index].physical; | |
0b86a832 CM |
2636 | |
2637 | ret = btrfs_alloc_dev_extent(trans, device, | |
2b82032c YZ |
2638 | info->chunk_root->root_key.objectid, |
2639 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, | |
2640 | start, dev_offset, calc_size); | |
0b86a832 | 2641 | BUG_ON(ret); |
2b82032c YZ |
2642 | index++; |
2643 | } | |
2644 | ||
b2117a39 | 2645 | kfree(devices_info); |
2b82032c | 2646 | return 0; |
b2117a39 MX |
2647 | |
2648 | error: | |
2649 | kfree(map); | |
2650 | kfree(devices_info); | |
2651 | return ret; | |
2b82032c YZ |
2652 | } |
2653 | ||
2654 | static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, | |
2655 | struct btrfs_root *extent_root, | |
2656 | struct map_lookup *map, u64 chunk_offset, | |
2657 | u64 chunk_size, u64 stripe_size) | |
2658 | { | |
2659 | u64 dev_offset; | |
2660 | struct btrfs_key key; | |
2661 | struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; | |
2662 | struct btrfs_device *device; | |
2663 | struct btrfs_chunk *chunk; | |
2664 | struct btrfs_stripe *stripe; | |
2665 | size_t item_size = btrfs_chunk_item_size(map->num_stripes); | |
2666 | int index = 0; | |
2667 | int ret; | |
2668 | ||
2669 | chunk = kzalloc(item_size, GFP_NOFS); | |
2670 | if (!chunk) | |
2671 | return -ENOMEM; | |
2672 | ||
2673 | index = 0; | |
2674 | while (index < map->num_stripes) { | |
2675 | device = map->stripes[index].dev; | |
2676 | device->bytes_used += stripe_size; | |
0b86a832 CM |
2677 | ret = btrfs_update_device(trans, device); |
2678 | BUG_ON(ret); | |
2b82032c YZ |
2679 | index++; |
2680 | } | |
2681 | ||
2682 | index = 0; | |
2683 | stripe = &chunk->stripe; | |
2684 | while (index < map->num_stripes) { | |
2685 | device = map->stripes[index].dev; | |
2686 | dev_offset = map->stripes[index].physical; | |
0b86a832 | 2687 | |
e17cade2 CM |
2688 | btrfs_set_stack_stripe_devid(stripe, device->devid); |
2689 | btrfs_set_stack_stripe_offset(stripe, dev_offset); | |
2690 | memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE); | |
2b82032c | 2691 | stripe++; |
0b86a832 CM |
2692 | index++; |
2693 | } | |
2694 | ||
2b82032c | 2695 | btrfs_set_stack_chunk_length(chunk, chunk_size); |
0b86a832 | 2696 | btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); |
2b82032c YZ |
2697 | btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); |
2698 | btrfs_set_stack_chunk_type(chunk, map->type); | |
2699 | btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); | |
2700 | btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); | |
2701 | btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); | |
0b86a832 | 2702 | btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); |
2b82032c | 2703 | btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); |
0b86a832 | 2704 | |
2b82032c YZ |
2705 | key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; |
2706 | key.type = BTRFS_CHUNK_ITEM_KEY; | |
2707 | key.offset = chunk_offset; | |
0b86a832 | 2708 | |
2b82032c YZ |
2709 | ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); |
2710 | BUG_ON(ret); | |
0b86a832 | 2711 | |
2b82032c YZ |
2712 | if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { |
2713 | ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk, | |
2714 | item_size); | |
8f18cf13 CM |
2715 | BUG_ON(ret); |
2716 | } | |
0b86a832 | 2717 | kfree(chunk); |
2b82032c YZ |
2718 | return 0; |
2719 | } | |
0b86a832 | 2720 | |
2b82032c YZ |
2721 | /* |
2722 | * Chunk allocation falls into two parts. The first part does the work | |
2723 | * that makes the newly allocated chunk usable, but does not perform any | |
2724 | * operation that modifies the chunk tree. The second part does the work | |
2725 | * that requires modifying the chunk tree. This division is important for the | |
2726 | * bootstrap process of adding storage to a seed btrfs. | |
2727 | */ | |
2728 | int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |
2729 | struct btrfs_root *extent_root, u64 type) | |
2730 | { | |
2731 | u64 chunk_offset; | |
2732 | u64 chunk_size; | |
2733 | u64 stripe_size; | |
2734 | struct map_lookup *map; | |
2735 | struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; | |
2736 | int ret; | |
2737 | ||
2738 | ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID, | |
2739 | &chunk_offset); | |
2740 | if (ret) | |
2741 | return ret; | |
2742 | ||
2743 | ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size, | |
2744 | &stripe_size, chunk_offset, type); | |
2745 | if (ret) | |
2746 | return ret; | |
2747 | ||
2748 | ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, | |
2749 | chunk_size, stripe_size); | |
2750 | BUG_ON(ret); | |
2751 | return 0; | |
2752 | } | |
2753 | ||
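Putting the two-part comment above into a call sequence, as implemented in this file (a summary, not a new interface):

/*
 * Ordinary allocation (btrfs_alloc_chunk above):
 *   find_next_chunk() -> __btrfs_alloc_chunk()   part one: map, dev extents,
 *                                                block group, extent mapping
 *                     -> __finish_chunk_alloc()  part two: chunk item, plus
 *                                                sys_chunk_array for SYSTEM
 *
 * Seed bootstrap (init_first_rw_device below): both part-one calls are made
 * first, for the metadata and system chunks, the device item is added, and
 * only then are the two part-two calls issued, because inserting chunk items
 * needs both new block groups to exist already.
 */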
d397712b | 2754 | static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, |
2b82032c YZ |
2755 | struct btrfs_root *root, |
2756 | struct btrfs_device *device) | |
2757 | { | |
2758 | u64 chunk_offset; | |
2759 | u64 sys_chunk_offset; | |
2760 | u64 chunk_size; | |
2761 | u64 sys_chunk_size; | |
2762 | u64 stripe_size; | |
2763 | u64 sys_stripe_size; | |
2764 | u64 alloc_profile; | |
2765 | struct map_lookup *map; | |
2766 | struct map_lookup *sys_map; | |
2767 | struct btrfs_fs_info *fs_info = root->fs_info; | |
2768 | struct btrfs_root *extent_root = fs_info->extent_root; | |
2769 | int ret; | |
2770 | ||
2771 | ret = find_next_chunk(fs_info->chunk_root, | |
2772 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset); | |
2773 | BUG_ON(ret); | |
2774 | ||
2775 | alloc_profile = BTRFS_BLOCK_GROUP_METADATA | | |
2776 | (fs_info->metadata_alloc_profile & | |
2777 | fs_info->avail_metadata_alloc_bits); | |
2778 | alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile); | |
2779 | ||
2780 | ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size, | |
2781 | &stripe_size, chunk_offset, alloc_profile); | |
2782 | BUG_ON(ret); | |
2783 | ||
2784 | sys_chunk_offset = chunk_offset + chunk_size; | |
2785 | ||
2786 | alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM | | |
2787 | (fs_info->system_alloc_profile & | |
2788 | fs_info->avail_system_alloc_bits); | |
2789 | alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile); | |
2790 | ||
2791 | ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map, | |
2792 | &sys_chunk_size, &sys_stripe_size, | |
2793 | sys_chunk_offset, alloc_profile); | |
2794 | BUG_ON(ret); | |
2795 | ||
2796 | ret = btrfs_add_device(trans, fs_info->chunk_root, device); | |
2797 | BUG_ON(ret); | |
2798 | ||
2799 | /* | |
2800 | * Modifying the chunk tree needs to allocate new blocks from both | |
2801 | * the system block group and the metadata block group. So we can only | |
2802 | * do operations that require modifying the chunk tree after both | |
2803 | * block groups have been created. | |
2804 | */ | |
2805 | ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, | |
2806 | chunk_size, stripe_size); | |
2807 | BUG_ON(ret); | |
2808 | ||
2809 | ret = __finish_chunk_alloc(trans, extent_root, sys_map, | |
2810 | sys_chunk_offset, sys_chunk_size, | |
2811 | sys_stripe_size); | |
b248a415 | 2812 | BUG_ON(ret); |
2b82032c YZ |
2813 | return 0; |
2814 | } | |
2815 | ||
2816 | int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) | |
2817 | { | |
2818 | struct extent_map *em; | |
2819 | struct map_lookup *map; | |
2820 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | |
2821 | int readonly = 0; | |
2822 | int i; | |
2823 | ||
890871be | 2824 | read_lock(&map_tree->map_tree.lock); |
2b82032c | 2825 | em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); |
890871be | 2826 | read_unlock(&map_tree->map_tree.lock); |
2b82032c YZ |
2827 | if (!em) |
2828 | return 1; | |
2829 | ||
f48b9075 JB |
2830 | if (btrfs_test_opt(root, DEGRADED)) { |
2831 | free_extent_map(em); | |
2832 | return 0; | |
2833 | } | |
2834 | ||
2b82032c YZ |
2835 | map = (struct map_lookup *)em->bdev; |
2836 | for (i = 0; i < map->num_stripes; i++) { | |
2837 | if (!map->stripes[i].dev->writeable) { | |
2838 | readonly = 1; | |
2839 | break; | |
2840 | } | |
2841 | } | |
0b86a832 | 2842 | free_extent_map(em); |
2b82032c | 2843 | return readonly; |
0b86a832 CM |
2844 | } |
2845 | ||
2846 | void btrfs_mapping_init(struct btrfs_mapping_tree *tree) | |
2847 | { | |
2848 | extent_map_tree_init(&tree->map_tree, GFP_NOFS); | |
2849 | } | |
2850 | ||
2851 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) | |
2852 | { | |
2853 | struct extent_map *em; | |
2854 | ||
d397712b | 2855 | while (1) { |
890871be | 2856 | write_lock(&tree->map_tree.lock); |
0b86a832 CM |
2857 | em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); |
2858 | if (em) | |
2859 | remove_extent_mapping(&tree->map_tree, em); | |
890871be | 2860 | write_unlock(&tree->map_tree.lock); |
0b86a832 CM |
2861 | if (!em) |
2862 | break; | |
2863 | kfree(em->bdev); | |
2864 | /* once for us */ | |
2865 | free_extent_map(em); | |
2866 | /* once for the tree */ | |
2867 | free_extent_map(em); | |
2868 | } | |
2869 | } | |
2870 | ||
f188591e CM |
2871 | int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len) |
2872 | { | |
2873 | struct extent_map *em; | |
2874 | struct map_lookup *map; | |
2875 | struct extent_map_tree *em_tree = &map_tree->map_tree; | |
2876 | int ret; | |
2877 | ||
890871be | 2878 | read_lock(&em_tree->lock); |
f188591e | 2879 | em = lookup_extent_mapping(em_tree, logical, len); |
890871be | 2880 | read_unlock(&em_tree->lock); |
f188591e CM |
2881 | BUG_ON(!em); |
2882 | ||
2883 | BUG_ON(em->start > logical || em->start + em->len < logical); | |
2884 | map = (struct map_lookup *)em->bdev; | |
2885 | if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) | |
2886 | ret = map->num_stripes; | |
321aecc6 CM |
2887 | else if (map->type & BTRFS_BLOCK_GROUP_RAID10) |
2888 | ret = map->sub_stripes; | |
f188591e CM |
2889 | else |
2890 | ret = 1; | |
2891 | free_extent_map(em); | |
f188591e CM |
2892 | return ret; |
2893 | } | |
2894 | ||
dfe25020 CM |
2895 | static int find_live_mirror(struct map_lookup *map, int first, int num, |
2896 | int optimal) | |
2897 | { | |
2898 | int i; | |
2899 | if (map->stripes[optimal].dev->bdev) | |
2900 | return optimal; | |
2901 | for (i = first; i < first + num; i++) { | |
2902 | if (map->stripes[i].dev->bdev) | |
2903 | return i; | |
2904 | } | |
2905 | /* we couldn't find one that doesn't fail. Just return something | |
2906 | * and the io error handling code will clean up eventually | |
2907 | */ | |
2908 | return optimal; | |
2909 | } | |
2910 | ||
f2d8d74d CM |
2911 | static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, |
2912 | u64 logical, u64 *length, | |
2913 | struct btrfs_multi_bio **multi_ret, | |
7eaceacc | 2914 | int mirror_num) |
0b86a832 CM |
2915 | { |
2916 | struct extent_map *em; | |
2917 | struct map_lookup *map; | |
2918 | struct extent_map_tree *em_tree = &map_tree->map_tree; | |
2919 | u64 offset; | |
593060d7 CM |
2920 | u64 stripe_offset; |
2921 | u64 stripe_nr; | |
cea9e445 | 2922 | int stripes_allocated = 8; |
321aecc6 | 2923 | int stripes_required = 1; |
593060d7 | 2924 | int stripe_index; |
cea9e445 | 2925 | int i; |
f2d8d74d | 2926 | int num_stripes; |
a236aed1 | 2927 | int max_errors = 0; |
cea9e445 | 2928 | struct btrfs_multi_bio *multi = NULL; |
0b86a832 | 2929 | |
7b6d91da | 2930 | if (multi_ret && !(rw & REQ_WRITE)) |
cea9e445 | 2931 | stripes_allocated = 1; |
cea9e445 CM |
2932 | again: |
2933 | if (multi_ret) { | |
2934 | multi = kzalloc(btrfs_multi_bio_size(stripes_allocated), | |
2935 | GFP_NOFS); | |
2936 | if (!multi) | |
2937 | return -ENOMEM; | |
a236aed1 CM |
2938 | |
2939 | atomic_set(&multi->error, 0); | |
cea9e445 | 2940 | } |
0b86a832 | 2941 | |
890871be | 2942 | read_lock(&em_tree->lock); |
0b86a832 | 2943 | em = lookup_extent_mapping(em_tree, logical, *length); |
890871be | 2944 | read_unlock(&em_tree->lock); |
f2d8d74d | 2945 | |
3b951516 | 2946 | if (!em) { |
d397712b CM |
2947 | printk(KERN_CRIT "unable to find logical %llu len %llu\n", |
2948 | (unsigned long long)logical, | |
2949 | (unsigned long long)*length); | |
f2d8d74d | 2950 | BUG(); |
3b951516 | 2951 | } |
0b86a832 CM |
2952 | |
2953 | BUG_ON(em->start > logical || em->start + em->len < logical); | |
2954 | map = (struct map_lookup *)em->bdev; | |
2955 | offset = logical - em->start; | |
593060d7 | 2956 | |
f188591e CM |
2957 | if (mirror_num > map->num_stripes) |
2958 | mirror_num = 0; | |
2959 | ||
cea9e445 | 2960 | /* if our multi bio struct is too small, back off and try again */ |
7b6d91da | 2961 | if (rw & REQ_WRITE) { |
321aecc6 CM |
2962 | if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | |
2963 | BTRFS_BLOCK_GROUP_DUP)) { | |
2964 | stripes_required = map->num_stripes; | |
a236aed1 | 2965 | max_errors = 1; |
321aecc6 CM |
2966 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { |
2967 | stripes_required = map->sub_stripes; | |
a236aed1 | 2968 | max_errors = 1; |
321aecc6 CM |
2969 | } |
2970 | } | |
7b6d91da | 2971 | if (multi_ret && (rw & REQ_WRITE) && |
321aecc6 | 2972 | stripes_allocated < stripes_required) { |
cea9e445 | 2973 | stripes_allocated = map->num_stripes; |
cea9e445 CM |
2974 | free_extent_map(em); |
2975 | kfree(multi); | |
2976 | goto again; | |
2977 | } | |
593060d7 CM |
2978 | stripe_nr = offset; |
2979 | /* | |
2980 | * stripe_nr counts the total number of stripes we have to stride | |
2981 | * to get to this block | |
2982 | */ | |
2983 | do_div(stripe_nr, map->stripe_len); | |
2984 | ||
2985 | stripe_offset = stripe_nr * map->stripe_len; | |
2986 | BUG_ON(offset < stripe_offset); | |
2987 | ||
2988 | /* stripe_offset is the offset of this block in its stripe */ | |
2989 | stripe_offset = offset - stripe_offset; | |
2990 | ||
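/*
 * Worked example (numbers invented for illustration): with
 * map->stripe_len = 64K and offset = 200K into the chunk,
 *   stripe_nr     = 200K / 64K = 3        three whole stripes precede us
 *   stripe_offset = 200K - 3 * 64K = 8K   our offset within stripe 3
 */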
cea9e445 | 2991 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | |
321aecc6 | 2992 | BTRFS_BLOCK_GROUP_RAID10 | |
cea9e445 CM |
2993 | BTRFS_BLOCK_GROUP_DUP)) { |
2994 | /* we limit the length of each bio to what fits in a stripe */ | |
2995 | *length = min_t(u64, em->len - offset, | |
2996 | map->stripe_len - stripe_offset); | |
2997 | } else { | |
2998 | *length = em->len - offset; | |
2999 | } | |
f2d8d74d | 3000 | |
7eaceacc | 3001 | if (!multi_ret) |
cea9e445 CM |
3002 | goto out; |
3003 | ||
f2d8d74d | 3004 | num_stripes = 1; |
cea9e445 | 3005 | stripe_index = 0; |
8790d502 | 3006 | if (map->type & BTRFS_BLOCK_GROUP_RAID1) { |
7eaceacc | 3007 | if (rw & REQ_WRITE) |
f2d8d74d | 3008 | num_stripes = map->num_stripes; |
2fff734f | 3009 | else if (mirror_num) |
f188591e | 3010 | stripe_index = mirror_num - 1; |
dfe25020 CM |
3011 | else { |
3012 | stripe_index = find_live_mirror(map, 0, | |
3013 | map->num_stripes, | |
3014 | current->pid % map->num_stripes); | |
3015 | } | |
2fff734f | 3016 | |
611f0e00 | 3017 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { |
7b6d91da | 3018 | if (rw & REQ_WRITE) |
f2d8d74d | 3019 | num_stripes = map->num_stripes; |
f188591e CM |
3020 | else if (mirror_num) |
3021 | stripe_index = mirror_num - 1; | |
2fff734f | 3022 | |
321aecc6 CM |
3023 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { |
3024 | int factor = map->num_stripes / map->sub_stripes; | |
321aecc6 CM |
3025 | |
3026 | stripe_index = do_div(stripe_nr, factor); | |
3027 | stripe_index *= map->sub_stripes; | |
3028 | ||
7eaceacc | 3029 | if (rw & REQ_WRITE) |
f2d8d74d | 3030 | num_stripes = map->sub_stripes; |
321aecc6 CM |
3031 | else if (mirror_num) |
3032 | stripe_index += mirror_num - 1; | |
dfe25020 CM |
3033 | else { |
3034 | stripe_index = find_live_mirror(map, stripe_index, | |
3035 | map->sub_stripes, stripe_index + | |
3036 | current->pid % map->sub_stripes); | |
3037 | } | |
8790d502 CM |
3038 | } else { |
3039 | /* | |
3040 | * after this do_div call, stripe_nr is the number of stripes | |
3041 | * on this device we have to walk to find the data, and | |
3042 | * stripe_index is the number of our device in the stripe array | |
3043 | */ | |
3044 | stripe_index = do_div(stripe_nr, map->num_stripes); | |
3045 | } | |
593060d7 | 3046 | BUG_ON(stripe_index >= map->num_stripes); |
cea9e445 | 3047 | |
f2d8d74d | 3048 | for (i = 0; i < num_stripes; i++) { |
7eaceacc JA |
3049 | multi->stripes[i].physical = |
3050 | map->stripes[stripe_index].physical + | |
3051 | stripe_offset + stripe_nr * map->stripe_len; | |
3052 | multi->stripes[i].dev = map->stripes[stripe_index].dev; | |
cea9e445 | 3053 | stripe_index++; |
593060d7 | 3054 | } |
f2d8d74d CM |
3055 | if (multi_ret) { |
3056 | *multi_ret = multi; | |
3057 | multi->num_stripes = num_stripes; | |
a236aed1 | 3058 | multi->max_errors = max_errors; |
f2d8d74d | 3059 | } |
cea9e445 | 3060 | out: |
0b86a832 | 3061 | free_extent_map(em); |
0b86a832 CM |
3062 | return 0; |
3063 | } | |
3064 | ||
f2d8d74d CM |
3065 | int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, |
3066 | u64 logical, u64 *length, | |
3067 | struct btrfs_multi_bio **multi_ret, int mirror_num) | |
3068 | { | |
3069 | return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, | |
7eaceacc | 3070 | mirror_num); |
f2d8d74d CM |
3071 | } |
3072 | ||
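/*
 * A minimal user-space sketch, not part of the kernel source, of the
 * RAID0 mapping that __btrfs_map_block performs above: given a logical
 * offset into a chunk, the stripe length and the stripe count, derive
 * which device holds the block and how far into that device's slice of
 * the chunk it sits.  The function name map_raid0 and the numbers in
 * main() are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static void map_raid0(uint64_t offset, uint64_t stripe_len, int num_stripes,
		      int *stripe_index, uint64_t *dev_offset)
{
	uint64_t stripe_nr = offset / stripe_len;	/* stripes strided over */
	uint64_t stripe_offset = offset - stripe_nr * stripe_len;

	*stripe_index = stripe_nr % num_stripes;	/* which device */
	/* stripes already placed on this device, plus the intra-stripe offset */
	*dev_offset = (stripe_nr / num_stripes) * stripe_len + stripe_offset;
}

int main(void)
{
	int idx;
	uint64_t off;

	/* 300K into a chunk striped 64K-wide across 3 devices */
	map_raid0(300 * 1024, 64 * 1024, 3, &idx, &off);
	printf("stripe_index=%d dev_offset=%llu\n", idx,
	       (unsigned long long)off);	/* stripe_index=1 dev_offset=110592 */
	return 0;
}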
a512bbf8 YZ |
3073 | int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, |
3074 | u64 chunk_start, u64 physical, u64 devid, | |
3075 | u64 **logical, int *naddrs, int *stripe_len) | |
3076 | { | |
3077 | struct extent_map_tree *em_tree = &map_tree->map_tree; | |
3078 | struct extent_map *em; | |
3079 | struct map_lookup *map; | |
3080 | u64 *buf; | |
3081 | u64 bytenr; | |
3082 | u64 length; | |
3083 | u64 stripe_nr; | |
3084 | int i, j, nr = 0; | |
3085 | ||
890871be | 3086 | read_lock(&em_tree->lock); |
a512bbf8 | 3087 | em = lookup_extent_mapping(em_tree, chunk_start, 1); |
890871be | 3088 | read_unlock(&em_tree->lock); |
a512bbf8 YZ |
3089 | |
3090 | BUG_ON(!em || em->start != chunk_start); | |
3091 | map = (struct map_lookup *)em->bdev; | |
3092 | ||
3093 | length = em->len; | |
3094 | if (map->type & BTRFS_BLOCK_GROUP_RAID10) | |
3095 | do_div(length, map->num_stripes / map->sub_stripes); | |
3096 | else if (map->type & BTRFS_BLOCK_GROUP_RAID0) | |
3097 | do_div(length, map->num_stripes); | |
3098 | ||
3099 | buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); | |
3100 | BUG_ON(!buf); | |
3101 | ||
3102 | for (i = 0; i < map->num_stripes; i++) { | |
3103 | if (devid && map->stripes[i].dev->devid != devid) | |
3104 | continue; | |
3105 | if (map->stripes[i].physical > physical || | |
3106 | map->stripes[i].physical + length <= physical) | |
3107 | continue; | |
3108 | ||
3109 | stripe_nr = physical - map->stripes[i].physical; | |
3110 | do_div(stripe_nr, map->stripe_len); | |
3111 | ||
3112 | if (map->type & BTRFS_BLOCK_GROUP_RAID10) { | |
3113 | stripe_nr = stripe_nr * map->num_stripes + i; | |
3114 | do_div(stripe_nr, map->sub_stripes); | |
3115 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | |
3116 | stripe_nr = stripe_nr * map->num_stripes + i; | |
3117 | } | |
3118 | bytenr = chunk_start + stripe_nr * map->stripe_len; | |
934d375b | 3119 | WARN_ON(nr >= map->num_stripes); |
a512bbf8 YZ |
3120 | for (j = 0; j < nr; j++) { |
3121 | if (buf[j] == bytenr) | |
3122 | break; | |
3123 | } | |
934d375b CM |
3124 | if (j == nr) { |
3125 | WARN_ON(nr >= map->num_stripes); | |
a512bbf8 | 3126 | buf[nr++] = bytenr; |
934d375b | 3127 | } |
a512bbf8 YZ |
3128 | } |
3129 | ||
a512bbf8 YZ |
3130 | *logical = buf; |
3131 | *naddrs = nr; | |
3132 | *stripe_len = map->stripe_len; | |
3133 | ||
3134 | free_extent_map(em); | |
3135 | return 0; | |
f2d8d74d CM |
3136 | } |
3137 | ||
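/*
 * Example of the reverse mapping above, with assumed geometry: a RAID0
 * chunk at logical 1G, stripe_len = 64K and two stripes.  A physical
 * address 128K past stripe 0's start gives stripe_nr = 2, the RAID0
 * branch rescales it to stripe_nr * num_stripes + i = 4, and the block
 * is reported at logical 1G + 4 * 64K.  For RAID10 the extra do_div()
 * by sub_stripes makes the mirrored copies land on the same logical
 * address, which is why buf[] is checked for duplicates before an
 * entry is added.
 */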
8790d502 | 3138 | static void end_bio_multi_stripe(struct bio *bio, int err) |
8790d502 | 3139 | { |
cea9e445 | 3140 | struct btrfs_multi_bio *multi = bio->bi_private; |
7d2b4daa | 3141 | int is_orig_bio = 0; |
8790d502 | 3142 | |
8790d502 | 3143 | if (err) |
a236aed1 | 3144 | atomic_inc(&multi->error); |
8790d502 | 3145 | |
7d2b4daa CM |
3146 | if (bio == multi->orig_bio) |
3147 | is_orig_bio = 1; | |
3148 | ||
cea9e445 | 3149 | if (atomic_dec_and_test(&multi->stripes_pending)) { |
7d2b4daa CM |
3150 | if (!is_orig_bio) { |
3151 | bio_put(bio); | |
3152 | bio = multi->orig_bio; | |
3153 | } | |
8790d502 CM |
3154 | bio->bi_private = multi->private; |
3155 | bio->bi_end_io = multi->end_io; | |
a236aed1 CM |
3156 | /* only send an error to the higher layers if it is |
3157 | * beyond the tolerance of the multi-bio | |
3158 | */ | |
1259ab75 | 3159 | if (atomic_read(&multi->error) > multi->max_errors) { |
a236aed1 | 3160 | err = -EIO; |
1259ab75 CM |
3161 | } else if (err) { |
3162 | /* | |
3163 | * this bio is actually up to date, we didn't | |
3164 | * go over the max number of errors | |
3165 | */ | |
3166 | set_bit(BIO_UPTODATE, &bio->bi_flags); | |
a236aed1 | 3167 | err = 0; |
1259ab75 | 3168 | } |
8790d502 CM |
3169 | kfree(multi); |
3170 | ||
3171 | bio_endio(bio, err); | |
7d2b4daa | 3172 | } else if (!is_orig_bio) { |
8790d502 CM |
3173 | bio_put(bio); |
3174 | } | |
8790d502 CM |
3175 | } |
3176 | ||
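/*
 * Concretely: a RAID1 or DUP write is fanned out to both copies with
 * max_errors = 1, so if one copy fails and the other completes,
 * atomic_read(&multi->error) == 1 is not beyond the tolerance and the
 * original bio still completes with BIO_UPTODATE set; only a second
 * failed copy surfaces -EIO to the upper layers.
 */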
8b712842 CM |
3177 | struct async_sched { |
3178 | struct bio *bio; | |
3179 | int rw; | |
3180 | struct btrfs_fs_info *info; | |
3181 | struct btrfs_work work; | |
3182 | }; | |
3183 | ||
3184 | /* | |
3185 | * see run_scheduled_bios for a description of why bios are collected for | |
3186 | * async submit. | |
3187 | * | |
3188 | * This will add one bio to the pending list for a device and make sure | |
3189 | * the work struct is scheduled. | |
3190 | */ | |
d397712b | 3191 | static noinline int schedule_bio(struct btrfs_root *root, |
a1b32a59 CM |
3192 | struct btrfs_device *device, |
3193 | int rw, struct bio *bio) | |
8b712842 CM |
3194 | { |
3195 | int should_queue = 1; | |
ffbd517d | 3196 | struct btrfs_pending_bios *pending_bios; |
8b712842 CM |
3197 | |
3198 | /* don't bother with additional async steps for reads, right now */ | |
7b6d91da | 3199 | if (!(rw & REQ_WRITE)) { |
492bb6de | 3200 | bio_get(bio); |
8b712842 | 3201 | submit_bio(rw, bio); |
492bb6de | 3202 | bio_put(bio); |
8b712842 CM |
3203 | return 0; |
3204 | } | |
3205 | ||
3206 | /* | |
0986fe9e | 3207 | * nr_async_bios allows us to reliably return congestion to the |
8b712842 CM |
3208 | * higher layers. Otherwise, the async bio makes it appear we have |
3209 | * made progress against dirty pages when we've really just put it | |
3210 | * on a queue for later | |
3211 | */ | |
0986fe9e | 3212 | atomic_inc(&root->fs_info->nr_async_bios); |
492bb6de | 3213 | WARN_ON(bio->bi_next); |
8b712842 CM |
3214 | bio->bi_next = NULL; |
3215 | bio->bi_rw |= rw; | |
3216 | ||
3217 | spin_lock(&device->io_lock); | |
7b6d91da | 3218 | if (bio->bi_rw & REQ_SYNC) |
ffbd517d CM |
3219 | pending_bios = &device->pending_sync_bios; |
3220 | else | |
3221 | pending_bios = &device->pending_bios; | |
8b712842 | 3222 | |
ffbd517d CM |
3223 | if (pending_bios->tail) |
3224 | pending_bios->tail->bi_next = bio; | |
8b712842 | 3225 | |
ffbd517d CM |
3226 | pending_bios->tail = bio; |
3227 | if (!pending_bios->head) | |
3228 | pending_bios->head = bio; | |
8b712842 CM |
3229 | if (device->running_pending) |
3230 | should_queue = 0; | |
3231 | ||
3232 | spin_unlock(&device->io_lock); | |
3233 | ||
3234 | if (should_queue) | |
1cc127b5 CM |
3235 | btrfs_queue_worker(&root->fs_info->submit_workers, |
3236 | &device->work); | |
8b712842 CM |
3237 | return 0; |
3238 | } | |
3239 | ||
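/*
 * In short: reads bypass the queue entirely, while writes are chained
 * onto either device->pending_sync_bios (for REQ_SYNC bios) or
 * device->pending_bios, and the submit_workers pool is only kicked when
 * the device is not already draining its queue (running_pending), on
 * the assumption that the worker that is already running will pick the
 * new bio up itself.
 */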
f188591e | 3240 | int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, |
8b712842 | 3241 | int mirror_num, int async_submit) |
0b86a832 CM |
3242 | { |
3243 | struct btrfs_mapping_tree *map_tree; | |
3244 | struct btrfs_device *dev; | |
8790d502 | 3245 | struct bio *first_bio = bio; |
a62b9401 | 3246 | u64 logical = (u64)bio->bi_sector << 9; |
0b86a832 CM |
3247 | u64 length = 0; |
3248 | u64 map_length; | |
cea9e445 | 3249 | struct btrfs_multi_bio *multi = NULL; |
0b86a832 | 3250 | int ret; |
8790d502 CM |
3251 | int dev_nr = 0; |
3252 | int total_devs = 1; | |
0b86a832 | 3253 | |
f2d8d74d | 3254 | length = bio->bi_size; |
0b86a832 CM |
3255 | map_tree = &root->fs_info->mapping_tree; |
3256 | map_length = length; | |
cea9e445 | 3257 | |
f188591e CM |
3258 | ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi, |
3259 | mirror_num); | |
cea9e445 CM |
3260 | BUG_ON(ret); |
3261 | ||
3262 | total_devs = multi->num_stripes; | |
3263 | if (map_length < length) { | |
d397712b CM |
3264 | printk(KERN_CRIT "mapping failed logical %llu bio len %llu " |
3265 | "len %llu\n", (unsigned long long)logical, | |
3266 | (unsigned long long)length, | |
3267 | (unsigned long long)map_length); | |
cea9e445 CM |
3268 | BUG(); |
3269 | } | |
3270 | multi->end_io = first_bio->bi_end_io; | |
3271 | multi->private = first_bio->bi_private; | |
7d2b4daa | 3272 | multi->orig_bio = first_bio; |
cea9e445 CM |
3273 | atomic_set(&multi->stripes_pending, multi->num_stripes); |
3274 | ||
d397712b | 3275 | while (dev_nr < total_devs) { |
8790d502 | 3276 | if (total_devs > 1) { |
8790d502 CM |
3277 | if (dev_nr < total_devs - 1) { |
3278 | bio = bio_clone(first_bio, GFP_NOFS); | |
3279 | BUG_ON(!bio); | |
3280 | } else { | |
3281 | bio = first_bio; | |
3282 | } | |
3283 | bio->bi_private = multi; | |
3284 | bio->bi_end_io = end_bio_multi_stripe; | |
3285 | } | |
cea9e445 CM |
3286 | bio->bi_sector = multi->stripes[dev_nr].physical >> 9; |
3287 | dev = multi->stripes[dev_nr].dev; | |
18e503d6 | 3288 | if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { |
dfe25020 | 3289 | bio->bi_bdev = dev->bdev; |
8b712842 CM |
3290 | if (async_submit) |
3291 | schedule_bio(root, dev, rw, bio); | |
3292 | else | |
3293 | submit_bio(rw, bio); | |
dfe25020 CM |
3294 | } else { |
3295 | bio->bi_bdev = root->fs_info->fs_devices->latest_bdev; | |
3296 | bio->bi_sector = logical >> 9; | |
dfe25020 | 3297 | bio_endio(bio, -EIO); |
dfe25020 | 3298 | } |
8790d502 CM |
3299 | dev_nr++; |
3300 | } | |
cea9e445 CM |
3301 | if (total_devs == 1) |
3302 | kfree(multi); | |
0b86a832 CM |
3303 | return 0; |
3304 | } | |
3305 | ||
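/*
 * A two-device RAID1 write illustrates the loop above: btrfs_map_block
 * returns a multi with num_stripes == 2, the first stripe gets a clone
 * of the original bio while the last stripe reuses first_bio itself,
 * and every completion runs end_bio_multi_stripe, which only finishes
 * the original bio once stripes_pending drops to zero.
 */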
a443755f | 3306 | struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, |
2b82032c | 3307 | u8 *uuid, u8 *fsid) |
0b86a832 | 3308 | { |
2b82032c YZ |
3309 | struct btrfs_device *device; |
3310 | struct btrfs_fs_devices *cur_devices; | |
3311 | ||
3312 | cur_devices = root->fs_info->fs_devices; | |
3313 | while (cur_devices) { | |
3314 | if (!fsid || | |
3315 | !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) { | |
3316 | device = __find_device(&cur_devices->devices, | |
3317 | devid, uuid); | |
3318 | if (device) | |
3319 | return device; | |
3320 | } | |
3321 | cur_devices = cur_devices->seed; | |
3322 | } | |
3323 | return NULL; | |
0b86a832 CM |
3324 | } |
3325 | ||
dfe25020 CM |
3326 | static struct btrfs_device *add_missing_dev(struct btrfs_root *root, |
3327 | u64 devid, u8 *dev_uuid) | |
3328 | { | |
3329 | struct btrfs_device *device; | |
3330 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | |
3331 | ||
3332 | device = kzalloc(sizeof(*device), GFP_NOFS); | |
7cbd8a83 | 3333 | if (!device) |
3334 | return NULL; | |
dfe25020 CM |
3335 | list_add(&device->dev_list, |
3336 | &fs_devices->devices); | |
dfe25020 CM |
3337 | device->dev_root = root->fs_info->dev_root; |
3338 | device->devid = devid; | |
8b712842 | 3339 | device->work.func = pending_bios_fn; |
e4404d6e | 3340 | device->fs_devices = fs_devices; |
cd02dca5 | 3341 | device->missing = 1; |
dfe25020 | 3342 | fs_devices->num_devices++; |
cd02dca5 | 3343 | fs_devices->missing_devices++; |
dfe25020 | 3344 | spin_lock_init(&device->io_lock); |
d20f7043 | 3345 | INIT_LIST_HEAD(&device->dev_alloc_list); |
dfe25020 CM |
3346 | memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); |
3347 | return device; | |
3348 | } | |
3349 | ||
0b86a832 CM |
3350 | static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, |
3351 | struct extent_buffer *leaf, | |
3352 | struct btrfs_chunk *chunk) | |
3353 | { | |
3354 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | |
3355 | struct map_lookup *map; | |
3356 | struct extent_map *em; | |
3357 | u64 logical; | |
3358 | u64 length; | |
3359 | u64 devid; | |
a443755f | 3360 | u8 uuid[BTRFS_UUID_SIZE]; |
593060d7 | 3361 | int num_stripes; |
0b86a832 | 3362 | int ret; |
593060d7 | 3363 | int i; |
0b86a832 | 3364 | |
e17cade2 CM |
3365 | logical = key->offset; |
3366 | length = btrfs_chunk_length(leaf, chunk); | |
a061fc8d | 3367 | |
890871be | 3368 | read_lock(&map_tree->map_tree.lock); |
0b86a832 | 3369 | em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); |
890871be | 3370 | read_unlock(&map_tree->map_tree.lock); |
0b86a832 CM |
3371 | |
3372 | /* already mapped? */ | |
3373 | if (em && em->start <= logical && em->start + em->len > logical) { | |
3374 | free_extent_map(em); | |
0b86a832 CM |
3375 | return 0; |
3376 | } else if (em) { | |
3377 | free_extent_map(em); | |
3378 | } | |
0b86a832 | 3379 | |
0b86a832 CM |
3380 | em = alloc_extent_map(GFP_NOFS); |
3381 | if (!em) | |
3382 | return -ENOMEM; | |
593060d7 CM |
3383 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); |
3384 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); | |
0b86a832 CM |
3385 | if (!map) { |
3386 | free_extent_map(em); | |
3387 | return -ENOMEM; | |
3388 | } | |
3389 | ||
3390 | em->bdev = (struct block_device *)map; | |
3391 | em->start = logical; | |
3392 | em->len = length; | |
3393 | em->block_start = 0; | |
c8b97818 | 3394 | em->block_len = em->len; |
0b86a832 | 3395 | |
593060d7 CM |
3396 | map->num_stripes = num_stripes; |
3397 | map->io_width = btrfs_chunk_io_width(leaf, chunk); | |
3398 | map->io_align = btrfs_chunk_io_align(leaf, chunk); | |
3399 | map->sector_size = btrfs_chunk_sector_size(leaf, chunk); | |
3400 | map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); | |
3401 | map->type = btrfs_chunk_type(leaf, chunk); | |
321aecc6 | 3402 | map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); |
593060d7 CM |
3403 | for (i = 0; i < num_stripes; i++) { |
3404 | map->stripes[i].physical = | |
3405 | btrfs_stripe_offset_nr(leaf, chunk, i); | |
3406 | devid = btrfs_stripe_devid_nr(leaf, chunk, i); | |
a443755f CM |
3407 | read_extent_buffer(leaf, uuid, (unsigned long) |
3408 | btrfs_stripe_dev_uuid_nr(chunk, i), | |
3409 | BTRFS_UUID_SIZE); | |
2b82032c YZ |
3410 | map->stripes[i].dev = btrfs_find_device(root, devid, uuid, |
3411 | NULL); | |
dfe25020 | 3412 | if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) { |
593060d7 CM |
3413 | kfree(map); |
3414 | free_extent_map(em); | |
3415 | return -EIO; | |
3416 | } | |
dfe25020 CM |
3417 | if (!map->stripes[i].dev) { |
3418 | map->stripes[i].dev = | |
3419 | add_missing_dev(root, devid, uuid); | |
3420 | if (!map->stripes[i].dev) { | |
3421 | kfree(map); | |
3422 | free_extent_map(em); | |
3423 | return -EIO; | |
3424 | } | |
3425 | } | |
3426 | map->stripes[i].dev->in_fs_metadata = 1; | |
0b86a832 CM |
3427 | } |
3428 | ||
890871be | 3429 | write_lock(&map_tree->map_tree.lock); |
0b86a832 | 3430 | ret = add_extent_mapping(&map_tree->map_tree, em); |
890871be | 3431 | write_unlock(&map_tree->map_tree.lock); |
b248a415 | 3432 | BUG_ON(ret); |
0b86a832 CM |
3433 | free_extent_map(em); |
3434 | ||
3435 | return 0; | |
3436 | } | |
3437 | ||
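/*
 * Note on the cast above: chunk mappings reuse the generic extent map
 * structure, so the per-chunk struct map_lookup is carried in the
 * em->bdev pointer, which is never used as a block device for entries
 * in the chunk mapping tree; btrfs_rmap_block (and the mapping path)
 * cast it back the same way when they look a chunk up.
 */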
3438 | static int fill_device_from_item(struct extent_buffer *leaf, | |
3439 | struct btrfs_dev_item *dev_item, | |
3440 | struct btrfs_device *device) | |
3441 | { | |
3442 | unsigned long ptr; | |
0b86a832 CM |
3443 | |
3444 | device->devid = btrfs_device_id(leaf, dev_item); | |
d6397bae CB |
3445 | device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); |
3446 | device->total_bytes = device->disk_total_bytes; | |
0b86a832 CM |
3447 | device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); |
3448 | device->type = btrfs_device_type(leaf, dev_item); | |
3449 | device->io_align = btrfs_device_io_align(leaf, dev_item); | |
3450 | device->io_width = btrfs_device_io_width(leaf, dev_item); | |
3451 | device->sector_size = btrfs_device_sector_size(leaf, dev_item); | |
0b86a832 CM |
3452 | |
3453 | ptr = (unsigned long)btrfs_device_uuid(dev_item); | |
e17cade2 | 3454 | read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); |
0b86a832 | 3455 | |
0b86a832 CM |
3456 | return 0; |
3457 | } | |
3458 | ||
2b82032c YZ |
3459 | static int open_seed_devices(struct btrfs_root *root, u8 *fsid) |
3460 | { | |
3461 | struct btrfs_fs_devices *fs_devices; | |
3462 | int ret; | |
3463 | ||
3464 | mutex_lock(&uuid_mutex); | |
3465 | ||
3466 | fs_devices = root->fs_info->fs_devices->seed; | |
3467 | while (fs_devices) { | |
3468 | if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) { | |
3469 | ret = 0; | |
3470 | goto out; | |
3471 | } | |
3472 | fs_devices = fs_devices->seed; | |
3473 | } | |
3474 | ||
3475 | fs_devices = find_fsid(fsid); | |
3476 | if (!fs_devices) { | |
3477 | ret = -ENOENT; | |
3478 | goto out; | |
3479 | } | |
e4404d6e YZ |
3480 | |
3481 | fs_devices = clone_fs_devices(fs_devices); | |
3482 | if (IS_ERR(fs_devices)) { | |
3483 | ret = PTR_ERR(fs_devices); | |
2b82032c YZ |
3484 | goto out; |
3485 | } | |
3486 | ||
97288f2c | 3487 | ret = __btrfs_open_devices(fs_devices, FMODE_READ, |
15916de8 | 3488 | root->fs_info->bdev_holder); |
2b82032c YZ |
3489 | if (ret) |
3490 | goto out; | |
3491 | ||
3492 | if (!fs_devices->seeding) { | |
3493 | __btrfs_close_devices(fs_devices); | |
e4404d6e | 3494 | free_fs_devices(fs_devices); |
2b82032c YZ |
3495 | ret = -EINVAL; |
3496 | goto out; | |
3497 | } | |
3498 | ||
3499 | fs_devices->seed = root->fs_info->fs_devices->seed; | |
3500 | root->fs_info->fs_devices->seed = fs_devices; | |
2b82032c YZ |
3501 | out: |
3502 | mutex_unlock(&uuid_mutex); | |
3503 | return ret; | |
3504 | } | |
3505 | ||
0d81ba5d | 3506 | static int read_one_dev(struct btrfs_root *root, |
0b86a832 CM |
3507 | struct extent_buffer *leaf, |
3508 | struct btrfs_dev_item *dev_item) | |
3509 | { | |
3510 | struct btrfs_device *device; | |
3511 | u64 devid; | |
3512 | int ret; | |
2b82032c | 3513 | u8 fs_uuid[BTRFS_UUID_SIZE]; |
a443755f CM |
3514 | u8 dev_uuid[BTRFS_UUID_SIZE]; |
3515 | ||
0b86a832 | 3516 | devid = btrfs_device_id(leaf, dev_item); |
a443755f CM |
3517 | read_extent_buffer(leaf, dev_uuid, |
3518 | (unsigned long)btrfs_device_uuid(dev_item), | |
3519 | BTRFS_UUID_SIZE); | |
2b82032c YZ |
3520 | read_extent_buffer(leaf, fs_uuid, |
3521 | (unsigned long)btrfs_device_fsid(dev_item), | |
3522 | BTRFS_UUID_SIZE); | |
3523 | ||
3524 | if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) { | |
3525 | ret = open_seed_devices(root, fs_uuid); | |
e4404d6e | 3526 | if (ret && !btrfs_test_opt(root, DEGRADED)) |
2b82032c | 3527 | return ret; |
2b82032c YZ |
3528 | } |
3529 | ||
3530 | device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); | |
3531 | if (!device || !device->bdev) { | |
e4404d6e | 3532 | if (!btrfs_test_opt(root, DEGRADED)) |
2b82032c YZ |
3533 | return -EIO; |
3534 | ||
3535 | if (!device) { | |
d397712b CM |
3536 | printk(KERN_WARNING "warning devid %llu missing\n", |
3537 | (unsigned long long)devid); | |
2b82032c YZ |
3538 | device = add_missing_dev(root, devid, dev_uuid); |
3539 | if (!device) | |
3540 | return -ENOMEM; | |
cd02dca5 CM |
3541 | } else if (!device->missing) { |
3542 | /* | |
3543 | * this happens when a device that was properly setup | |
3544 | * in the device info lists suddenly goes bad. | |
3545 | * device->bdev is NULL, and so we have to set | |
3546 | * device->missing to one here | |
3547 | */ | |
3548 | root->fs_info->fs_devices->missing_devices++; | |
3549 | device->missing = 1; | |
2b82032c YZ |
3550 | } |
3551 | } | |
3552 | ||
3553 | if (device->fs_devices != root->fs_info->fs_devices) { | |
3554 | BUG_ON(device->writeable); | |
3555 | if (device->generation != | |
3556 | btrfs_device_generation(leaf, dev_item)) | |
3557 | return -EINVAL; | |
6324fbf3 | 3558 | } |
0b86a832 CM |
3559 | |
3560 | fill_device_from_item(leaf, dev_item, device); | |
3561 | device->dev_root = root->fs_info->dev_root; | |
dfe25020 | 3562 | device->in_fs_metadata = 1; |
2b82032c YZ |
3563 | if (device->writeable) |
3564 | device->fs_devices->total_rw_bytes += device->total_bytes; | |
0b86a832 | 3565 | ret = 0; |
0b86a832 CM |
3566 | return ret; |
3567 | } | |
3568 | ||
0d81ba5d CM |
3569 | int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) |
3570 | { | |
3571 | struct btrfs_dev_item *dev_item; | |
3572 | ||
3573 | dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, | |
3574 | dev_item); | |
3575 | return read_one_dev(root, buf, dev_item); | |
3576 | } | |
3577 | ||
e4404d6e | 3578 | int btrfs_read_sys_array(struct btrfs_root *root) |
0b86a832 CM |
3579 | { |
3580 | struct btrfs_super_block *super_copy = &root->fs_info->super_copy; | |
a061fc8d | 3581 | struct extent_buffer *sb; |
0b86a832 | 3582 | struct btrfs_disk_key *disk_key; |
0b86a832 | 3583 | struct btrfs_chunk *chunk; |
84eed90f CM |
3584 | u8 *ptr; |
3585 | unsigned long sb_ptr; | |
3586 | int ret = 0; | |
0b86a832 CM |
3587 | u32 num_stripes; |
3588 | u32 array_size; | |
3589 | u32 len = 0; | |
0b86a832 | 3590 | u32 cur; |
84eed90f | 3591 | struct btrfs_key key; |
0b86a832 | 3592 | |
e4404d6e | 3593 | sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET, |
a061fc8d CM |
3594 | BTRFS_SUPER_INFO_SIZE); |
3595 | if (!sb) | |
3596 | return -ENOMEM; | |
3597 | btrfs_set_buffer_uptodate(sb); | |
4008c04a CM |
3598 | btrfs_set_buffer_lockdep_class(sb, 0); |
3599 | ||
a061fc8d | 3600 | write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); |
0b86a832 CM |
3601 | array_size = btrfs_super_sys_array_size(super_copy); |
3602 | ||
0b86a832 CM |
3603 | ptr = super_copy->sys_chunk_array; |
3604 | sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); | |
3605 | cur = 0; | |
3606 | ||
3607 | while (cur < array_size) { | |
3608 | disk_key = (struct btrfs_disk_key *)ptr; | |
3609 | btrfs_disk_key_to_cpu(&key, disk_key); | |
3610 | ||
a061fc8d | 3611 | len = sizeof(*disk_key); ptr += len; |
0b86a832 CM |
3612 | sb_ptr += len; |
3613 | cur += len; | |
3614 | ||
0d81ba5d | 3615 | if (key.type == BTRFS_CHUNK_ITEM_KEY) { |
0b86a832 | 3616 | chunk = (struct btrfs_chunk *)sb_ptr; |
0d81ba5d | 3617 | ret = read_one_chunk(root, &key, sb, chunk); |
84eed90f CM |
3618 | if (ret) |
3619 | break; | |
0b86a832 CM |
3620 | num_stripes = btrfs_chunk_num_stripes(sb, chunk); |
3621 | len = btrfs_chunk_item_size(num_stripes); | |
3622 | } else { | |
84eed90f CM |
3623 | ret = -EIO; |
3624 | break; | |
0b86a832 CM |
3625 | } |
3626 | ptr += len; | |
3627 | sb_ptr += len; | |
3628 | cur += len; | |
3629 | } | |
a061fc8d | 3630 | free_extent_buffer(sb); |
84eed90f | 3631 | return ret; |
0b86a832 CM |
3632 | } |
3633 | ||
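/*
 * Layout the parser above walks, for reference: sys_chunk_array is a
 * packed sequence of struct btrfs_disk_key headers, each followed by a
 * struct btrfs_chunk and its stripes, array_size bytes in total.  Every
 * key is expected to be a BTRFS_CHUNK_ITEM_KEY; anything else is
 * treated as corruption and the walk stops with -EIO.
 */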
3634 | int btrfs_read_chunk_tree(struct btrfs_root *root) | |
3635 | { | |
3636 | struct btrfs_path *path; | |
3637 | struct extent_buffer *leaf; | |
3638 | struct btrfs_key key; | |
3639 | struct btrfs_key found_key; | |
3640 | int ret; | |
3641 | int slot; | |
3642 | ||
3643 | root = root->fs_info->chunk_root; | |
3644 | ||
3645 | path = btrfs_alloc_path(); | |
3646 | if (!path) | |
3647 | return -ENOMEM; | |
3648 | ||
3649 | /* first we search for all of the device items, and then we | |
3650 | * read in all of the chunk items. This way we can create chunk | |
3651 | * mappings that reference all of the devices that are found | |
3652 | */ | |
3653 | key.objectid = BTRFS_DEV_ITEMS_OBJECTID; | |
3654 | key.offset = 0; | |
3655 | key.type = 0; | |
3656 | again: | |
3657 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | |
ab59381e ZL |
3658 | if (ret < 0) |
3659 | goto error; | |
d397712b | 3660 | while (1) { |
0b86a832 CM |
3661 | leaf = path->nodes[0]; |
3662 | slot = path->slots[0]; | |
3663 | if (slot >= btrfs_header_nritems(leaf)) { | |
3664 | ret = btrfs_next_leaf(root, path); | |
3665 | if (ret == 0) | |
3666 | continue; | |
3667 | if (ret < 0) | |
3668 | goto error; | |
3669 | break; | |
3670 | } | |
3671 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
3672 | if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { | |
3673 | if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID) | |
3674 | break; | |
3675 | if (found_key.type == BTRFS_DEV_ITEM_KEY) { | |
3676 | struct btrfs_dev_item *dev_item; | |
3677 | dev_item = btrfs_item_ptr(leaf, slot, | |
3678 | struct btrfs_dev_item); | |
0d81ba5d | 3679 | ret = read_one_dev(root, leaf, dev_item); |
2b82032c YZ |
3680 | if (ret) |
3681 | goto error; | |
0b86a832 CM |
3682 | } |
3683 | } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { | |
3684 | struct btrfs_chunk *chunk; | |
3685 | chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); | |
3686 | ret = read_one_chunk(root, &found_key, leaf, chunk); | |
2b82032c YZ |
3687 | if (ret) |
3688 | goto error; | |
0b86a832 CM |
3689 | } |
3690 | path->slots[0]++; | |
3691 | } | |
3692 | if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { | |
3693 | key.objectid = 0; | |
3694 | btrfs_release_path(root, path); | |
3695 | goto again; | |
3696 | } | |
0b86a832 CM |
3697 | ret = 0; |
3698 | error: | |
2b82032c | 3699 | btrfs_free_path(path); |
0b86a832 CM |
3700 | return ret; |
3701 | } |
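/*
 * The key reset near the bottom is what produces the two passes
 * described in the comment above: the first search starts at
 * BTRFS_DEV_ITEMS_OBJECTID and consumes only device items, then
 * key.objectid is cleared and the tree is walked again from the start
 * so every chunk item can resolve its stripes against devices that are
 * already known.
 */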