// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

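/*
 * Sentinel return codes used internally by topology repair; deliberately
 * small positive values so they can't be confused with negative errcodes:
 */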
#define DROP_THIS_NODE          10
#define DROP_PREV_NODE          11
#define DID_FILL_FROM_SCAN      12

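/*
 * GC triggers take a mutable bkey_s; strip the const off a bkey_s_c so we
 * can pass it to them. "unsafe" because nothing here guarantees the
 * underlying key is actually writable:
 */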
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
        return (struct bkey_s) {{{
                (struct bkey *) k.k,
                (struct bch_val *) k.v
        }}};
}

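/*
 * c->gc_pos tracks how far mark-and-sweep GC has progressed, so other code
 * can tell (via gc_pos_cmp()) whether GC has already visited a given
 * position. Writers bump the gc_pos_lock seqcount so that lockless readers
 * can detect a torn read and retry; a reader would look roughly like:
 *
 *	unsigned seq;
 *	struct gc_pos pos;
 *
 *	do {
 *		seq = read_seqcount_begin(&c->gc_pos_lock);
 *		pos = c->gc_pos;
 *	} while (read_seqcount_retry(&c->gc_pos_lock, seq));
 */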
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
        preempt_disable();
        write_seqcount_begin(&c->gc_pos_lock);
        c->gc_pos = new_pos;
        write_seqcount_end(&c->gc_pos_lock);
        preempt_enable();
}

static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
        BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) < 0);
        __gc_pos_set(c, new_pos);
}

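/*
 * Topology repair may need to rewrite a node's min_key/max_key, which only
 * the v2 btree pointer format can represent; convert an old style pointer
 * to v2 here:
 */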
static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
        switch (b->key.k.type) {
        case KEY_TYPE_btree_ptr: {
                struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);

                dst->k.p                = src->k.p;
                dst->v.mem_ptr          = 0;
                dst->v.seq              = b->data->keys.seq;
                dst->v.sectors_written  = 0;
                dst->v.flags            = 0;
                dst->v.min_key          = b->data->min_key;
                set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
                memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
                break;
        }
        case KEY_TYPE_btree_ptr_v2:
                bkey_copy(&dst->k_i, &b->key);
                break;
        default:
                BUG();
        }
}

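/*
 * set_node_min()/set_node_max() repair a node's key range by updating both
 * the node's header and its key in the parent - done via the journal keys
 * mechanism, since we're in recovery and can't do normal btree updates yet.
 * Changing max_key changes the node's identity in the btree node cache, so
 * set_node_max() must also rehash the node:
 */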
static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
        struct bkey_i_btree_ptr_v2 *new;
        int ret;

        if (c->opts.verbose) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
                prt_str(&buf, " -> ");
                bch2_bpos_to_text(&buf, new_min);

                bch_info(c, "%s(): %s", __func__, buf.buf);
                printbuf_exit(&buf);
        }

        new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -BCH_ERR_ENOMEM_gc_repair_key;

        btree_ptr_to_v2(b, new);
        b->data->min_key        = new_min;
        new->v.min_key          = new_min;
        SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

        ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
        if (ret) {
                kfree(new);
                return ret;
        }

        bch2_btree_node_drop_keys_outside_node(b);
        bkey_copy(&b->key, &new->k_i);
        return 0;
}

static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
{
        struct bkey_i_btree_ptr_v2 *new;
        int ret;

        if (c->opts.verbose) {
                struct printbuf buf = PRINTBUF;

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
                prt_str(&buf, " -> ");
                bch2_bpos_to_text(&buf, new_max);

                bch_info(c, "%s(): %s", __func__, buf.buf);
                printbuf_exit(&buf);
        }

        ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
        if (ret)
                return ret;

        new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
        if (!new)
                return -BCH_ERR_ENOMEM_gc_repair_key;

        btree_ptr_to_v2(b, new);
        b->data->max_key        = new_max;
        new->k.p                = new_max;
        SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

        ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
        if (ret) {
                kfree(new);
                return ret;
        }

        bch2_btree_node_drop_keys_outside_node(b);

        mutex_lock(&c->btree_cache.lock);
        bch2_btree_node_hash_remove(&c->btree_cache, b);

        bkey_copy(&b->key, &new->k_i);
        ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
        BUG_ON(ret);
        mutex_unlock(&c->btree_cache.lock);
        return 0;
}

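/*
 * Check that @cur starts exactly where @prev ended (or at the parent's
 * min_key for the first child). A gap may be fillable from nodes found by
 * btree node scan; an overlap is resolved by shrinking or dropping
 * whichever of the two nodes is older (lower BTREE_NODE_SEQ):
 */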
static int btree_check_node_boundaries(struct bch_fs *c, struct btree *b,
                                       struct btree *prev, struct btree *cur,
                                       struct bpos *pulled_from_scan)
{
        struct bpos expected_start = !prev
                ? b->data->min_key
                : bpos_successor(prev->key.k.p);
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
               !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
                        b->data->min_key));

        if (bpos_eq(expected_start, cur->data->min_key))
                return 0;

        prt_printf(&buf, "  at btree %s level %u:\n  parent: ",
                   bch2_btree_id_str(b->c.btree_id), b->c.level);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

        if (prev) {
                prt_printf(&buf, "\n  prev: ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
        }

        prt_str(&buf, "\n  next: ");
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));

        if (bpos_lt(expected_start, cur->data->min_key)) {                              /* gap */
                if (b->c.level == 1 &&
                    bpos_lt(*pulled_from_scan, cur->data->min_key)) {
                        ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
                                                     expected_start,
                                                     bpos_predecessor(cur->data->min_key));
                        if (ret)
                                goto err;

                        *pulled_from_scan = cur->data->min_key;
                        ret = DID_FILL_FROM_SCAN;
                } else {
                        if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
                                             "btree node with incorrect min_key%s", buf.buf))
                                ret = set_node_min(c, cur, expected_start);
                }
        } else {                                                                        /* overlap */
                if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {   /* cur overwrites prev */
                        if (bpos_ge(prev->data->min_key, cur->data->min_key)) {         /* fully? */
                                if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_next_node,
                                                     "btree node overwritten by next node%s", buf.buf))
                                        ret = DROP_PREV_NODE;
                        } else {
                                if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
                                                     "btree node with incorrect max_key%s", buf.buf))
                                        ret = set_node_max(c, prev,
                                                           bpos_predecessor(cur->data->min_key));
                        }
                } else {
                        if (bpos_ge(expected_start, cur->data->max_key)) {              /* fully? */
                                if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_prev_node,
                                                     "btree node overwritten by prev node%s", buf.buf))
                                        ret = DROP_THIS_NODE;
                        } else {
                                if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
                                                     "btree node with incorrect min_key%s", buf.buf))
                                        ret = set_node_min(c, cur, expected_start);
                        }
                }
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

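/*
 * The last child must end exactly at the parent's max_key; as above, try
 * filling a gap from scanned nodes before extending @child:
 */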
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
                                 struct btree *child, struct bpos *pulled_from_scan)
{
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (bpos_eq(child->key.k.p, b->key.k.p))
                return 0;

        prt_printf(&buf, "at btree %s level %u:\n  parent: ",
                   bch2_btree_id_str(b->c.btree_id), b->c.level);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

        prt_str(&buf, "\n  child: ");
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));

        if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
                             "btree node with incorrect max_key%s", buf.buf)) {
                if (b->c.level == 1 &&
                    bpos_lt(*pulled_from_scan, b->key.k.p)) {
                        ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
                                                bpos_successor(child->key.k.p), b->key.k.p);
                        if (ret)
                                goto err;

                        *pulled_from_scan = b->key.k.p;
                        ret = DID_FILL_FROM_SCAN;
                } else {
                        ret = set_node_max(c, child, b->key.k.p);
                }
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

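/*
 * Walk the children of @b, checking that each child is readable and that
 * the child boundaries exactly tile the parent's range, then recurse into
 * each child. Returns DROP_THIS_NODE if @b ended up with no live children:
 */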
static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
                                              struct bpos *pulled_from_scan)
{
        struct bch_fs *c = trans->c;
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf prev_k, cur_k;
        struct btree *prev = NULL, *cur = NULL;
        bool have_child, new_pass = false;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (!b->c.level)
                return 0;

        bch2_bkey_buf_init(&prev_k);
        bch2_bkey_buf_init(&cur_k);
again:
        cur = prev = NULL;
        have_child = new_pass = false;
        bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
        iter.prefetch = true;

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                BUG_ON(bpos_lt(k.k->p, b->data->min_key));
                BUG_ON(bpos_gt(k.k->p, b->data->max_key));

                bch2_btree_and_journal_iter_advance(&iter);
                bch2_bkey_buf_reassemble(&cur_k, c, k);

                cur = bch2_btree_node_get_noiter(trans, cur_k.k,
                                        b->c.btree_id, b->c.level - 1,
                                        false);
                ret = PTR_ERR_OR_ZERO(cur);

                printbuf_reset(&buf);
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));

                if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), c,
                                btree_node_unreadable,
                                "Topology repair: unreadable btree node at btree %s level %u:\n"
                                "  %s",
                                bch2_btree_id_str(b->c.btree_id),
                                b->c.level - 1,
                                buf.buf)) {
                        bch2_btree_node_evict(trans, cur_k.k);
                        cur = NULL;
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        if (ret)
                                break;

                        if (!btree_id_is_alloc(b->c.btree_id)) {
                                ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
                                if (ret)
                                        break;
                        }
                        continue;
                }

                bch_err_msg(c, ret, "getting btree node");
                if (ret)
                        break;

                if (bch2_btree_node_is_stale(c, cur)) {
                        bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
                        six_unlock_read(&cur->c.lock);
                        bch2_btree_node_evict(trans, cur_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        cur = NULL;
                        if (ret)
                                break;
                        continue;
                }

                ret = btree_check_node_boundaries(c, b, prev, cur, pulled_from_scan);
                if (ret == DID_FILL_FROM_SCAN) {
                        new_pass = true;
                        ret = 0;
                }

                if (ret == DROP_THIS_NODE) {
                        six_unlock_read(&cur->c.lock);
                        bch2_btree_node_evict(trans, cur_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        cur = NULL;
                        if (ret)
                                break;
                        continue;
                }

                if (prev)
                        six_unlock_read(&prev->c.lock);
                prev = NULL;

                if (ret == DROP_PREV_NODE) {
                        bch_info(c, "dropped prev node");
                        bch2_btree_node_evict(trans, prev_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, prev_k.k->k.p);
                        if (ret)
                                break;

                        bch2_btree_and_journal_iter_exit(&iter);
                        goto again;
                } else if (ret)
                        break;

                prev = cur;
                cur = NULL;
                bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
        }

        if (!ret && !IS_ERR_OR_NULL(prev)) {
                BUG_ON(cur);
                ret = btree_repair_node_end(c, b, prev, pulled_from_scan);
                if (ret == DID_FILL_FROM_SCAN) {
                        new_pass = true;
                        ret = 0;
                }
        }

        if (!IS_ERR_OR_NULL(prev))
                six_unlock_read(&prev->c.lock);
        prev = NULL;
        if (!IS_ERR_OR_NULL(cur))
                six_unlock_read(&cur->c.lock);
        cur = NULL;

        if (ret)
                goto err;

        bch2_btree_and_journal_iter_exit(&iter);

        if (new_pass)
                goto again;

        bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
        iter.prefetch = true;

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&cur_k, c, k);
                bch2_btree_and_journal_iter_advance(&iter);

                cur = bch2_btree_node_get_noiter(trans, cur_k.k,
                                        b->c.btree_id, b->c.level - 1,
                                        false);
                ret = PTR_ERR_OR_ZERO(cur);

                bch_err_msg(c, ret, "getting btree node");
                if (ret)
                        goto err;

                ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
                six_unlock_read(&cur->c.lock);
                cur = NULL;

                if (ret == DROP_THIS_NODE) {
                        bch2_btree_node_evict(trans, cur_k.k);
                        ret = bch2_journal_key_delete(c, b->c.btree_id,
                                                      b->c.level, cur_k.k->k.p);
                        new_pass = true;
                }

                if (ret)
                        goto err;

                have_child = true;
        }

        printbuf_reset(&buf);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

        if (mustfix_fsck_err_on(!have_child, c,
                        btree_node_topology_interior_node_empty,
                        "empty interior btree node at btree %s level %u\n"
                        "  %s",
                        bch2_btree_id_str(b->c.btree_id),
                        b->c.level, buf.buf))
                ret = DROP_THIS_NODE;
err:
fsck_err:
        if (!IS_ERR_OR_NULL(prev))
                six_unlock_read(&prev->c.lock);
        if (!IS_ERR_OR_NULL(cur))
                six_unlock_read(&cur->c.lock);

        bch2_btree_and_journal_iter_exit(&iter);

        if (!ret && new_pass)
                goto again;

        BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));

        bch2_bkey_buf_exit(&prev_k, c);
        bch2_bkey_buf_exit(&cur_k, c);
        printbuf_exit(&buf);
        return ret;
}

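/*
 * Entry point for topology repair: for each btree, if the root is
 * unreadable, reconstruct it from nodes found by btree node scan, then
 * repair the rest of the tree top down:
 */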
int bch2_check_topology(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct bpos pulled_from_scan = POS_MIN;
        int ret = 0;

        for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
                bool reconstructed_root = false;

                if (r->error) {
                        ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
                        if (ret)
                                break;
reconstruct_root:
                        bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));

                        r->alive = false;
                        r->error = 0;

                        if (!bch2_btree_has_scanned_nodes(c, i)) {
                                mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
                                                 "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
                                bch2_btree_root_alloc_fake_trans(trans, i, 0);
                        } else {
                                bch2_btree_root_alloc_fake_trans(trans, i, 1);
                                bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
                                ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
                                if (ret)
                                        break;
                        }

                        reconstructed_root = true;
                }

                struct btree *b = r->b;

                btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
                ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
                six_unlock_read(&b->c.lock);

                if (ret == DROP_THIS_NODE) {
                        mutex_lock(&c->btree_cache.lock);
                        bch2_btree_node_hash_remove(&c->btree_cache, b);
                        list_move(&b->list, &c->btree_cache.freeable);
                        mutex_unlock(&c->btree_cache.lock);

                        r->b = NULL;

                        if (!reconstructed_root)
                                goto reconstruct_root;

                        bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
                        bch2_btree_root_alloc_fake_trans(trans, i, 0);
                        r->alive = false;
                        ret = 0;
                }
        }
fsck_err:
        bch2_trans_put(trans);
        return ret;
}

/* marking of btree keys/nodes: */

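/*
 * Mark a single key for GC: first run the check_repair trigger, committing
 * immediately if it queued any repairs (GC triggers aren't idempotent, so
 * they can't be replayed on transaction restart), then run the gc trigger
 * to recompute usage:
 */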
static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
                            unsigned level, struct btree **prev,
                            struct btree_iter *iter, struct bkey_s_c k,
                            bool initial)
{
        struct bch_fs *c = trans->c;

        if (iter) {
                struct btree_path *path = btree_iter_path(trans, iter);
                struct btree *b = path_l(path)->b;

                if (*prev != b) {
                        int ret = bch2_btree_node_check_topology(trans, b);
                        if (ret)
                                return ret;
                }
                *prev = b;
        }

        struct bkey deleted = KEY(0, 0, 0);
        struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        deleted.p = k.k->p;

        if (initial) {
                BUG_ON(bch2_journal_seq_verify &&
                       k.k->version.lo > atomic64_read(&c->journal.seq));

                if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
                                bkey_version_in_future,
                                "key version number higher than recorded %llu\n  %s",
                                atomic64_read(&c->key_version),
                                (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
                        atomic64_set(&c->key_version, k.k->version.lo);
        }

        if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
                                c, btree_bitmap_not_marked,
                                "btree ptr not marked in member info btree allocated bitmap\n  %s",
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, k),
                                 buf.buf))) {
                mutex_lock(&c->sb_lock);
                bch2_dev_btree_bitmap_mark(c, k);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        }

        /*
         * We require a commit before key_trigger() because
         * key_trigger(BTREE_TRIGGER_GC) is not idempotent; we'll calculate the
         * wrong result if we run it multiple times.
         */
        unsigned flags = !iter ? BTREE_TRIGGER_is_root : 0;

        ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
                               BTREE_TRIGGER_check_repair|flags);
        if (ret)
                goto out;

        if (trans->nr_updates) {
                ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
                        -BCH_ERR_transaction_restart_nested;
                goto out;
        }

        ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
                               BTREE_TRIGGER_gc|flags);
out:
fsck_err:
        printbuf_exit(&buf);
        bch_err_fn(c, ret);
        return ret;
}

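/*
 * Walk one btree, marking the root and then every node, level by level,
 * top down. Leaves only need marking when their key type carries pointers
 * GC cares about; on the initial GC before going RW we always walk down to
 * the leaves, so unreadable nodes are caught early:
 */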
static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool initial)
{
        struct bch_fs *c = trans->c;
        int level = 0, target_depth = btree_node_type_needs_gc(__btree_node_type(0, btree)) ? 0 : 1;
        int ret = 0;

        /* We need to make sure every leaf node is readable before going RW */
        if (initial)
                target_depth = 0;

        /* root */
        do {
retry_root:
                bch2_trans_begin(trans);

                struct btree_iter iter;
                bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
                                          0, bch2_btree_id_root(c, btree)->b->c.level, 0);
                struct btree *b = bch2_btree_iter_peek_node(&iter);
                ret = PTR_ERR_OR_ZERO(b);
                if (ret)
                        goto err_root;

                if (b != btree_node_root(c, b)) {
                        bch2_trans_iter_exit(trans, &iter);
                        goto retry_root;
                }

                gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
                struct bkey_s_c k = bkey_i_to_s_c(&b->key);
                ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
                level = b->c.level;
err_root:
                bch2_trans_iter_exit(trans, &iter);
        } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

        if (ret)
                return ret;

        for (; level >= target_depth; --level) {
                struct btree *prev = NULL;
                struct btree_iter iter;
                bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, level,
                                          BTREE_ITER_prefetch);

                ret = for_each_btree_key_continue(trans, iter, 0, k, ({
                        gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
                        bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
                }));
                if (ret)
                        break;
        }

        return ret;
}

static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
        return cmp_int(gc_btree_order(l), gc_btree_order(r));
}

static int bch2_gc_btrees(struct bch_fs *c)
{
        struct btree_trans *trans = bch2_trans_get(c);
        enum btree_id ids[BTREE_ID_NR];
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++)
                ids[i] = i;
        bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);

        for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
                unsigned btree = i < BTREE_ID_NR ? ids[i] : i;

                if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
                        continue;

                ret = bch2_gc_btree(trans, btree, true);

                if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
                                        c, btree_node_read_error,
                               "btree node read error for %s",
                               bch2_btree_id_str(btree)))
                        ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
        }
fsck_err:
        bch2_trans_put(trans);
        bch_err_fn(c, ret);
        return ret;
}

static int bch2_mark_superblocks(struct bch_fs *c)
{
        mutex_lock(&c->sb_lock);
        gc_pos_set(c, gc_phase(GC_PHASE_sb));

        int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
        mutex_unlock(&c->sb_lock);
        return ret;
}

static void bch2_gc_free(struct bch_fs *c)
{
        genradix_free(&c->reflink_gc_table);
        genradix_free(&c->gc_stripes);

        for_each_member_device(c, ca) {
                kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
                ca->buckets_gc = NULL;

                free_percpu(ca->usage_gc);
                ca->usage_gc = NULL;
        }

        free_percpu(c->usage_gc);
        c->usage_gc = NULL;
}

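/*
 * GC is done; compare the usage counters it recomputed against the live
 * ones, reporting and correcting any that disagree - the copy_*_field()
 * macros below expand to one fsck_err() check per counter:
 */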
static int bch2_gc_done(struct bch_fs *c)
{
        struct bch_dev *ca = NULL;
        struct printbuf buf = PRINTBUF;
        unsigned i;
        int ret = 0;

        percpu_down_write(&c->mark_lock);

#define copy_field(_err, _f, _msg, ...)                                         \
        if (fsck_err_on(dst->_f != src->_f, c, _err,                            \
                        _msg ": got %llu, should be %llu", ##__VA_ARGS__,       \
                        dst->_f, src->_f))                                      \
                dst->_f = src->_f
#define copy_dev_field(_err, _f, _msg, ...)                                     \
        copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
#define copy_fs_field(_err, _f, _msg, ...)                                      \
        copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)

        for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);

        __for_each_member_device(c, ca) {
                struct bch_dev_usage *dst = ca->usage_base;
                struct bch_dev_usage *src = (void *)
                        bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
                                             dev_usage_u64s());

                for (i = 0; i < BCH_DATA_NR; i++) {
                        copy_dev_field(dev_usage_buckets_wrong,
                                       d[i].buckets,    "%s buckets", bch2_data_type_str(i));
                        copy_dev_field(dev_usage_sectors_wrong,
                                       d[i].sectors,    "%s sectors", bch2_data_type_str(i));
                        copy_dev_field(dev_usage_fragmented_wrong,
                                       d[i].fragmented, "%s fragmented", bch2_data_type_str(i));
                }
        }

        {
                unsigned nr = fs_usage_u64s(c);
                struct bch_fs_usage *dst = c->usage_base;
                struct bch_fs_usage *src = (void *)
                        bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);

                copy_fs_field(fs_usage_hidden_wrong,
                              b.hidden,         "hidden");
                copy_fs_field(fs_usage_btree_wrong,
                              b.btree,          "btree");

                copy_fs_field(fs_usage_data_wrong,
                              b.data,           "data");
                copy_fs_field(fs_usage_cached_wrong,
                              b.cached,         "cached");
                copy_fs_field(fs_usage_reserved_wrong,
                              b.reserved,       "reserved");
                copy_fs_field(fs_usage_nr_inodes_wrong,
                              b.nr_inodes,      "nr_inodes");

                for (i = 0; i < BCH_REPLICAS_MAX; i++)
                        copy_fs_field(fs_usage_persistent_reserved_wrong,
                                      persistent_reserved[i],
                                      "persistent_reserved[%i]", i);

                for (i = 0; i < c->replicas.nr; i++) {
                        struct bch_replicas_entry_v1 *e =
                                cpu_replicas_entry(&c->replicas, i);

                        printbuf_reset(&buf);
                        bch2_replicas_entry_to_text(&buf, e);

                        copy_fs_field(fs_usage_replicas_wrong,
                                      replicas[i], "%s", buf.buf);
                }
        }

#undef copy_fs_field
#undef copy_dev_field
#undef copy_stripe_field
#undef copy_field
fsck_err:
        bch2_dev_put(ca);
        bch_err_fn(c, ret);
        percpu_up_write(&c->mark_lock);
        printbuf_exit(&buf);
        return ret;
}

static int bch2_gc_start(struct bch_fs *c)
{
        BUG_ON(c->usage_gc);

        c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
                                         sizeof(u64), GFP_KERNEL);
        if (!c->usage_gc) {
                bch_err(c, "error allocating c->usage_gc");
                return -BCH_ERR_ENOMEM_gc_start;
        }

        for_each_member_device(c, ca) {
                BUG_ON(ca->usage_gc);

                ca->usage_gc = alloc_percpu(struct bch_dev_usage);
                if (!ca->usage_gc) {
                        bch_err(c, "error allocating ca->usage_gc");
                        bch2_dev_put(ca);
                        return -BCH_ERR_ENOMEM_gc_start;
                }

                this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
                               ca->mi.nbuckets - ca->mi.first_bucket);
        }

        return 0;
}

/* returns true if not equal */
static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
                                     struct bch_alloc_v4 r)
{
        return  l.gen != r.gen                          ||
                l.oldest_gen != r.oldest_gen            ||
                l.data_type != r.data_type              ||
                l.dirty_sectors != r.dirty_sectors      ||
                l.cached_sectors != r.cached_sectors    ||
                l.stripe_redundancy != r.stripe_redundancy ||
                l.stripe != r.stripe;
}

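/*
 * Write back one alloc key from GC state: compare the bucket as recomputed
 * by GC against the existing alloc key, field by field, and rewrite the
 * alloc key if anything disagrees:
 */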
static int bch2_alloc_write_key(struct btree_trans *trans,
                                struct btree_iter *iter,
                                struct bch_dev *ca,
                                struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_i_alloc_v4 *a;
        struct bch_alloc_v4 old_gc, gc, old_convert, new;
        const struct bch_alloc_v4 *old;
        int ret;

        if (!bucket_valid(ca, k.k->p.offset))
                return 0;

        old = bch2_alloc_to_v4(k, &old_convert);
        gc = new = *old;

        percpu_down_read(&c->mark_lock);
        __bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset));

        old_gc = gc;

        if ((old->data_type == BCH_DATA_sb ||
             old->data_type == BCH_DATA_journal) &&
            !bch2_dev_is_online(ca)) {
                gc.data_type = old->data_type;
                gc.dirty_sectors = old->dirty_sectors;
        }

        /*
         * gc.data_type doesn't yet include need_discard & need_gc_gen states -
         * fix that here:
         */
        alloc_data_type_set(&gc, gc.data_type);

        if (gc.data_type != old_gc.data_type ||
            gc.dirty_sectors != old_gc.dirty_sectors)
                bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true);
        percpu_up_read(&c->mark_lock);

        gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca);

        if (fsck_err_on(new.data_type != gc.data_type, c,
                        alloc_key_data_type_wrong,
                        "bucket %llu:%llu gen %u has wrong data_type"
                        ": got %s, should be %s",
                        iter->pos.inode, iter->pos.offset,
                        gc.gen,
                        bch2_data_type_str(new.data_type),
                        bch2_data_type_str(gc.data_type)))
                new.data_type = gc.data_type;

#define copy_bucket_field(_errtype, _f)                                 \
        if (fsck_err_on(new._f != gc._f, c, _errtype,                   \
                        "bucket %llu:%llu gen %u data type %s has wrong " #_f   \
                        ": got %llu, should be %llu",                   \
                        iter->pos.inode, iter->pos.offset,              \
                        gc.gen,                                         \
                        bch2_data_type_str(gc.data_type),               \
                        (u64) new._f, (u64) gc._f))                     \
                new._f = gc._f

        copy_bucket_field(alloc_key_gen_wrong,                  gen);
        copy_bucket_field(alloc_key_dirty_sectors_wrong,        dirty_sectors);
        copy_bucket_field(alloc_key_cached_sectors_wrong,       cached_sectors);
        copy_bucket_field(alloc_key_stripe_wrong,               stripe);
        copy_bucket_field(alloc_key_stripe_redundancy_wrong,    stripe_redundancy);
        copy_bucket_field(alloc_key_fragmentation_lru_wrong,    fragmentation_lru);
#undef copy_bucket_field

        if (!bch2_alloc_v4_cmp(*old, new))
                return 0;

        a = bch2_alloc_to_v4_mut(trans, k);
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                return ret;

        a->v = new;

        /*
         * The trigger normally makes sure these are set, but we're not running
         * triggers:
         */
        if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
                a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

        ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
fsck_err:
        return ret;
}

static int bch2_gc_alloc_done(struct bch_fs *c)
{
        int ret = 0;

        for_each_member_device(c, ca) {
                ret = bch2_trans_run(c,
                        for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
                                        POS(ca->dev_idx, ca->mi.first_bucket),
                                        POS(ca->dev_idx, ca->mi.nbuckets - 1),
                                        BTREE_ITER_slots|BTREE_ITER_prefetch, k,
                                        NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
                                bch2_alloc_write_key(trans, &iter, ca, k)));
                if (ret) {
                        bch2_dev_put(ca);
                        break;
                }
        }

        bch_err_fn(c, ret);
        return ret;
}

static int bch2_gc_alloc_start(struct bch_fs *c)
{
        for_each_member_device(c, ca) {
                struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
                                ca->mi.nbuckets * sizeof(struct bucket),
                                GFP_KERNEL|__GFP_ZERO);
                if (!buckets) {
                        bch2_dev_put(ca);
                        bch_err(c, "error allocating ca->buckets[gc]");
                        return -BCH_ERR_ENOMEM_gc_alloc_start;
                }

                buckets->first_bucket   = ca->mi.first_bucket;
                buckets->nbuckets       = ca->mi.nbuckets;
                buckets->nbuckets_minus_first =
                        buckets->nbuckets - buckets->first_bucket;
                rcu_assign_pointer(ca->buckets_gc, buckets);
        }

        struct bch_dev *ca = NULL;
        int ret = bch2_trans_run(c,
                for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
                                         BTREE_ITER_prefetch, k, ({
                        ca = bch2_dev_iterate(c, ca, k.k->p.inode);
                        if (!ca) {
                                bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
                                continue;
                        }

                        if (bucket_valid(ca, k.k->p.offset)) {
                                struct bch_alloc_v4 a_convert;
                                const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

                                struct bucket *g = gc_bucket(ca, k.k->p.offset);
                                g->gen_valid    = 1;
                                g->gen          = a->gen;
                        }
                        0;
                })));
        bch2_dev_put(ca);
        bch_err_fn(c, ret);
        return ret;
}

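/*
 * reflink_gc_table was filled in btree order by bch2_gc_reflink_start(), so
 * @idx simply advances as we walk the reflink btree here; fix up any stored
 * refcount that disagrees with what GC counted:
 */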
static int bch2_gc_write_reflink_key(struct btree_trans *trans,
                                     struct btree_iter *iter,
                                     struct bkey_s_c k,
                                     size_t *idx)
{
        struct bch_fs *c = trans->c;
        const __le64 *refcount = bkey_refcount_c(k);
        struct printbuf buf = PRINTBUF;
        struct reflink_gc *r;
        int ret = 0;

        if (!refcount)
                return 0;

        while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
               r->offset < k.k->p.offset)
                ++*idx;

        if (!r ||
            r->offset != k.k->p.offset ||
            r->size != k.k->size) {
                bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
                return -EINVAL;
        }

        if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
                        reflink_v_refcount_wrong,
                        "reflink key has wrong refcount:\n"
                        "  %s\n"
                        "  should be %u",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
                        r->refcount)) {
                struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
                ret = PTR_ERR_OR_ZERO(new);
                if (ret)
                        goto out;

                if (!r->refcount)
                        new->k.type = KEY_TYPE_deleted;
                else
                        *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
                ret = bch2_trans_update(trans, iter, new, 0);
        }
out:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_gc_reflink_done(struct bch_fs *c)
{
        size_t idx = 0;

        int ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter,
                                BTREE_ID_reflink, POS_MIN,
                                BTREE_ITER_prefetch, k,
                                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                        bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
        c->reflink_gc_nr = 0;
        return ret;
}

static int bch2_gc_reflink_start(struct bch_fs *c)
{
        c->reflink_gc_nr = 0;

        int ret = bch2_trans_run(c,
                for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
                                   BTREE_ITER_prefetch, k, ({
                        const __le64 *refcount = bkey_refcount_c(k);

                        if (!refcount)
                                continue;

                        struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
                                                        c->reflink_gc_nr++, GFP_KERNEL);
                        if (!r) {
                                ret = -BCH_ERR_ENOMEM_gc_reflink_start;
                                break;
                        }

                        r->offset       = k.k->p.offset;
                        r->size         = k.k->size;
                        r->refcount     = 0;
                        0;
                })));

        bch_err_fn(c, ret);
        return ret;
}

static int bch2_gc_write_stripes_key(struct btree_trans *trans,
                                     struct btree_iter *iter,
                                     struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;
        const struct bch_stripe *s;
        struct gc_stripe *m;
        bool bad = false;
        unsigned i;
        int ret = 0;

        if (k.k->type != KEY_TYPE_stripe)
                return 0;

        s = bkey_s_c_to_stripe(k).v;
        m = genradix_ptr(&c->gc_stripes, k.k->p.offset);

        for (i = 0; i < s->nr_blocks; i++) {
                u32 old = stripe_blockcount_get(s, i);
                u32 new = (m ? m->block_sectors[i] : 0);

                if (old != new) {
                        prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
                                   i, old, new);
                        bad = true;
                }
        }

        if (bad)
                bch2_bkey_val_to_text(&buf, c, k);

        if (fsck_err_on(bad, c, stripe_sector_count_wrong,
                        "%s", buf.buf)) {
                struct bkey_i_stripe *new;

                new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
                ret = PTR_ERR_OR_ZERO(new);
                if (ret)
                        return ret;

                bkey_reassemble(&new->k_i, k);

                for (i = 0; i < new->v.nr_blocks; i++)
                        stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);

                ret = bch2_trans_update(trans, iter, &new->k_i, 0);
        }
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_gc_stripes_done(struct bch_fs *c)
{
        return bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter,
                                BTREE_ID_stripes, POS_MIN,
                                BTREE_ITER_prefetch, k,
                                NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                        bch2_gc_write_stripes_key(trans, &iter, k)));
}

/**
 * bch2_check_allocations - walk all references to buckets, and recompute them:
 *
 * @c:                  filesystem object
 *
 * Returns: 0 on success, or standard errcode on failure
 *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see gc_will_visit_node(),
 *    gc_will_visit_root()
 *
 *  - also, references move around in the course of index updates and
 *    various other crap: everything needs to agree on the ordering
 *    references are allowed to move around in - e.g., we're allowed to
 *    start with a reference owned by an open_bucket (the allocator) and
 *    move it to the btree, but not the reverse.
 *
 *    This is necessary to ensure that gc doesn't miss references that
 *    move around - if references move backwards in the ordering GC
 *    uses, GC could skip past them
 */
int bch2_check_allocations(struct bch_fs *c)
{
        int ret;

        lockdep_assert_held(&c->state_lock);

        down_write(&c->gc_lock);

        bch2_btree_interior_updates_flush(c);

        ret   = bch2_gc_start(c) ?:
                bch2_gc_alloc_start(c) ?:
                bch2_gc_reflink_start(c);
        if (ret)
                goto out;

        gc_pos_set(c, gc_phase(GC_PHASE_start));

        ret = bch2_mark_superblocks(c);
        BUG_ON(ret);

        ret = bch2_gc_btrees(c);
        if (ret)
                goto out;

        c->gc_count++;

        bch2_journal_block(&c->journal);
out:
        ret   = bch2_gc_alloc_done(c) ?:
                bch2_gc_done(c) ?:
                bch2_gc_stripes_done(c) ?:
                bch2_gc_reflink_done(c);

        bch2_journal_unblock(&c->journal);

        percpu_down_write(&c->mark_lock);
        /* Indicates that gc is no longer in progress: */
        __gc_pos_set(c, gc_phase(GC_PHASE_not_running));

        bch2_gc_free(c);
        percpu_up_write(&c->mark_lock);

        up_write(&c->gc_lock);

        /*
         * At startup, allocations can happen directly instead of via the
         * allocator thread - issue wakeup in case they blocked on gc_lock:
         */
        closure_wake_up(&c->freelist_wait);
        bch_err_fn(c, ret);
        return ret;
}

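/*
 * If any pointer in @k is more than 16 generations out of date, rewrite the
 * key to drop stale pointers (via bch2_extent_normalize()); otherwise,
 * record for each bucket the oldest gen of any pointer into it, to be
 * written out later by bch2_alloc_write_oldest_gen():
 */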
static int gc_btree_gens_key(struct btree_trans *trans,
                             struct btree_iter *iter,
                             struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        struct bkey_i *u;
        int ret;

        if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
                return -EROFS;

        percpu_down_read(&c->mark_lock);
        rcu_read_lock();
        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
                if (!ca)
                        continue;

                if (dev_ptr_stale(ca, ptr) > 16) {
                        rcu_read_unlock();
                        percpu_up_read(&c->mark_lock);
                        goto update;
                }
        }

        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
                if (!ca)
                        continue;

                u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
                if (gen_after(*gen, ptr->gen))
                        *gen = ptr->gen;
        }
        rcu_read_unlock();
        percpu_up_read(&c->mark_lock);
        return 0;
update:
        u = bch2_bkey_make_mut(trans, iter, &k, 0);
        ret = PTR_ERR_OR_ZERO(u);
        if (ret)
                return ret;

        bch2_extent_normalize(c, bkey_i_to_s(u));
        return 0;
}

static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct bch_dev *ca,
                                       struct btree_iter *iter, struct bkey_s_c k)
{
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
        struct bkey_i_alloc_v4 *a_mut;
        int ret;

        if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
                return 0;

        a_mut = bch2_alloc_to_v4_mut(trans, k);
        ret = PTR_ERR_OR_ZERO(a_mut);
        if (ret)
                return ret;

        a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
        alloc_data_type_set(&a_mut->v, a_mut->v.data_type);

        return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}

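/*
 * Generation garbage collection: walk every btree with pointers, recording
 * the oldest pointer generation referencing each bucket, then write that
 * out as each alloc key's oldest_gen. This is presumably what allows the
 * small (8 bit) bucket gens to wrap without ambiguity about which pointers
 * are stale:
 */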
int bch2_gc_gens(struct bch_fs *c)
{
        u64 b, start_time = local_clock();
        int ret;

        /*
         * Ideally we would be using state_lock and not gc_lock here, but that
         * introduces a deadlock in the RO path - we currently take the state
         * lock at the start of going RO, thus the gc thread may get stuck:
         */
        if (!mutex_trylock(&c->gc_gens_lock))
                return 0;

        trace_and_count(c, gc_gens_start, c);
        down_read(&c->gc_lock);

        for_each_member_device(c, ca) {
                struct bucket_gens *gens = bucket_gens(ca);

                BUG_ON(ca->oldest_gen);

                ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
                if (!ca->oldest_gen) {
                        bch2_dev_put(ca);
                        ret = -BCH_ERR_ENOMEM_gc_gens;
                        goto err;
                }

                for (b = gens->first_bucket;
                     b < gens->nbuckets; b++)
                        ca->oldest_gen[b] = gens->b[b];
        }

        for (unsigned i = 0; i < BTREE_ID_NR; i++)
                if (btree_type_has_ptrs(i)) {
                        c->gc_gens_btree = i;
                        c->gc_gens_pos = POS_MIN;

                        ret = bch2_trans_run(c,
                                for_each_btree_key_commit(trans, iter, i,
                                                POS_MIN,
                                                BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
                                                k,
                                                NULL, NULL,
                                                BCH_TRANS_COMMIT_no_enospc,
                                        gc_btree_gens_key(trans, &iter, k)));
                        if (ret)
                                goto err;
                }

        struct bch_dev *ca = NULL;
        ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
                                POS_MIN,
                                BTREE_ITER_prefetch,
                                k,
                                NULL, NULL,
                                BCH_TRANS_COMMIT_no_enospc, ({
                        ca = bch2_dev_iterate(c, ca, k.k->p.inode);
                        if (!ca) {
                                bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
                                continue;
                        }
                        bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
                })));
        bch2_dev_put(ca);

        if (ret)
                goto err;

        c->gc_gens_btree        = 0;
        c->gc_gens_pos          = POS_MIN;

        c->gc_count++;

        bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
        trace_and_count(c, gc_gens_end, c);
err:
        for_each_member_device(c, ca) {
                kvfree(ca->oldest_gen);
                ca->oldest_gen = NULL;
        }

        up_read(&c->gc_lock);
        mutex_unlock(&c->gc_gens_lock);
        if (!bch2_err_matches(ret, EROFS))
                bch_err_fn(c, ret);
        return ret;
}

static void bch2_gc_gens_work(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
        bch2_gc_gens(c);
        bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
}

void bch2_gc_gens_async(struct bch_fs *c)
{
        if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
            !queue_work(c->write_ref_wq, &c->gc_gens_work))
                bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
}

void bch2_fs_gc_init(struct bch_fs *c)
{
        seqcount_init(&c->gc_pos_lock);

        INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);
}